repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
ColumbiaCMB/kid_readout | apps/data_taking_scripts/2018-02-08/sweep_and_stream.py | 1 | 3229 | import time
import numpy as np
from kid_readout.interactive import *
from kid_readout.equipment import hardware
from kid_readout.measurement import acquire
from kid_readout.roach import analog, hardware_tools, attenuator, r2heterodyne
ifboard = analog.HeterodyneMarkII()
setup = hardware.Hardware(ifboard)
ri = hardware_tools.r2_with_mk2()
ri.initialize()
#setup = hardware.Hardware()
#ri = hardware_tools.r2h14_with_mk2(initialize=True, use_config=False)
ri.iq_delay=-1
ri.set_fft_gain(6)
#initial_f0s = np.load('/data/readout/resonances/2018-03-14-medley-efield.npy')/1e6
initial_f0s = np.load('/data/readout/resonances/2018-03-14-medley-efield_2GHz.npy')/1e6
#ilo = 3200.
ilo = 2370.
ri.set_lo(ilo)
initial_lo = (ilo)
nf = len(initial_f0s)
atonce = 4
if nf % atonce > 0:
print "extending list of resonators to make a multiple of ", atonce
initial_f0s = np.concatenate((initial_f0s, np.arange(1, 1 + atonce - (nf % atonce)) + initial_f0s.max()))
print len(initial_f0s)
print initial_f0s
nsamp = 2**16
offsets = np.arange(-256,256)*512./nsamp
for (lo,f0s) in [(initial_lo,initial_f0s)]:
ri.set_lo(lo)
for dac_atten in [10]:
ri.set_dac_atten(dac_atten)
tic = time.time()
ncf = new_nc_file(suffix='%d_dB_dac' % dac_atten)
swpa = acquire.run_sweep(ri, tone_banks=f0s[None,:] + offsets[:,None], num_tone_samples=nsamp,
length_seconds=0, state=setup.state(), verbose=True,
description='dark sweep')
print "resonance sweep done", (time.time()-tic)/60.
ncf.write(swpa)
current_f0s = []
for sidx in range(f0s.shape[0]):
swp = swpa.sweep(sidx)
res = swp.resonator
print res.f_0, res.Q, res.current_result.redchi, (f0s[sidx]*1e6-res.f_0)
if np.abs(res.f_0 - f0s[sidx]*1e6) > 200e3:
current_f0s.append(f0s[sidx]*1e6)
print "using original frequency for ",f0s[sidx]
else:
current_f0s.append(res.f_0)
print "fits complete", (time.time()-tic)/60.
current_f0s = np.array(current_f0s)/1e6
current_f0s.sort()
if np.any(np.diff(current_f0s)<0.031):
print "problematic resonator collision:",current_f0s
print "deltas:",np.diff(current_f0s)
problems = np.flatnonzero(np.diff(current_f0s)<0.031)+1
current_f0s[problems] = (current_f0s[problems-1] + current_f0s[problems+1])/2.0
if np.any(np.diff(current_f0s)<0.031):
print "repeated problematic resonator collision:",current_f0s
print "deltas:",np.diff(current_f0s)
problems = np.flatnonzero(np.diff(current_f0s)<0.031)+1
current_f0s[problems] = (current_f0s[problems-1] + current_f0s[problems+1])/2.0
ri.set_tone_freqs(current_f0s,nsamp)
ri.select_fft_bins(range(f0s.shape[0]))
#raw_input("turn off compressor")
meas = ri.get_measurement(num_seconds=60., state=setup.state(),description='source off stream')
ncf.write(meas)
print "dac_atten %f done in %.1f minutes" % (dac_atten, (time.time()-tic)/60.)
ncf.close()
ri.set_dac_atten(20)
| bsd-2-clause |
tgoodlet/pytest | _pytest/freeze_support.py | 15 | 1201 | """
Provides a function to report all internal modules for using freezing tools
pytest
"""
def pytest_namespace():
return {'freeze_includes': freeze_includes}
def freeze_includes():
"""
Returns a list of module names used by py.test that should be
included by cx_freeze.
"""
import py
import _pytest
result = list(_iter_all_modules(py))
result += list(_iter_all_modules(_pytest))
return result
def _iter_all_modules(package, prefix=''):
"""
Iterates over the names of all modules that can be found in the given
package, recursively.
Example:
_iter_all_modules(_pytest) ->
['_pytest.assertion.newinterpret',
'_pytest.capture',
'_pytest.core',
...
]
"""
import os
import pkgutil
if type(package) is not str:
path, prefix = package.__path__[0], package.__name__ + '.'
else:
path = package
for _, name, is_package in pkgutil.iter_modules([path]):
if is_package:
for m in _iter_all_modules(os.path.join(path, name), prefix=name + '.'):
yield prefix + m
else:
yield prefix + name | mit |
tosfan4ever/hacktheplanet | config/settings/local.py | 1 | 2187 | # -*- coding: utf-8 -*-
'''
Local settings
- Run in Debug mode
- Use console backend for emails
- Add Django Debug Toolbar
- Add django-extensions as app
'''
from .common import * # noqa
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
SECRET_KEY = env("DJANGO_SECRET_KEY", default='CHANGEME!!!')
# Mail settings
# ------------------------------------------------------------------------------
EMAIL_HOST = 'localhost'
EMAIL_PORT = 1025
# CACHING
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# django-debug-toolbar
# ------------------------------------------------------------------------------
# MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
# INSTALLED_APPS += ('debug_toolbar', )
INTERNAL_IPS = ('127.0.0.1', '10.0.2.2',)
# DEBUG_TOOLBAR_CONFIG = {
# 'DISABLE_PANELS': [
# 'debug_toolbar.panels.redirects.RedirectsPanel',
# ],
# 'SHOW_TEMPLATE_CONTEXT': True,
# }
# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ('django_extensions', )
# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Your local stuff: Below this line define 3rd party library settings
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
'default': env.db("DATABASE_URL", default="postgres:///htp")
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
| bsd-3-clause |
NaeiKinDus/xbmc | lib/libUPnP/Neptune/Build/Targets/x86-microsoft-win32-vs2010/Build.py | 198 | 2458 | #! /usr/bin/env python
"""
Visual Studio 2010 Build Script
$Id: Build.py 309 2011-09-21 05:44:28Z soothe $
"""
import os
import sys
import getopt
import subprocess
# Setup some path stuff
try:
if environ['VISUALSTUDIO_BIN']:
VSBINDIR = environ['VISUALSTUDIO_BIN']
except NameError:
# Use default value for visual studio.
VSBINDIR = 'C:/Program Files/Microsoft Visual Studio 10.0/Common7/IDE'
print 'VISUALSTUDIO_BIN not set. Trying default value:'
print ' ' + VSBINDIR
print ''
# ------------------------------------------------------------
# usage
# ------------------------------------------------------------
def usage(errMsg):
try:
print 'Error: %s' % (errMsg)
except NameError:
pass
print 'Usage: '
print ' %s -s <path to solution> -b [Release|Debug|etc.]' % (sys.argv[0])
print ''
print ' REQUIRED OPTIONS'
print '\t-s <solution>'
print '\t-b <configuration>'
print ''
print ' BUILD OPTIONS'
print '\t-c\tMake clean'
print '\t-r\tRe-build all'
# ------------------------------------------------------------
# main
# ------------------------------------------------------------
try:
opts, args = getopt.getopt(sys.argv[1:], "s:b:rc")
except getopt.GetoptError, (msg, opt):
# print 'Error: invalid argument, %s: %s' % (opt, msg)
usage('invalid argument, %s: %s' % (opt, msg))
sys.exit(2)
# Build options
doingBuild = False
rebuildAll = False
makeClean = False
for opt, arg in opts:
if opt == '-s':
solutionFile = arg
doingBuild = True
elif opt == '-b':
buildName = arg
doingBuild = True
elif opt == '-r':
rebuildAll = True
doingBuild = True
elif opt == '-c':
makeClean = True
if rebuildAll and makeClean:
usage('Error cannot specify -c and -r together')
sys.exit(2)
try:
buildSwitch = 'build'
if rebuildAll: buildSwitch = 'rebuild'
elif makeClean: buildSwitch = 'clean'
cmd_list = ['%s/devenv.com' % VSBINDIR, '/%s' % buildSwitch, buildName, solutionFile]
cmd = " ".join(cmd_list)
print 'Executing:'
print cmd
retVal = subprocess.call(cmd_list)
# only the least sig 8 bits are the real return value
if retVal != 0:
print cmd
print '** BUILD FAILURE **'
sys.exit(retVal)
except NameError, (name):
usage('missing argument %s' % (name))
sys.exit(2)
| gpl-2.0 |
Imaginashion/cloud-vision | .fr-d0BNfn/django-jquery-file-upload/venv/lib/python3.5/encodings/charmap.py | 860 | 2084 | """ Generic Python Character Mapping Codec.
Use this codec directly rather than through the automatic
conversion mechanisms supplied by unicode() and .encode().
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
# Note: Binding these as C functions will result in the class not
# converting them to methods. This is intended.
encode = codecs.charmap_encode
decode = codecs.charmap_decode
class IncrementalEncoder(codecs.IncrementalEncoder):
def __init__(self, errors='strict', mapping=None):
codecs.IncrementalEncoder.__init__(self, errors)
self.mapping = mapping
def encode(self, input, final=False):
return codecs.charmap_encode(input, self.errors, self.mapping)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def __init__(self, errors='strict', mapping=None):
codecs.IncrementalDecoder.__init__(self, errors)
self.mapping = mapping
def decode(self, input, final=False):
return codecs.charmap_decode(input, self.errors, self.mapping)[0]
class StreamWriter(Codec,codecs.StreamWriter):
def __init__(self,stream,errors='strict',mapping=None):
codecs.StreamWriter.__init__(self,stream,errors)
self.mapping = mapping
def encode(self,input,errors='strict'):
return Codec.encode(input,errors,self.mapping)
class StreamReader(Codec,codecs.StreamReader):
def __init__(self,stream,errors='strict',mapping=None):
codecs.StreamReader.__init__(self,stream,errors)
self.mapping = mapping
def decode(self,input,errors='strict'):
return Codec.decode(input,errors,self.mapping)
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='charmap',
encode=Codec.encode,
decode=Codec.decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
| mit |
h3llrais3r/Auto-Subliminal | lib/tvdb_api_v2/models/series_images_query_param.py | 1 | 7103 | # coding: utf-8
"""
TheTVDB API v2
API v2 targets v1 functionality with a few minor additions. The API is accessible via https://api.thetvdb.com and provides the following REST endpoints in JSON format. How to use this API documentation ---------------- You may browse the API routes without authentication, but if you wish to send requests to the API and see response data, then you must authenticate. 1. Obtain a JWT token by `POST`ing to the `/login` route in the `Authentication` section with your API key and credentials. 1. Paste the JWT token from the response into the \"JWT Token\" field at the top of the page and click the 'Add Token' button. You will now be able to use the remaining routes to send requests to the API and get a response. Language Selection ---------------- Language selection is done via the `Accept-Language` header. At the moment, you may only pass one language abbreviation in the header at a time. Valid language abbreviations can be found at the `/languages` route.. Authentication ---------------- Authentication to use the API is similar to the How-to section above. Users must `POST` to the `/login` route with their API key and credentials in the following format in order to obtain a JWT token. `{\"apikey\":\"APIKEY\",\"username\":\"USERNAME\",\"userkey\":\"USERKEY\"}` Note that the username and key are ONLY required for the `/user` routes. The user's key is labled `Account Identifier` in the account section of the main site. The token is then used in all subsequent requests by providing it in the `Authorization` header. The header will look like: `Authorization: Bearer <yourJWTtoken>`. Currently, the token expires after 24 hours. You can `GET` the `/refresh_token` route to extend that expiration date. Versioning ---------------- You may request a different version of the API by including an `Accept` header in your request with the following format: `Accept:application/vnd.thetvdb.v$VERSION`. 
This documentation automatically uses the version seen at the top and bottom of the page.
OpenAPI spec version: 2.2.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class SeriesImagesQueryParam(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'key_type': 'str',
'language_id': 'str',
'resolution': 'list[str]',
'sub_key': 'list[str]'
}
attribute_map = {
'key_type': 'keyType',
'language_id': 'languageId',
'resolution': 'resolution',
'sub_key': 'subKey'
}
def __init__(self, key_type=None, language_id=None, resolution=None, sub_key=None):
"""
SeriesImagesQueryParam - a model defined in Swagger
"""
self._key_type = None
self._language_id = None
self._resolution = None
self._sub_key = None
if key_type is not None:
self.key_type = key_type
if language_id is not None:
self.language_id = language_id
if resolution is not None:
self.resolution = resolution
if sub_key is not None:
self.sub_key = sub_key
@property
def key_type(self):
"""
Gets the key_type of this SeriesImagesQueryParam.
:return: The key_type of this SeriesImagesQueryParam.
:rtype: str
"""
return self._key_type
@key_type.setter
def key_type(self, key_type):
"""
Sets the key_type of this SeriesImagesQueryParam.
:param key_type: The key_type of this SeriesImagesQueryParam.
:type: str
"""
self._key_type = key_type
@property
def language_id(self):
"""
Gets the language_id of this SeriesImagesQueryParam.
:return: The language_id of this SeriesImagesQueryParam.
:rtype: str
"""
return self._language_id
@language_id.setter
def language_id(self, language_id):
"""
Sets the language_id of this SeriesImagesQueryParam.
:param language_id: The language_id of this SeriesImagesQueryParam.
:type: str
"""
self._language_id = language_id
@property
def resolution(self):
"""
Gets the resolution of this SeriesImagesQueryParam.
:return: The resolution of this SeriesImagesQueryParam.
:rtype: list[str]
"""
return self._resolution
@resolution.setter
def resolution(self, resolution):
"""
Sets the resolution of this SeriesImagesQueryParam.
:param resolution: The resolution of this SeriesImagesQueryParam.
:type: list[str]
"""
self._resolution = resolution
@property
def sub_key(self):
"""
Gets the sub_key of this SeriesImagesQueryParam.
:return: The sub_key of this SeriesImagesQueryParam.
:rtype: list[str]
"""
return self._sub_key
@sub_key.setter
def sub_key(self, sub_key):
"""
Sets the sub_key of this SeriesImagesQueryParam.
:param sub_key: The sub_key of this SeriesImagesQueryParam.
:type: list[str]
"""
self._sub_key = sub_key
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, SeriesImagesQueryParam):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| gpl-3.0 |
mwytock/cvxpy | cvxpy/problems/solvers/ecos_intf.py | 11 | 5068 | """
Copyright 2013 Steven Diamond
This file is part of CVXPY.
CVXPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY. If not, see <http://www.gnu.org/licenses/>.
"""
import cvxpy.interface as intf
import cvxpy.settings as s
from cvxpy.problems.solvers.solver import Solver
class ECOS(Solver):
"""An interface for the ECOS solver.
"""
# Solver capabilities.
LP_CAPABLE = True
SOCP_CAPABLE = True
SDP_CAPABLE = False
EXP_CAPABLE = True
MIP_CAPABLE = False
# EXITCODES from ECOS
# ECOS_OPTIMAL (0) Problem solved to optimality
# ECOS_PINF (1) Found certificate of primal infeasibility
# ECOS_DINF (2) Found certificate of dual infeasibility
# ECOS_INACC_OFFSET (10) Offset exitflag at inaccurate results
# ECOS_MAXIT (-1) Maximum number of iterations reached
# ECOS_NUMERICS (-2) Search direction unreliable
# ECOS_OUTCONE (-3) s or z got outside the cone, numerics?
# ECOS_SIGINT (-4) solver interrupted by a signal/ctrl-c
# ECOS_FATAL (-7) Unknown problem in solver
# Map of ECOS status to CVXPY status.
STATUS_MAP = {0: s.OPTIMAL,
1: s.INFEASIBLE,
2: s.UNBOUNDED,
10: s.OPTIMAL_INACCURATE,
11: s.INFEASIBLE_INACCURATE,
12: s.UNBOUNDED_INACCURATE,
-1: s.SOLVER_ERROR,
-2: s.SOLVER_ERROR,
-3: s.SOLVER_ERROR,
-4: s.SOLVER_ERROR,
-7: s.SOLVER_ERROR}
def import_solver(self):
"""Imports the solver.
"""
import ecos
def name(self):
"""The name of the solver.
"""
return s.ECOS
def matrix_intf(self):
"""The interface for matrices passed to the solver.
"""
return intf.DEFAULT_SPARSE_INTF
def vec_intf(self):
"""The interface for vectors passed to the solver.
"""
return intf.DEFAULT_INTF
def split_constr(self, constr_map):
"""Extracts the equality, inequality, and nonlinear constraints.
Parameters
----------
constr_map : dict
A dict of the canonicalized constraints.
Returns
-------
tuple
(eq_constr, ineq_constr, nonlin_constr)
"""
return (constr_map[s.EQ], constr_map[s.LEQ], [])
def solve(self, objective, constraints, cached_data,
warm_start, verbose, solver_opts):
"""Returns the result of the call to the solver.
Parameters
----------
objective : LinOp
The canonicalized objective.
constraints : list
The list of canonicalized cosntraints.
cached_data : dict
A map of solver name to cached problem data.
warm_start : bool
Not used.
verbose : bool
Should the solver print output?
solver_opts : dict
Additional arguments for the solver.
Returns
-------
tuple
(status, optimal value, primal, equality dual, inequality dual)
"""
import ecos
data = self.get_problem_data(objective, constraints, cached_data)
data[s.DIMS]['e'] = data[s.DIMS][s.EXP_DIM]
results_dict = ecos.solve(data[s.C], data[s.G], data[s.H],
data[s.DIMS], data[s.A], data[s.B],
verbose=verbose,
**solver_opts)
return self.format_results(results_dict, data, cached_data)
def format_results(self, results_dict, data, cached_data):
"""Converts the solver output into standard form.
Parameters
----------
results_dict : dict
The solver output.
data : dict
Information about the problem.
cached_data : dict
A map of solver name to cached problem data.
Returns
-------
dict
The solver output in standard form.
"""
new_results = {}
status = self.STATUS_MAP[results_dict['info']['exitFlag']]
new_results[s.STATUS] = status
if new_results[s.STATUS] in s.SOLUTION_PRESENT:
primal_val = results_dict['info']['pcost']
new_results[s.VALUE] = primal_val + data[s.OFFSET]
new_results[s.PRIMAL] = results_dict['x']
new_results[s.EQ_DUAL] = results_dict['y']
new_results[s.INEQ_DUAL] = results_dict['z']
return new_results
| gpl-3.0 |
stackforge/python-jenkins | tests/jobs/test_create.py | 1 | 4230 | import json
from mock import patch
import jenkins
from tests.jobs.base import JenkinsJobsTestBase
class JenkinsCreateJobTest(JenkinsJobsTestBase):
@patch.object(jenkins.Jenkins, 'jenkins_open')
def test_simple(self, jenkins_mock):
jenkins_mock.side_effect = [
jenkins.NotFoundException(),
None,
json.dumps({'name': 'Test Job'}),
]
self.j.create_job(u'Test Job', self.config_xml)
self.assertEqual(
jenkins_mock.call_args_list[1][0][0].url,
self.make_url('createItem?name=Test%20Job'))
self._check_requests(jenkins_mock.call_args_list)
@patch.object(jenkins.Jenkins, 'jenkins_open')
def test_in_folder(self, jenkins_mock):
jenkins_mock.side_effect = [
jenkins.NotFoundException(),
None,
json.dumps({'name': 'Test Job'}),
]
self.j.create_job(u'a Folder/Test Job', self.config_xml)
self.assertEqual(
jenkins_mock.call_args_list[1][0][0].url,
self.make_url('job/a%20Folder/createItem?name=Test%20Job'))
self._check_requests(jenkins_mock.call_args_list)
@patch.object(jenkins.Jenkins, 'jenkins_open')
def test_already_exists(self, jenkins_mock):
jenkins_mock.side_effect = [
json.dumps({'name': 'TestJob'}),
None,
]
with self.assertRaises(jenkins.JenkinsException) as context_manager:
self.j.create_job(u'TestJob', self.config_xml)
self.assertEqual(
jenkins_mock.call_args_list[0][0][0].url,
self.make_url('job/TestJob/api/json?tree=name'))
self.assertEqual(
str(context_manager.exception),
'job[TestJob] already exists')
self._check_requests(jenkins_mock.call_args_list)
@patch.object(jenkins.Jenkins, 'jenkins_open')
def test_already_exists_in_folder(self, jenkins_mock):
jenkins_mock.side_effect = [
json.dumps({'name': 'TestJob'}),
None,
]
with self.assertRaises(jenkins.JenkinsException) as context_manager:
self.j.create_job(u'a Folder/TestJob', self.config_xml)
self.assertEqual(
jenkins_mock.call_args_list[0][0][0].url,
self.make_url('job/a%20Folder/job/TestJob/api/json?tree=name'))
self.assertEqual(
str(context_manager.exception),
'job[a Folder/TestJob] already exists')
self._check_requests(jenkins_mock.call_args_list)
@patch.object(jenkins.Jenkins, 'jenkins_open')
def test_failed(self, jenkins_mock):
jenkins_mock.side_effect = [
jenkins.NotFoundException(),
None,
jenkins.NotFoundException(),
]
with self.assertRaises(jenkins.JenkinsException) as context_manager:
self.j.create_job(u'TestJob', self.config_xml)
self.assertEqual(
jenkins_mock.call_args_list[0][0][0].url,
self.make_url('job/TestJob/api/json?tree=name'))
self.assertEqual(
jenkins_mock.call_args_list[1][0][0].url,
self.make_url('createItem?name=TestJob'))
self.assertEqual(
str(context_manager.exception),
'create[TestJob] failed')
self._check_requests(jenkins_mock.call_args_list)
@patch.object(jenkins.Jenkins, 'jenkins_open')
def test_failed_in_folder(self, jenkins_mock):
jenkins_mock.side_effect = [
jenkins.NotFoundException(),
None,
jenkins.NotFoundException(),
]
with self.assertRaises(jenkins.JenkinsException) as context_manager:
self.j.create_job(u'a Folder/TestJob', self.config_xml)
self.assertEqual(
jenkins_mock.call_args_list[0][0][0].url,
self.make_url('job/a%20Folder/job/TestJob/api/json?tree=name'))
self.assertEqual(
jenkins_mock.call_args_list[1][0][0].url,
self.make_url('job/a%20Folder/createItem?name=TestJob'))
self.assertEqual(
str(context_manager.exception),
'create[a Folder/TestJob] failed')
self._check_requests(jenkins_mock.call_args_list)
| bsd-3-clause |
AndydeCleyre/plumbum | docs/conf.py | 9 | 8308 | # -*- coding: utf-8 -*-
#
# Plumbum Shell Combinators documentation build configuration file, created by
# sphinx-quickstart on Sun Apr 29 16:24:32 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os, time
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Plumbum Shell Combinators'
copyright = u'%d, Tomer Filiba, licensed under Attribution-ShareAlike 3.0' % (time.gmtime().tm_year,)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
from plumbum.version import version_string, release_date
version = version_string
# The full version, including alpha/beta/rc tags.
release = version_string + " / " + release_date
autodoc_member_order = "bysource"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '_news.rst', '_cheatsheet.rst']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
html_theme = 'haiku'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {"full_logo" : True}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "Plumbum: Shell Combinators"
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = ""
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_static/logo8.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'PlumbumShellCombinatorsdoc'
# -- Options for LaTeX output --------------------------------------------------
# Standard sphinx-quickstart boilerplate; the commented entries document the
# available knobs and their defaults.

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'PlumbumShellCombinators.tex', u'Plumbum Shell Combinators Documentation',
     u'Tomer Filiba', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True


# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'plumbumshellcombinators', u'Plumbum Shell Combinators Documentation',
     [u'Tomer Filiba'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False


# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'PlumbumShellCombinators', u'Plumbum Shell Combinators Documentation',
     u'Tomer Filiba', 'PlumbumShellCombinators', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| mit |
Quikling/gpdb | src/test/tinc/tincrepo/mpp/gpdb/tests/storage/GPDBStorageBaseTestCase.py | 3 | 5054 | """
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
import glob
from time import sleep
import tinctest
from tinctest.lib import local_path
from gppylib.commands.base import Command
from mpp.lib.PSQL import PSQL
from tinctest.lib import local_path, Gpdiff
from mpp.lib.filerep_util import Filerepe2e_Util
from mpp.lib.gprecoverseg import GpRecover
from mpp.lib.gpstart import GpStart
from mpp.lib.gpstop import GpStop
from mpp.lib.config import GPDBConfig
from mpp.lib.gpfilespace import Gpfilespace
from mpp.lib.gpdbverify import GpdbVerify
from mpp.models import MPPTestCase
from mpp.gpdb.tests.storage.lib.dbstate import DbStateClass
from mpp.gpdb.tests.storage.lib.common_utils import *
class GPDBStorageBaseTestCase():
    '''
    Base Class for Storage test-suits like Crash Recovery,
    Pg_Two_Phase, sub_transaction

    Bundles the cluster-management helpers (start/stop, fault injection,
    recovery, verification) shared by the storage test suites.
    '''
    def __init__(self, config=None):
        # Fall back to a freshly parsed cluster configuration when the
        # caller does not supply one.
        if config is not None:
            self.config = config
        else:
            self.config = GPDBConfig()
        self.filereputil = Filerepe2e_Util()
        self.gprecover = GpRecover(self.config)
        self.gpstop = GpStop()
        self.gpstart = GpStart()
        self.gpfile = Gpfilespace(self.config)
        self.gpverify = GpdbVerify(config=self.config)
        self.dbstate = DbStateClass('run_validation', self.config)
        # Master port, taken from the environment the harness runs under.
        self.port = os.getenv('PGPORT')

    def invoke_fault(self, fault_name, type, role='mirror', port=None, occurence=None, sleeptime=None, seg_id=None):
        ''' Reset the fault and then issue the fault with the given type'''
        # Reset first so a leftover fault from an earlier test cannot mask
        # the one being injected now.
        self.filereputil.inject_fault(f=fault_name, y='reset', r=role, p=port , o=occurence, sleeptime=sleeptime, seg_id=seg_id)
        self.filereputil.inject_fault(f=fault_name, y=type, r=role, p=port , o=occurence, sleeptime=sleeptime, seg_id=seg_id)
        tinctest.logger.info('Successfully injected fault_name : %s fault_type : %s occurence : %s ' % (fault_name, type, occurence))

    def start_db(self):
        '''Gpstart the cluster; raise if it does not come up.'''
        rc = self.gpstart.run_gpstart_cmd()
        if not rc:
            raise Exception('Failed to start the cluster')
        tinctest.logger.info('Started the cluster successfully')

    def stop_db(self):
        ''' Gpstop and dont check for rc '''
        # Deliberately best-effort: the cluster may already be down.
        cmd = Command('Gpstop_a', 'gpstop -a')
        tinctest.logger.info('Executing command: gpstop -a')
        cmd.run()

    def get_trigger_status(self, trigger_count,max_cnt=50):
        '''Compare the pg_stat_activity count with the total number of trigger_sqls executed '''
        # NOTE(review): PSQL.run_sql_command returns psql's textual output,
        # so the '<' below compares a string with an int (only legal on
        # Python 2, and not numerically meaningful) -- confirm intent.
        # max_cnt is accepted but never used.
        psql_count=0
        for i in range(1,trigger_count):
            psql_count = PSQL.run_sql_command('select count(*) from pg_stat_activity;', flags='-q -t', dbname='postgres')
            sleep(1)
        tinctest.logger.info('Count of trigger sqls %s And it should be %s' % (psql_count, trigger_count))
        if psql_count < trigger_count :
            tinctest.logger.info('coming to the if loop in get_trigger_status')
            return False
        return True

    def check_trigger_sql_hang(self, test_dir):
        '''
        @param ddl_type : create/drop
        @param fault_type : commit/abort/end_prepare_two_phase_sleep
        @description : Return the status of the trigger sqls: whether they are waiting on the fault
        Since gpfaultinjector has no way to check if all the sqls are triggered, we are using
        a count(*) on pg_stat_activity and compare the total number of trigger_sqls
        '''
        # One .ans file is expected per trigger sql under the suite's
        # trigger_sql directory.
        trigger_dir = local_path('%s_tests/trigger_sql/' % (test_dir))
        trigger_count = len(glob.glob1(trigger_dir,"*.ans"))
        return self.get_trigger_status(trigger_count)
def get_items_list(test_file):
    ''' Get file contents to a list '''
    # One entry per line, with surrounding whitespace (and the trailing
    # newline) stripped.
    with open(test_file, 'r') as handle:
        entries = [entry.strip() for entry in handle]
    return entries
def validate_sql(filename):
    ''' Compare the out and ans files '''
    def _companion(extension):
        # Map foo.sql -> foo.<extension> next to the sql file.
        return local_path(filename.replace('.sql', extension))
    assert Gpdiff.are_files_equal(_companion('.out'), _companion('.ans'))
def run_sql(filename, verify=True):
    ''' Run the provided sql and validate it

    The psql output is written next to the input as a .out file; when
    *verify* is true the .out file is diffed against its .ans reference.
    '''
    out_file = local_path(filename.replace(".sql", ".out"))
    PSQL.run_sql_file(sql_file = filename, out_file = out_file)
    # Idiomatic truth test instead of the non-idiomatic '== True' (PEP 8).
    if verify:
        validate_sql(filename)
| apache-2.0 |
fifengine/fifengine | engine/python/fife/extensions/pychan/widgets/label.py | 1 | 4317 | # -*- coding: utf-8 -*-
# ####################################################################
# Copyright (C) 2005-2019 by the FIFE team
# http://www.fifengine.net
# This file is part of FIFE.
#
# FIFE is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# ####################################################################
from __future__ import absolute_import
from fife import fifechan
from fife.extensions.pychan.attrs import BoolAttr
from .basictextwidget import BasicTextWidget
class Label(BasicTextWidget):
    """
    A basic label - displaying a string.

    Also allows text wrapping and onMouse hover callbacks.

    New Attributes
    ==============

      - wrap_text: Boolean: Enable/Disable automatic text wrapping. Disabled by default.
        Currently to actually see text wrapping you have to explicitly set a max_size with
        the desired width of the text, as the layout engine is not capable of deriving
        the maximum width from a parent container.
    """

    ATTRIBUTES = BasicTextWidget.ATTRIBUTES + [ BoolAttr('wrap_text') ]
    DEFAULT_WRAP_TEXT = False

    def __init__(self,
                 parent = None,
                 name = None,
                 size = None,
                 min_size = None,
                 max_size = None,
                 fixed_size = None,
                 margins = None,
                 padding = None,
                 helptext = None,
                 position = None,
                 style = None,
                 hexpand = None,
                 vexpand = None,
                 font = None,
                 base_color = None,
                 background_color = None,
                 foreground_color = None,
                 selection_color = None,
                 border_color = None,
                 outline_color = None,
                 border_size = None,
                 outline_size = None,
                 position_technique = None,
                 is_focusable = None,
                 comment = None,
                 text = None,
                 wrap_text = None):
        # The underlying fifechan widget must exist before the base class
        # applies any of the attributes to it.
        self.real_widget = fifechan.Label("")
        self.wrap_text = self.DEFAULT_WRAP_TEXT
        super(Label,self).__init__(parent=parent,
                                   name=name,
                                   size=size,
                                   min_size=min_size,
                                   max_size=max_size,
                                   fixed_size=fixed_size,
                                   margins=margins,
                                   padding=padding,
                                   helptext=helptext,
                                   position=position,
                                   style=style,
                                   hexpand=hexpand,
                                   vexpand=vexpand,
                                   font=font,
                                   base_color=base_color,
                                   background_color=background_color,
                                   foreground_color=foreground_color,
                                   selection_color=selection_color,
                                   border_color=border_color,
                                   outline_color=outline_color,
                                   border_size=border_size,
                                   outline_size=outline_size,
                                   position_technique=position_technique,
                                   is_focusable=is_focusable,
                                   comment=comment,
                                   text=text)
        # Only override the default when the attribute was given explicitly.
        if wrap_text is not None:
            self.wrap_text = wrap_text

    def clone(self, prefix):
        """Return a copy of this label whose name is prefixed with *prefix*."""
        lblClone = Label(None,
                         self._createNameWithPrefix(prefix),
                         self.size,
                         self.min_size,
                         self.max_size,
                         self.fixed_size,
                         self.margins,
                         self.padding,
                         self.helptext,
                         self.position,
                         self.style,
                         self.hexpand,
                         self.vexpand,
                         self.font,
                         self.base_color,
                         self.background_color,
                         self.foreground_color,
                         self.selection_color,
                         self.border_color,
                         self.outline_color,
                         self.border_size,
                         self.outline_size,
                         self.position_technique,
                         self.is_focusable,
                         self.comment,
                         self.text,
                         self.wrap_text)
        return lblClone

    def _setTextWrapping(self,wrapping): self.real_widget.setTextWrapping(wrapping)
    def _getTextWrapping(self):
        # BUG FIX: the original lacked 'return', so reading wrap_text always
        # yielded None regardless of the real widget's state.
        return self.real_widget.isTextWrapping()
    wrap_text = property(_getTextWrapping,_setTextWrapping)
| lgpl-2.1 |
cvegaj/ElectriCERT | venv3/lib/python3.6/site-packages/wheel/test/test_basic.py | 472 | 6405 | """
Basic wheel tests.
"""
import os
import pkg_resources
import json
import sys
from pkg_resources import resource_filename
import wheel.util
import wheel.tool
from wheel import egg2wheel
from wheel.install import WheelFile
from zipfile import ZipFile
from shutil import rmtree
test_distributions = ("complex-dist", "simple.dist", "headers.dist")
def teardown_module():
    """Delete eggs/wheels created by tests."""
    base = pkg_resources.resource_filename('wheel.test', '')
    for dist in test_distributions:
        for subdir in ('build', 'dist'):
            try:
                rmtree(os.path.join(base, dist, subdir))
            except OSError:
                # The directory may not exist if the build step failed or
                # was skipped; nothing to clean up then.
                pass
def setup_module():
    """Build one wheel and one egg per test distribution before any test runs."""
    build_wheel()
    build_egg()
def build_wheel():
    """Build wheels from test distributions."""
    for dist in test_distributions:
        pwd = os.path.abspath(os.curdir)
        distdir = pkg_resources.resource_filename('wheel.test', dist)
        os.chdir(distdir)
        try:
            # Run the distribution's setup.py in-process with a faked argv,
            # as if 'python setup.py bdist_wheel' had been invoked.
            sys.argv = ['', 'bdist_wheel']
            exec(compile(open('setup.py').read(), 'setup.py', 'exec'))
        finally:
            # Always restore the working directory, even if the build fails.
            os.chdir(pwd)
def build_egg():
    """Build eggs from test distributions."""
    for dist in test_distributions:
        pwd = os.path.abspath(os.curdir)
        distdir = pkg_resources.resource_filename('wheel.test', dist)
        os.chdir(distdir)
        try:
            # Same in-process trick as build_wheel(), but for bdist_egg.
            sys.argv = ['', 'bdist_egg']
            exec(compile(open('setup.py').read(), 'setup.py', 'exec'))
        finally:
            os.chdir(pwd)
def test_findable():
    """Make sure pkg_resources can find us."""
    # If this fails, wheel itself is not installed in the test environment.
    assert pkg_resources.working_set.by_key['wheel'].version
def test_egg_re():
    """Make sure egg_info_re matches."""
    # eggnames.txt is a corpus of real-world egg filenames; every non-blank
    # entry must be parseable by egg2wheel's filename regex.
    egg_names = open(pkg_resources.resource_filename('wheel', 'eggnames.txt'))
    for line in egg_names:
        line = line.strip()
        if not line:
            continue
        assert egg2wheel.egg_info_re.match(line), line
def test_compatibility_tags():
    """Test compatibilty tags are working."""
    # A dotted pyver tag ('cp32.cp33') expands into one tag triple per part.
    wf = WheelFile("package-1.0.0-cp32.cp33-noabi-noarch.whl")
    assert (list(wf.compatibility_tags) ==
            [('cp32', 'noabi', 'noarch'), ('cp33', 'noabi', 'noarch')])
    assert (wf.arity == 2)
    # An optional build number ('1st') is parsed out of the filename.
    wf2 = WheelFile("package-1.0.0-1st-cp33-noabi-noarch.whl")
    wf2_info = wf2.parsed_filename.groupdict()
    assert wf2_info['build'] == '1st', wf2_info
def test_convert_egg():
    """Convert the eggs built by setup_module() into wheels."""
    base = pkg_resources.resource_filename('wheel.test', '')
    for dist in test_distributions:
        distdir = os.path.join(base, dist, 'dist')
        eggs = [e for e in os.listdir(distdir) if e.endswith('.egg')]
        wheel.tool.convert(eggs, distdir, verbose=False)
def test_unpack():
    """
    Make sure 'wheel unpack' works.

    This also verifies the integrity of our testing wheel files.
    """
    for dist in test_distributions:
        distdir = pkg_resources.resource_filename('wheel.test',
                                                  os.path.join(dist, 'dist'))
        # Unpack every wheel produced by setup_module() in place.
        for wheelfile in (w for w in os.listdir(distdir) if w.endswith('.whl')):
            wheel.tool.unpack(os.path.join(distdir, wheelfile), distdir)
def test_no_scripts():
    """Make sure entry point scripts are not generated.

    Walks every wheel built for 'complex-dist' and checks that no archive
    member lives under a '.data/scripts/' directory.
    """
    dist = "complex-dist"
    basedir = pkg_resources.resource_filename('wheel.test', dist)
    for (dirname, subdirs, filenames) in os.walk(basedir):
        for filename in filenames:
            if filename.endswith('.whl'):
                whl = ZipFile(os.path.join(dirname, filename))
                for entry in whl.infolist():
                    # PEP 8 idiom: 'x not in y' instead of 'not x in y'.
                    assert '.data/scripts/' not in entry.filename
def test_pydist():
    """Make sure pydist.json exists and validates against our schema."""
    # XXX this test may need manual cleanup of older wheels
    import jsonschema

    def open_json(filename):
        # Decode explicitly so the test is independent of the locale.
        return json.loads(open(filename, 'rb').read().decode('utf-8'))

    pymeta_schema = open_json(resource_filename('wheel.test',
                                                'pydist-schema.json'))
    valid = 0
    for dist in ("simple.dist", "complex-dist"):
        basedir = pkg_resources.resource_filename('wheel.test', dist)
        for (dirname, subdirs, filenames) in os.walk(basedir):
            for filename in filenames:
                if filename.endswith('.whl'):
                    whl = ZipFile(os.path.join(dirname, filename))
                    for entry in whl.infolist():
                        if entry.filename.endswith('/metadata.json'):
                            pymeta = json.loads(whl.read(entry).decode('utf-8'))
                            jsonschema.validate(pymeta, pymeta_schema)
                            valid += 1
    # Guard against the walk silently matching nothing at all.
    assert valid > 0, "No metadata.json found"
def test_util():
    """Test functions in util.py."""
    # The urlsafe base64 helpers must round-trip and strip '=' padding.
    for i in range(10):
        before = b'*' * i
        encoded = wheel.util.urlsafe_b64encode(before)
        assert not encoded.endswith(b'=')
        after = wheel.util.urlsafe_b64decode(encoded)
        assert before == after
def test_pick_best():
    """Test the wheel ranking algorithm.

    For several orderings of supported tags, the best candidate's first
    tag triple must equal the most-preferred supported tag.
    """
    def get_tags(res):
        info = res[-1].parsed_filename.groupdict()
        return info['pyver'], info['abi'], info['plat']

    cand_tags = [('py27', 'noabi', 'noarch'), ('py26', 'noabi', 'noarch'),
                 ('cp27', 'noabi', 'linux_i686'),
                 ('cp26', 'noabi', 'linux_i686'),
                 ('cp27', 'noabi', 'linux_x86_64'),
                 ('cp26', 'noabi', 'linux_x86_64')]
    cand_wheels = [WheelFile('testpkg-1.0-%s-%s-%s.whl' % t)
                   for t in cand_tags]

    supported = [('cp27', 'noabi', 'linux_i686'), ('py27', 'noabi', 'noarch')]
    supported2 = [('cp27', 'noabi', 'linux_i686'), ('py27', 'noabi', 'noarch'),
                  ('cp26', 'noabi', 'linux_i686'), ('py26', 'noabi', 'noarch')]
    supported3 = [('cp26', 'noabi', 'linux_i686'), ('py26', 'noabi', 'noarch'),
                  ('cp27', 'noabi', 'linux_i686'), ('py27', 'noabi', 'noarch')]

    for supp in (supported, supported2, supported3):
        context = lambda: list(supp)
        # Renamed loop variable: 'wheel' shadowed the imported wheel module.
        for candidate in cand_wheels:
            candidate.context = context
        best = max(cand_wheels)
        assert list(best.tags)[0] == supp[0]
| gpl-3.0 |
simon-r/dr14_t.meter | dr14tmeter/audio_math.py | 1 | 1648 | # dr14_t.meter: compute the DR14 value of the given audiofiles
# Copyright (C) 2011 Simone Riva
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy
import hashlib
def dr_rms(y):
    """RMS of *y* along axis 0, scaled by sqrt(2) as used by the DR metric."""
    sample_count = float(y.shape[0])
    return numpy.sqrt(2.0 * numpy.sum(y ** 2.0, 0) / sample_count)
def u_rms(y):
    """Plain (unscaled) RMS of *y* along axis 0."""
    sample_count = float(y.shape[0])
    return numpy.sqrt(numpy.sum(y ** 2.0, 0) / sample_count)
def decibel_u(y, ref):
    """Amplitude ratio y/ref expressed in decibels (20 * log10)."""
    ratio = y / ref
    return 20.0 * numpy.log10(ratio)
def decibel_p(y, ref):
    """Power ratio y/ref expressed in decibels (10 * log10)."""
    ratio = y / ref
    return 10.0 * numpy.log10(ratio)
def audio_min():
    """Smallest representable step of 24-bit audio (one LSB at full scale)."""
    return 2.0 ** -24
def audio_min16():
    """Smallest representable step of 16-bit audio (one LSB at full scale)."""
    return 2.0 ** -16
def max_dynamic(bit):
    """Theoretical dynamic range, in dB, of a *bit*-bit quantizer."""
    full_scale_steps = 2.0 ** bit
    return 20.0 * numpy.log10(full_scale_steps)
def normalize(y, ml=1.0):
    """Scale *y* so that its largest absolute sample equals *ml*."""
    peak = numpy.max(numpy.abs(y))
    return ml * (y * (1.0 / peak))
# compute the sha1 fingerprint of the track
# Do not modify this function !!  Fingerprints must stay stable across
# releases, so any change here would invalidate previously stored hashes.
def sha1_track_v1(y, ext_code=0):
    """Return a stable sha1 hex fingerprint for the sample array *y*.

    Tracks longer than 2 s at 44.1 kHz are decimated by 100 along the
    sample axis before hashing, to bound the cost.  *ext_code* is added to
    the samples before hashing so callers can derive distinct fingerprints.
    NOTE(review): hashing uses the ndarray's raw buffer, so the result
    depends on dtype and endianness -- confirm inputs are normalized.
    """
    n = y.shape
    if n[0] <= 44100 * 2:
        shat = hashlib.sha1(y + ext_code).hexdigest()
    else:
        shat = hashlib.sha1(y[::100, :] + ext_code).hexdigest()
    return shat
| gpl-3.0 |
pombredanne/cobbler-3 | cobbler/action_report.py | 15 | 12031 | """
Report from a cobbler master.
FIXME: reinstante functionality for 2.0
Copyright 2007-2009, Red Hat, Inc and Others
Anderson Silva <ansilva@redhat.com>
Michael DeHaan <michael.dehaan AT gmail>
This software may be freely redistributed under the terms of the GNU
general public license.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA.
"""
import re
import clogger
import utils
class Report:
    def __init__(self, collection_mgr, logger=None):
        """
        Constructor: wires the report generator to the cobbler API and
        initializes the run() parameters to None.
        """
        self.collection_mgr = collection_mgr
        self.settings = collection_mgr.settings()
        self.api = collection_mgr.api
        # Filled in by run(): what to report, which item, output format,
        # field subset and header suppression.
        self.report_type = None
        self.report_what = None
        self.report_name = None
        self.report_fields = None
        self.report_noheaders = None
        # Matches 'outer[inner]' field references, e.g. 'kernel_options[foo]'.
        self.array_re = re.compile('([^[]+)\[([^]]+)\]')
        if logger is None:
            logger = clogger.Logger()
        self.logger = logger
    def fielder(self, structure, fields_list):
        """
        Return data from a subset of fields of some item

        *structure* is the item rendered as a dict.  *fields_list* entries
        may be plain field names, 'outer[inner]' references into a dict
        field, or names of per-interface subfields (resolved by scanning
        the 'interfaces' dict; the device name is prefixed to the value).
        """
        item = {}
        for field in fields_list:
            internal = self.array_re.search(field)
            # check if field is a primary field of the item
            if field in structure.keys():
                item[field] = structure[field]
            # check if field is an 'outer[inner]' reference into a dict field
            elif internal and internal.group(1) in structure.keys():
                outer = internal.group(1)
                inner = internal.group(2)
                if isinstance(structure[outer], dict) and inner in structure[outer]:
                    item[field] = structure[outer][inner]
            # otherwise search for the field inside each network interface
            elif "interfaces" in structure.keys():
                for device in structure['interfaces'].keys():
                    if field in structure['interfaces'][device]:
                        item[field] = device + ': ' + structure['interfaces'][device][field]
        return item
    def reporting_csv(self, info, order, noheaders):
        """
        Formats data on 'info' for csv output

        *info* is a list of per-item dicts and *order* lists the columns to
        emit.  Missing values render as '-'.  When *noheaders* is true the
        header line is suppressed.
        """
        outputheaders = ''
        outputbody = ''
        sep = ','
        info_count = 0
        for item in info:
            item_count = 0
            for key in order:
                # The header line is only built while on the first item.
                if info_count == 0:
                    outputheaders += str(key) + sep
                if key in item.keys():
                    outputbody += str(item[key]) + sep
                else:
                    outputbody += '-' + sep
                item_count += 1
            info_count += 1
            outputbody += '\n'
        outputheaders += '\n'
        if noheaders:
            outputheaders = ''
        return outputheaders + outputbody
    def reporting_trac(self, info, order, noheaders):
        """
        Formats data on 'info' for trac wiki table output

        Cells are delimited with '||'; missing values render as '-'.
        """
        outputheaders = ''
        outputbody = ''
        sep = '||'
        info_count = 0
        for item in info:
            item_count = 0
            for key in order:
                # The header row is only built while on the first item.
                if info_count == 0:
                    outputheaders += sep + str(key)
                if key in item.keys():
                    outputbody += sep + str(item[key])
                else:
                    outputbody += sep + '-'
                item_count = item_count + 1
            info_count = info_count + 1
            outputbody += '||\n'
        outputheaders += '||\n'
        if noheaders:
            outputheaders = ''
        return outputheaders + outputbody
    def reporting_doku(self, info, order, noheaders):
        """
        Formats data on 'info' for doku wiki table output

        Header cells use '^' separators, body cells use '|'; missing values
        render as '-'.
        """
        outputheaders = ''
        outputbody = ''
        sep1 = '^'
        sep2 = '|'
        info_count = 0
        for item in info:
            item_count = 0
            for key in order:
                # The header row is only built while on the first item.
                if info_count == 0:
                    outputheaders += sep1 + key
                if key in item.keys():
                    outputbody += sep2 + item[key]
                else:
                    outputbody += sep2 + '-'
                item_count = item_count + 1
            info_count = info_count + 1
            outputbody += sep2 + '\n'
        outputheaders += sep1 + '\n'
        if noheaders:
            outputheaders = ''
        return outputheaders + outputbody
    def reporting_mediawiki(self, info, order, noheaders):
        """
        Formats data on 'info' for mediawiki table output

        Rows are separated with '|-'; the first cell of a row is prefixed
        with '|' and subsequent cells with '||'.  Missing values render
        as '-'.
        """
        outputheaders = ''
        outputbody = ''
        opentable = '{| border="1"\n'
        closetable = '|}\n'
        sep1 = '||'
        sep2 = '|'
        sep3 = '|-'
        info_count = 0
        for item in info:
            item_count = 0
            for key in order:
                # Header row: '|' before the first column, '||' afterwards.
                if info_count == 0 and item_count == 0:
                    outputheaders += sep2 + key
                elif info_count == 0:
                    outputheaders += sep1 + key
                if item_count == 0:
                    if key in item.keys():
                        outputbody += sep2 + str(item[key])
                    else:
                        outputbody += sep2 + '-'
                else:
                    if key in item.keys():
                        outputbody += sep1 + str(item[key])
                    else:
                        outputbody += sep1 + '-'
                item_count = item_count + 1
            info_count = info_count + 1
            outputbody += '\n' + sep3 + '\n'
        outputheaders += '\n' + sep3 + '\n'
        if noheaders:
            outputheaders = ''
        return opentable + outputheaders + outputbody + closetable
def print_formatted_data(self, data, order, report_type, noheaders):
"""
Used for picking the correct format to output data as
"""
if report_type == "csv":
self.logger.flat(self.reporting_csv(data, order, noheaders))
if report_type == "mediawiki":
self.logger.flat(self.reporting_mediawiki(data, order, noheaders))
if report_type == "trac":
self.logger.flat(self.reporting_trac(data, order, noheaders))
if report_type == "doku":
self.logger.flat(self.reporting_doku(data, order, noheaders))
    def reporting_sorter(self, a, b):
        """
        Used for sorting cobbler objects for report commands

        Python 2 cmp()-style comparator on the objects' names, passed to
        list.sort(); not usable as-is under Python 3.
        """
        return cmp(a.name, b.name)
    def reporting_print_sorted(self, collection):
        """
        Prints all objects in a collection sorted by name
        """
        # Materialize the collection, then sort with the cmp-style
        # comparator (Python 2 list.sort signature).
        collection = [x for x in collection]
        collection.sort(self.reporting_sorter)
        for x in collection:
            self.logger.flat(x.to_string())
def reporting_list_names2(self, collection, name):
"""
Prints a specific object in a collection.
"""
obj = collection.get(name)
if obj is not None:
self.logger.flat(obj.to_string())
    def reporting_print_all_fields(self, collection, report_name, report_type, report_noheaders):
        """
        Prints all fields in a collection as a table given the report type

        The column order is derived from the first item; per-interface
        subfields are flattened into 'device: value' strings.
        """
        # per-item hack: reduce the collection to the one named item
        if report_name:
            collection = collection.find(name=report_name)
            if collection:
                collection = [collection]
            else:
                return
        collection = [x for x in collection]
        collection.sort(self.reporting_sorter)
        data = []
        out_order = []
        count = 0
        for x in collection:
            item = {}
            # Settings objects serialize differently from regular items.
            if x.ITEM_TYPE == "settings":
                structure = x.to_dict()
            else:
                structure = x.to_list()
            for (key, value) in structure.iteritems():
                # exception for systems which could have > 1 interface
                if key == "interfaces":
                    for (device, info) in value.iteritems():
                        for (info_header, info_value) in info.iteritems():
                            item[info_header] = str(device) + ': ' + str(info_value)
                            # needs to create order list for print_formatted_fields
                            if count == 0:
                                out_order.append(info_header)
                else:
                    item[key] = value
                    # needs to create order list for print_formatted_fields
                    if count == 0:
                        out_order.append(key)
            count = count + 1
            data.append(item)
        self.print_formatted_data(data=data, order=out_order, report_type=report_type, noheaders=report_noheaders)
    def reporting_print_x_fields(self, collection, report_name, report_type, report_fields, report_noheaders):
        """
        Prints specific fields in a collection as a table given the report type

        *report_fields* is a comma-separated list of field names; spaces
        are stripped before splitting.
        """
        # per-item hack: reduce the collection to the one named item
        if report_name:
            collection = collection.find(name=report_name)
            if collection:
                collection = [collection]
            else:
                return
        collection = [x for x in collection]
        collection.sort(self.reporting_sorter)
        data = []
        fields_list = report_fields.replace(' ', '').split(',')
        for x in collection:
            # Settings objects serialize differently from regular items.
            if x.ITEM_TYPE == "settings":
                structure = x.to_dict()
            else:
                structure = x.to_list()
            item = self.fielder(structure, fields_list)
            data.append(item)
        self.print_formatted_data(data=data, order=fields_list, report_type=report_type, noheaders=report_noheaders)
    # -------------------------------------------------------

    def run(self, report_what=None, report_name=None, report_type=None, report_fields=None, report_noheaders=None):
        """
        Get remote profiles and distros and sync them locally

        1. Handles original report output ('text' + all fields)
        2. Handles all fields of report outputs as table given a format
        3. Handles specific fields of report outputs as table given a format
        """
        if report_type == 'text' and report_fields == 'all':
            # Classic plain-text report: one named item or the whole
            # sorted collection.
            for collection_name in ["distro", "profile", "system", "repo", "network", "image", "mgmtclass", "package", "file"]:
                if report_what == "all" or report_what == collection_name or report_what == "%ss" % collection_name or report_what == "%ses" % collection_name:
                    if report_name:
                        self.reporting_list_names2(self.api.get_items(collection_name), report_name)
                    else:
                        self.reporting_print_sorted(self.api.get_items(collection_name))
        elif report_type == 'text' and report_fields != 'all':
            utils.die(self.logger, "The 'text' type can only be used with field set to 'all'")
        elif report_type != 'text' and report_fields == 'all':
            # Tabular output (csv/trac/doku/mediawiki) with every field.
            for collection_name in ["distro", "profile", "system", "repo", "network", "image", "mgmtclass", "package", "file"]:
                if report_what == "all" or report_what == collection_name or report_what == "%ss" % collection_name or report_what == "%ses" % collection_name:
                    self.reporting_print_all_fields(self.api.get_items(collection_name), report_name, report_type, report_noheaders)
        else:
            # Tabular output restricted to a comma-separated field subset.
            for collection_name in ["distro", "profile", "system", "repo", "network", "image", "mgmtclass", "package", "file"]:
                if report_what == "all" or report_what == collection_name or report_what == "%ss" % collection_name or report_what == "%ses" % collection_name:
                    self.reporting_print_x_fields(self.api.get_items(collection_name), report_name, report_type, report_fields, report_noheaders)
| gpl-2.0 |
modulexcite/catapult | third_party/graphy/graphy/backends/google_chart_api/util.py | 233 | 6335 | #!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for working with the Google Chart API.
Not intended for end users, use the methods in __init__ instead."""
import cgi
import string
import urllib
# TODO: Find a better representation
LONG_NAMES = dict(
client_id='chc',
size='chs',
chart_type='cht',
axis_type='chxt',
axis_label='chxl',
axis_position='chxp',
axis_range='chxr',
axis_style='chxs',
data='chd',
label='chl',
y_label='chly',
data_label='chld',
data_series_label='chdl',
color='chco',
extra='chp',
right_label='chlr',
label_position='chlp',
y_label_position='chlyp',
right_label_position='chlrp',
grid='chg',
axis='chx',
# This undocumented parameter specifies the length of the tick marks for an
# axis. Negative values will extend tick marks into the main graph area.
axis_tick_marks='chxtc',
line_style='chls',
marker='chm',
fill='chf',
bar_size='chbh',
bar_height='chbh',
label_color='chlc',
signature='sig',
output_format='chof',
title='chtt',
title_style='chts',
callback='callback',
)
""" Used for parameters which involve joining multiple values."""
JOIN_DELIMS = dict(
data=',',
color=',',
line_style='|',
marker='|',
axis_type=',',
axis_range='|',
axis_label='|',
axis_position='|',
axis_tick_marks='|',
data_series_label='|',
label='|',
bar_size=',',
bar_height=',',
)
class SimpleDataEncoder:

  """Google Chart 'simple' encoding: one character per data point.

  Values outside [0, 61] and None are dropped (encoded as '_').
  """

  def __init__(self):
    self.prefix = 's:'
    self.code = string.ascii_uppercase + string.ascii_lowercase + string.digits
    self.min = 0
    self.max = len(self.code) - 1

  def Encode(self, data):
    """Return the simple-encoding string for an iterable of numbers."""
    return ''.join(map(self._EncodeItem, data))

  def _EncodeItem(self, x):
    # None and out-of-range values become the 'missing' marker.
    if x is None:
      return '_'
    rounded = int(round(x))
    if self.min <= rounded <= self.max:
      return self.code[rounded]
    return '_'
class EnhancedDataEncoder:

  """Encode data using enhanced encoding. Out-of-range data will
  be dropped (encoded as '__').

  Enhanced encoding uses two characters per data point, for a
  resolution of 64 * 64 = 4096 distinct values (0..4095).
  """

  def __init__(self):
    self.prefix = 'e:'
    chars = string.ascii_uppercase + string.ascii_lowercase + string.digits \
            + '-.'
    # All two-character combinations, ordered so that index == value.
    self.code = [x + y for x in chars for y in chars]
    self.min = 0
    self.max = len(self.code) - 1

  def Encode(self, data):
    return ''.join(self._EncodeItem(i) for i in data)

  def _EncodeItem(self, x):
    # None and out-of-range values become the two-character missing marker.
    if x is None:
      return '__'
    x = int(round(x))
    if x < self.min or x > self.max:
      return '__'
    return self.code[int(x)]
def EncodeUrl(base, params, escape_url, use_html_entities):
  """Escape params, combine and append them to base to generate a full URL.

  Args:
    base: URL base (everything before the '?').
    params: dict of parameter name -> already-joined string value; params
      with falsy values are dropped entirely.
    escape_url: if True, percent-escape each value (urllib.quote).
    use_html_entities: if True, additionally escape the final URL for safe
      embedding in HTML attributes (cgi.escape).
  """
  # NOTE: Python 2 APIs throughout (dict.iteritems, urllib.quote, cgi.escape).
  real_params = []
  for key, value in params.iteritems():
    if escape_url:
      value = urllib.quote(value)
    if value:
      real_params.append('%s=%s' % (key, value))
  if real_params:
    url = '%s?%s' % (base, '&'.join(real_params))
  else:
    url = base
  if use_html_entities:
    url = cgi.escape(url, quote=True)
  return url
def ShortenParameterNames(params):
  """Shorten long parameter names (like size) to short names (like chs).

  Raises:
    KeyError: if both the long and short name of the same parameter are
      present, since it is ambiguous which value should win.
  """
  out = {}
  # NOTE: Python 2 iteritems(); under Python 3 this would be items().
  for name, value in params.iteritems():
    short_name = LONG_NAMES.get(name, name)
    if short_name in out:
      # params can't have duplicate keys, so the caller must have specified
      # a parameter using both long & short names, like
      # {'size': '300x400', 'chs': '800x900'}. We don't know which to use.
      raise KeyError('Both long and short version of parameter %s (%s) '
          'found. It is unclear which one to use.' % (name, short_name))
    out[short_name] = value
  return out
def StrJoin(delim, data):
  """Join the str() of every element of *data* with *delim*."""
  return delim.join(map(str, data))
def JoinLists(**args):
  """Take a dictionary of {long_name:values}, and join the values.

  For each long_name, join the values into a string according to
  JOIN_DELIMS. If values is empty or None, replace with an empty string.

  Returns:
    A dictionary {long_name:joined_value} entries.
  """
  out = {}
  for key, val in args.items():
    if val:
      out[key] = StrJoin(JOIN_DELIMS[key], val)
    else:
      # Falsy (empty/None) value lists join to the empty string.
      out[key] = ''
  return out
def EncodeData(chart, series, y_min, y_max, encoder):
  """Format the given data series in plain or extended format.

  Use the chart's encoder to determine the format. The formatted data will
  be scaled to fit within the range of values supported by the chosen
  encoding.

  Args:
    chart: The chart.
    series: A list of the the data series to format; each list element is
            a list of data points.
    y_min: Minimum data value. May be None if y_max is also None
    y_max: Maximum data value. May be None if y_min is also None
    encoder: The encoder (simple or enhanced) whose range and prefix
            are used.
  Returns:
    A dictionary with one key, 'data', whose value is the fully encoded series.
  """
  # y_min and y_max must be given together or not at all.
  assert (y_min is None) == (y_max is None)
  if y_min is not None:
    def _ScaleAndEncode(series):
      # Map the data range onto the encoder's representable range.
      series = ScaleData(series, y_min, y_max, encoder.min, encoder.max)
      return encoder.Encode(series)
    encoded_series = [_ScaleAndEncode(s) for s in series]
  else:
    encoded_series = [encoder.Encode(s) for s in series]
  result = JoinLists(**{'data': encoded_series})
  result['data'] = encoder.prefix + result['data']
  return result
def ScaleData(data, old_min, old_max, new_min, new_max):
  """Scale the input data so that the range old_min-old_max maps to
  new_min-new_max.

  None entries are preserved unchanged.  A degenerate old range
  (old_min == old_max) only translates the data, it does not scale it.

  Returns a list.  The explicit list() makes the behavior identical on
  Python 2 and 3 -- a bare map() would be a lazy iterator on Python 3.
  """
  if old_min == old_max:
    scale = 1
  else:
    scale = (new_max - new_min) / float(old_max - old_min)
  translate = new_min - scale * old_min

  def ScalePoint(x):
    if x is None:
      return None
    return scale * x + translate

  return list(map(ScalePoint, data))
| bsd-3-clause |
wojenny/THash | p2pool/bitcoin/networks/doubloons.py | 1 | 1231 | import os
import platform
from twisted.internet import defer
from .. import data, helper
from p2pool.util import pack
# Magic bytes prefixing every message on the doubloons p2p network
# (Python 2 str.decode('hex')).
P2P_PREFIX = 'fcd9b7dd'.decode('hex')
P2P_PORT = 1336
# Version byte used when rendering doubloons addresses.
ADDRESS_VERSION = 24
RPC_PORT = 1337
# Sanity-check an RPC connection: the daemon must be a doubloons node and
# must not be running on testnet.
RPC_CHECK = defer.inlineCallbacks(lambda bitcoind: defer.returnValue(
            'doubloons address' in (yield bitcoind.rpc_help()) and
            not (yield bitcoind.rpc_getinfo())['testnet']
        ))
# Block subsidy in base units (1e8 per coin), halving every 1080000 blocks.
SUBSIDY_FUNC = lambda height: 1*100000000 >> (height + 1)//1080000
# Proof of work hashes with scrypt via the ltc_scrypt extension module.
POW_FUNC = lambda data: pack.IntType(256).unpack(__import__('ltc_scrypt').getPoWHash(data))
BLOCK_PERIOD = 30 # s targetspacing
SYMBOL = 'DBL'
# Per-platform default location of the node's configuration file.
CONF_FILE_FUNC = lambda: os.path.join(os.path.join(os.environ['APPDATA'], 'doubloons') if platform.system() == 'Windows' else os.path.expanduser('~/Library/Application Support/Doubloons/') if platform.system() == 'Darwin' else os.path.expanduser('~/.doubloons'), 'doubloons.conf')
BLOCK_EXPLORER_URL_PREFIX = 'http://explorer.doubloons.net/block/'
ADDRESS_EXPLORER_URL_PREFIX = 'http://explorer.doubloons.net/address/'
TX_EXPLORER_URL_PREFIX = 'http://explorer.doubloons.net/transaction/'
# Range of share targets the node considers sane, and the scrypt
# pseudo-difficulty scaling constant.
SANE_TARGET_RANGE = (2**256//1000000000 - 1, 2**256//1000 - 1)
DUMB_SCRYPT_DIFF = 2**16
# Outputs below this value (in base units -- presumably; confirm) are
# treated as dust.
DUST_THRESHOLD = 0.001e8
| gpl-3.0 |
coderjames/pascal | quex-0.63.1/quex/engine/state_machine/parallelize.py | 1 | 4841 | # (C) 2005-2011 Frank-Rene Schaefer
# ABSOLUTELY NO WARRANTY
###############################################################################
from quex.engine.state_machine.core import StateMachine, State
import quex.engine.state_machine.index as index
def do(StateMachineList, CommonTerminalStateF=True, CloneF=True):
    """Connect state machines in parallel (i.e. as alternatives).

       CommonTerminalStateF tells whether the state machines shall trigger
                            to a common terminal. This may help nfa-to-dfa
                            or hopcroft minimization for ISOLATED patterns.
                            A state machine that consists of the COMBINATION
                            of patterns MUST set this flag to 'False'.

       CloneF               Controls if state machine list is cloned or not.
                            If the single state machines are no longer required
                            after construction, the CloneF can be set to False.
                            If Cloning is disabled the state machines themselves
                            will be altered--which brings some advantage in speed.

       Returns the combined StateMachine.
    """
    assert type(StateMachineList) == list
    assert len(StateMachineList) != 0
    # List comprehension (not map()) so the check also holds under Python 3,
    # where map() returns an iterator and could never compare equal to a list.
    assert [x.__class__.__name__ for x in StateMachineList] == ["StateMachine"] * len(StateMachineList)
    # filter out empty state machines from the consideration
    state_machine_list = [ sm for sm in StateMachineList if not sm.is_empty() ]
    empty_state_machine_list = [ sm for sm in StateMachineList if sm.is_empty() ]
    if len(state_machine_list) < 2:
        # Nothing to parallelize: return an empty machine, a clone, or the
        # single machine itself, still folding in the empty machines' origins.
        if len(state_machine_list) < 1: result = StateMachine()
        elif CloneF: result = state_machine_list[0].clone()
        else: result = state_machine_list[0]
        return __consider_empty_state_machines(result, empty_state_machine_list)
    # (*) need to clone the state machines, i.e. provide their internal
    #     states with new ids, but the 'behavior' remains. This allows
    #     state machines to appear twice, or being used in 'larger'
    #     conglomerates.
    # NOTE: must be a real list -- the clones are iterated twice below
    #       (init-state merge, then the states-dict update); a Python 3
    #       map() iterator would be exhausted after the first pass.
    if CloneF: clone_list = [ sm.clone() for sm in state_machine_list ]
    else:      clone_list = state_machine_list
    # (*) collect all transitions from both state machines into a single one
    #     (clone to ensure unique identifiers of states)
    new_init_state = State.new_merged_core_state((clone.get_init_state() for clone in clone_list),
                                                 ClearF=True)
    result = StateMachine(InitState=new_init_state)
    for clone in clone_list:
        result.states.update(clone.states)
    # (*) add additional **init** and **end** state
    #     NOTE: when the result state machine was created, it already contains a
    #           new initial state index. thus at this point only the new terminal
    #           state has to be created.
    #     NOTE: it is essential that the acceptance flag stays False, at this
    #           point in time, so that the mounting operations only happen on
    #           the old acceptance states. Later the acceptance state is raised
    #           to 'accepted' (see below)
    # Sentinel -1 ('-1L' originally; plain int is equal in Python 2 and keeps
    # the file loadable under Python 3, where the 'L' suffix is a SyntaxError).
    new_terminal_state_index = -1
    if CommonTerminalStateF:
        new_terminal_state_index = index.get()
        result.states[new_terminal_state_index] = \
                State.new_merged_core_state(result.get_acceptance_state_list(), \
                                            ClearF=True)
    # (*) Connect from the new initial state to the initial states of the
    #     clones via epsilon transition.
    #     Connect from each success state of the clones to the new end state
    #     via epsilon transition.
    for clone in clone_list:
        result.mount_to_initial_state(clone.init_state_index)
    if CommonTerminalStateF:
        result.mount_to_acceptance_states(new_terminal_state_index,
                                          CancelStartAcceptanceStateF=False)
    return __consider_empty_state_machines(result, empty_state_machine_list)
def __consider_empty_state_machines(sm, EmptyStateMachineList):
    """Merge the init-state origins of every empty state machine into the
    init state of 'sm' and return 'sm'.

    An empty state machine basically means that its init state is going to
    be merged into the init state of the resulting state machine.  If such a
    machine carries an acceptance, this is reflected in its origins, so the
    result's init state becomes an acceptance state for that pattern.
    => There is no particular need for an epsilon transition to the common
       new terminal index.
    """
    target_origins = sm.get_init_state().origins()
    for empty_sm in EmptyStateMachineList:
        target_origins.merge(empty_sm.get_init_state().origins())
    return sm
| bsd-2-clause |
CCI-MOC/nova | nova/tests/unit/cells/test_cells_weights.py | 17 | 9058 | # Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for testing the cells weight algorithms.
Cells with higher weights should be given priority for new builds.
"""
import datetime
from oslo_utils import timeutils
from nova.cells import state
from nova.cells import weights
from nova import test
class FakeCellState(state.CellState):
    """Cell-state stub whose ram_free capacity is set directly by tests."""
    def __init__(self, cell_name):
        super(FakeCellState, self).__init__(cell_name)
        # Start with no free RAM; tests fill it in via _update_ram_free().
        self.capacities['ram_free'] = {'total_mb': 0, 'units_by_mb': {}}
        self.db_info = {}
    def _update_ram_free(self, *ram_specs):
        # Each spec is a (unit_size_mb, unit_count) pair: 'unit_count'
        # instances of 'unit_size_mb' RAM could be booted in this cell.
        free = self.capacities['ram_free']
        for unit_mb, count in ram_specs:
            free['total_mb'] = free['total_mb'] + count * unit_mb
            free['units_by_mb'][str(unit_mb)] = count
def _get_fake_cells():
    """Build the four-cell fixture shared by the weigher tests.

    Each entry is (name, ram specs passed to _update_ram_free, weight_offset).
    """
    cell_specs = [
        ('cell1', ((512, 1), (1024, 4), (2048, 3)), -200.0),
        ('cell2', ((512, 2), (1024, 3), (2048, 4)), -200.1),
        ('cell3', ((512, 3), (1024, 2), (2048, 1)), 400.0),
        ('cell4', ((512, 4), (1024, 1), (2048, 2)), 300.0),
    ]
    cells = []
    for name, ram_specs, offset in cell_specs:
        cell = FakeCellState(name)
        cell._update_ram_free(*ram_specs)
        cell.db_info['weight_offset'] = offset
        cells.append(cell)
    return cells
class CellsWeightsTestCase(test.NoDBTestCase):
    """Makes sure the proper weighers are in the directory."""
    def test_all_weighers(self):
        # Weighers are discovered dynamically; verify the two exercised by
        # this module are among them.
        weighers = weights.all_weighers()
        # Check at least a couple that we expect are there
        self.assertTrue(len(weighers) >= 2)
        class_names = [cls.__name__ for cls in weighers]
        self.assertIn('WeightOffsetWeigher', class_names)
        self.assertIn('RamByInstanceTypeWeigher', class_names)
class _WeigherTestClass(test.NoDBTestCase):
    """Base class for testing individual weigher plugins."""
    # Fully-qualified class path of the weigher under test; set by subclasses.
    weigher_cls_name = None
    def setUp(self):
        super(_WeigherTestClass, self).setUp()
        self.weight_handler = weights.CellWeightHandler()
        weigher_classes = self.weight_handler.get_matching_classes(
                [self.weigher_cls_name])
        self.weighers = [cls() for cls in weigher_classes]
    def _get_weighed_cells(self, cells, weight_properties):
        # Returns the cells wrapped as weighed objects, sorted best-first.
        return self.weight_handler.get_weighed_objects(self.weighers,
                cells, weight_properties)
class RAMByInstanceTypeWeigherTestClass(_WeigherTestClass):
    """Test the RamByInstanceTypeWeigher: cells are weighed by how many
    units of the requested instance_type's RAM size they have free.
    """
    weigher_cls_name = ('nova.cells.weights.ram_by_instance_type.'
                        'RamByInstanceTypeWeigher')
    def test_default_spreading(self):
        """Test that cells with more ram available return a higher weight."""
        cells = _get_fake_cells()
        # Simulate building a new 512MB instance.
        instance_type = {'memory_mb': 512}
        weight_properties = {'request_spec': {'instance_type': instance_type}}
        weighed_cells = self._get_weighed_cells(cells, weight_properties)
        self.assertEqual(4, len(weighed_cells))
        resulting_cells = [weighed_cell.obj for weighed_cell in weighed_cells]
        # Free 512MB units per cell are 1/2/3/4 (cell1..cell4), so ordering
        # is cell4, cell3, cell2, cell1.
        expected_cells = [cells[3], cells[2], cells[1], cells[0]]
        self.assertEqual(expected_cells, resulting_cells)
        # Simulate building a new 1024MB instance.
        instance_type = {'memory_mb': 1024}
        weight_properties = {'request_spec': {'instance_type': instance_type}}
        weighed_cells = self._get_weighed_cells(cells, weight_properties)
        self.assertEqual(4, len(weighed_cells))
        resulting_cells = [weighed_cell.obj for weighed_cell in weighed_cells]
        expected_cells = [cells[0], cells[1], cells[2], cells[3]]
        self.assertEqual(expected_cells, resulting_cells)
        # Simulate building a new 2048MB instance.
        instance_type = {'memory_mb': 2048}
        weight_properties = {'request_spec': {'instance_type': instance_type}}
        weighed_cells = self._get_weighed_cells(cells, weight_properties)
        self.assertEqual(4, len(weighed_cells))
        resulting_cells = [weighed_cell.obj for weighed_cell in weighed_cells]
        expected_cells = [cells[1], cells[0], cells[3], cells[2]]
        self.assertEqual(expected_cells, resulting_cells)
    def test_negative_multiplier(self):
        """Test that cells with less ram available return a higher weight."""
        # A negative multiplier inverts the spreading order above.
        self.flags(ram_weight_multiplier=-1.0, group='cells')
        cells = _get_fake_cells()
        # Simulate building a new 512MB instance.
        instance_type = {'memory_mb': 512}
        weight_properties = {'request_spec': {'instance_type': instance_type}}
        weighed_cells = self._get_weighed_cells(cells, weight_properties)
        self.assertEqual(4, len(weighed_cells))
        resulting_cells = [weighed_cell.obj for weighed_cell in weighed_cells]
        expected_cells = [cells[0], cells[1], cells[2], cells[3]]
        self.assertEqual(expected_cells, resulting_cells)
        # Simulate building a new 1024MB instance.
        instance_type = {'memory_mb': 1024}
        weight_properties = {'request_spec': {'instance_type': instance_type}}
        weighed_cells = self._get_weighed_cells(cells, weight_properties)
        self.assertEqual(4, len(weighed_cells))
        resulting_cells = [weighed_cell.obj for weighed_cell in weighed_cells]
        expected_cells = [cells[3], cells[2], cells[1], cells[0]]
        self.assertEqual(expected_cells, resulting_cells)
        # Simulate building a new 2048MB instance.
        instance_type = {'memory_mb': 2048}
        weight_properties = {'request_spec': {'instance_type': instance_type}}
        weighed_cells = self._get_weighed_cells(cells, weight_properties)
        self.assertEqual(4, len(weighed_cells))
        resulting_cells = [weighed_cell.obj for weighed_cell in weighed_cells]
        expected_cells = [cells[2], cells[3], cells[0], cells[1]]
        self.assertEqual(expected_cells, resulting_cells)
class WeightOffsetWeigherTestClass(_WeigherTestClass):
    """Test the WeightOffsetWeigher class.

    (The docstring previously said "RAMWeigher", which was a copy/paste
    mistake -- this class exercises the weight_offset weigher.)
    """
    weigher_cls_name = 'nova.cells.weights.weight_offset.WeightOffsetWeigher'
    def test_weight_offset(self):
        """Test that cells with higher weight_offsets return higher
        weights.
        """
        cells = _get_fake_cells()
        weighed_cells = self._get_weighed_cells(cells, {})
        self.assertEqual(4, len(weighed_cells))
        # Offsets are cell3=400.0 > cell4=300.0 > cell1=-200.0 > cell2=-200.1.
        expected_cells = [cells[2], cells[3], cells[0], cells[1]]
        resulting_cells = [weighed_cell.obj for weighed_cell in weighed_cells]
        self.assertEqual(expected_cells, resulting_cells)
class MuteWeigherTestClass(_WeigherTestClass):
    """Test the MuteChildWeigher: cells not heard from within
    mute_child_interval seconds get the mute penalty.
    """
    weigher_cls_name = 'nova.cells.weights.mute_child.MuteChildWeigher'
    def setUp(self):
        super(MuteWeigherTestClass, self).setUp()
        self.flags(mute_weight_multiplier=-10.0, mute_child_interval=100,
                   group='cells')
        self.now = timeutils.utcnow()
        # Freeze time so 'last_seen' age comparisons are deterministic.
        timeutils.set_time_override(self.now)
        self.cells = _get_fake_cells()
        for cell in self.cells:
            cell.last_seen = self.now
    def tearDown(self):
        super(MuteWeigherTestClass, self).tearDown()
        timeutils.clear_time_override()
    def test_non_mute(self):
        # All cells were just seen: nobody is penalized.
        weight_properties = {}
        weighed_cells = self._get_weighed_cells(self.cells, weight_properties)
        self.assertEqual(4, len(weighed_cells))
        for weighed_cell in weighed_cells:
            self.assertEqual(0, weighed_cell.weight)
    def test_mutes(self):
        # make 2 of them mute:
        self.cells[0].last_seen = (self.cells[0].last_seen -
                                   datetime.timedelta(seconds=200))
        self.cells[1].last_seen = (self.cells[1].last_seen -
                                   datetime.timedelta(seconds=200))
        weight_properties = {}
        weighed_cells = self._get_weighed_cells(self.cells, weight_properties)
        self.assertEqual(4, len(weighed_cells))
        # Non-muted cells sort first with weight 0 ...
        for i in range(2):
            weighed_cell = weighed_cells.pop(0)
            self.assertEqual(0, weighed_cell.weight)
            self.assertIn(weighed_cell.obj.name, ['cell3', 'cell4'])
        # ... muted cells come last with the mute penalty applied.
        for i in range(2):
            weighed_cell = weighed_cells.pop(0)
            self.assertEqual(-10.0, weighed_cell.weight)
            self.assertIn(weighed_cell.obj.name, ['cell1', 'cell2'])
| apache-2.0 |
Jenselme/servo | tests/wpt/web-platform-tests/tools/pytest/testing/test_skipping.py | 165 | 26964 | import pytest
import sys
from _pytest.skipping import MarkEvaluator, folded_skips, pytest_runtest_setup
from _pytest.runner import runtestprotocol
class TestEvaluator:
    """Tests for MarkEvaluator, the helper that evaluates the condition
    expressions of skipif/xfail (and arbitrary) marks on a test item.
    """
    def test_no_marker(self, testdir):
        item = testdir.getitem("def test_func(): pass")
        evalskipif = MarkEvaluator(item, 'skipif')
        assert not evalskipif
        assert not evalskipif.istrue()
    def test_marked_no_args(self, testdir):
        # A bare mark evaluates true with an empty explanation.
        item = testdir.getitem("""
            import pytest
            @pytest.mark.xyz
            def test_func():
                pass
        """)
        ev = MarkEvaluator(item, 'xyz')
        assert ev
        assert ev.istrue()
        expl = ev.getexplanation()
        assert expl == ""
        assert not ev.get("run", False)
    def test_marked_one_arg(self, testdir):
        item = testdir.getitem("""
            import pytest
            @pytest.mark.xyz("hasattr(os, 'sep')")
            def test_func():
                pass
        """)
        ev = MarkEvaluator(item, 'xyz')
        assert ev
        assert ev.istrue()
        expl = ev.getexplanation()
        assert expl == "condition: hasattr(os, 'sep')"
    @pytest.mark.skipif('sys.version_info[0] >= 3')
    def test_marked_one_arg_unicode(self, testdir):
        # u"" condition strings are a distinct type only on Python 2.
        item = testdir.getitem("""
            import pytest
            @pytest.mark.xyz(u"hasattr(os, 'sep')")
            def test_func():
                pass
        """)
        ev = MarkEvaluator(item, 'xyz')
        assert ev
        assert ev.istrue()
        expl = ev.getexplanation()
        assert expl == "condition: hasattr(os, 'sep')"
    def test_marked_one_arg_with_reason(self, testdir):
        # An explicit reason= takes precedence over the condition text.
        item = testdir.getitem("""
            import pytest
            @pytest.mark.xyz("hasattr(os, 'sep')", attr=2, reason="hello world")
            def test_func():
                pass
        """)
        ev = MarkEvaluator(item, 'xyz')
        assert ev
        assert ev.istrue()
        expl = ev.getexplanation()
        assert expl == "hello world"
        assert ev.get("attr") == 2
    def test_marked_one_arg_twice(self, testdir):
        # With two stacked skipif marks, the true condition provides the
        # explanation regardless of decorator order.
        lines = [
            '''@pytest.mark.skipif("not hasattr(os, 'murks')")''',
            '''@pytest.mark.skipif("hasattr(os, 'murks')")'''
        ]
        for i in range(0, 2):
            item = testdir.getitem("""
                import pytest
                %s
                %s
                def test_func():
                    pass
            """ % (lines[i], lines[(i+1) %2]))
            ev = MarkEvaluator(item, 'skipif')
            assert ev
            assert ev.istrue()
            expl = ev.getexplanation()
            assert expl == "condition: not hasattr(os, 'murks')"
    def test_marked_one_arg_twice2(self, testdir):
        item = testdir.getitem("""
            import pytest
            @pytest.mark.skipif("hasattr(os, 'murks')")
            @pytest.mark.skipif("not hasattr(os, 'murks')")
            def test_func():
                pass
        """)
        ev = MarkEvaluator(item, 'skipif')
        assert ev
        assert ev.istrue()
        expl = ev.getexplanation()
        assert expl == "condition: not hasattr(os, 'murks')"
    def test_marked_skip_with_not_string(self, testdir):
        # Boolean conditions require an explicit reason= string.
        item = testdir.getitem("""
            import pytest
            @pytest.mark.skipif(False)
            def test_func():
                pass
        """)
        ev = MarkEvaluator(item, 'skipif')
        exc = pytest.raises(pytest.fail.Exception, ev.istrue)
        assert """Failed: you need to specify reason=STRING when using booleans as conditions.""" in exc.value.msg
    def test_skipif_class(self, testdir):
        # Condition strings are evaluated with 'config' in their namespace.
        item, = testdir.getitems("""
            import pytest
            class TestClass:
                pytestmark = pytest.mark.skipif("config._hackxyz")
                def test_func(self):
                    pass
        """)
        item.config._hackxyz = 3
        ev = MarkEvaluator(item, 'skipif')
        assert ev.istrue()
        expl = ev.getexplanation()
        assert expl == "condition: config._hackxyz"
class TestXFail:
    """Tests for the xfail mark: expected failures, XPASS reporting,
    run=False, raises= filtering and strict mode.
    """
    @pytest.mark.parametrize('strict', [True, False])
    def test_xfail_simple(self, testdir, strict):
        # A failing xfail-marked test is reported as skipped with wasxfail set.
        item = testdir.getitem("""
            import pytest
            @pytest.mark.xfail(strict=%s)
            def test_func():
                assert 0
        """ % strict)
        reports = runtestprotocol(item, log=False)
        assert len(reports) == 3
        callreport = reports[1]
        assert callreport.skipped
        assert callreport.wasxfail == ""
    def test_xfail_xpassed(self, testdir):
        # A passing xfail-marked test shows up as failed-with-wasxfail (XPASS).
        item = testdir.getitem("""
            import pytest
            @pytest.mark.xfail
            def test_func():
                assert 1
        """)
        reports = runtestprotocol(item, log=False)
        assert len(reports) == 3
        callreport = reports[1]
        assert callreport.failed
        assert callreport.wasxfail == ""
    def test_xfail_run_anyway(self, testdir):
        # --runxfail disables all xfail processing.
        testdir.makepyfile("""
            import pytest
            @pytest.mark.xfail
            def test_func():
                assert 0
            def test_func2():
                pytest.xfail("hello")
        """)
        result = testdir.runpytest("--runxfail")
        result.stdout.fnmatch_lines([
            "*def test_func():*",
            "*assert 0*",
            "*1 failed*1 pass*",
        ])
    def test_xfail_evalfalse_but_fails(self, testdir):
        # A false xfail condition leaves the failure as a plain failure.
        item = testdir.getitem("""
            import pytest
            @pytest.mark.xfail('False')
            def test_func():
                assert 0
        """)
        reports = runtestprotocol(item, log=False)
        callreport = reports[1]
        assert callreport.failed
        assert not hasattr(callreport, "wasxfail")
        assert 'xfail' in callreport.keywords
    def test_xfail_not_report_default(self, testdir):
        p = testdir.makepyfile(test_one="""
            import pytest
            @pytest.mark.xfail
            def test_this():
                assert 0
        """)
        testdir.runpytest(p, '-v')
        #result.stdout.fnmatch_lines([
        #    "*HINT*use*-r*"
        #])
    def test_xfail_not_run_xfail_reporting(self, testdir):
        # run=False reports NOTRUN instead of executing the test body.
        p = testdir.makepyfile(test_one="""
            import pytest
            @pytest.mark.xfail(run=False, reason="noway")
            def test_this():
                assert 0
            @pytest.mark.xfail("True", run=False)
            def test_this_true():
                assert 0
            @pytest.mark.xfail("False", run=False, reason="huh")
            def test_this_false():
                assert 1
        """)
        result = testdir.runpytest(p, '--report=xfailed', )
        result.stdout.fnmatch_lines([
            "*test_one*test_this*",
            "*NOTRUN*noway",
            "*test_one*test_this_true*",
            "*NOTRUN*condition:*True*",
            "*1 passed*",
        ])
    def test_xfail_not_run_no_setup_run(self, testdir):
        # run=False must also skip setup; the raising setup_module would
        # otherwise turn this into an error.
        p = testdir.makepyfile(test_one="""
            import pytest
            @pytest.mark.xfail(run=False, reason="hello")
            def test_this():
                assert 0
            def setup_module(mod):
                raise ValueError(42)
        """)
        result = testdir.runpytest(p, '--report=xfailed', )
        result.stdout.fnmatch_lines([
            "*test_one*test_this*",
            "*NOTRUN*hello",
            "*1 xfailed*",
        ])
    def test_xfail_xpass(self, testdir):
        # -rX lists unexpectedly passing tests; non-strict XPASS is not a failure.
        p = testdir.makepyfile(test_one="""
            import pytest
            @pytest.mark.xfail
            def test_that():
                assert 1
        """)
        result = testdir.runpytest(p, '-rX')
        result.stdout.fnmatch_lines([
            "*XPASS*test_that*",
            "*1 xpassed*"
        ])
        assert result.ret == 0
    def test_xfail_imperative(self, testdir):
        # pytest.xfail() called inside the test body.
        p = testdir.makepyfile("""
            import pytest
            def test_this():
                pytest.xfail("hello")
        """)
        result = testdir.runpytest(p)
        result.stdout.fnmatch_lines([
            "*1 xfailed*",
        ])
        result = testdir.runpytest(p, "-rx")
        result.stdout.fnmatch_lines([
            "*XFAIL*test_this*",
            "*reason:*hello*",
        ])
        result = testdir.runpytest(p, "--runxfail")
        result.stdout.fnmatch_lines("*1 pass*")
    def test_xfail_imperative_in_setup_function(self, testdir):
        # pytest.xfail() raised from setup applies to the test as well.
        p = testdir.makepyfile("""
            import pytest
            def setup_function(function):
                pytest.xfail("hello")
            def test_this():
                assert 0
        """)
        result = testdir.runpytest(p)
        result.stdout.fnmatch_lines([
            "*1 xfailed*",
        ])
        result = testdir.runpytest(p, "-rx")
        result.stdout.fnmatch_lines([
            "*XFAIL*test_this*",
            "*reason:*hello*",
        ])
        result = testdir.runpytest(p, "--runxfail")
        result.stdout.fnmatch_lines("""
            *def test_this*
            *1 fail*
        """)
    def xtest_dynamic_xfail_set_during_setup(self, testdir):
        # NOTE: 'xtest_' prefix means this test is deliberately disabled.
        p = testdir.makepyfile("""
            import pytest
            def setup_function(function):
                pytest.mark.xfail(function)
            def test_this():
                assert 0
            def test_that():
                assert 1
        """)
        result = testdir.runpytest(p, '-rxX')
        result.stdout.fnmatch_lines([
            "*XFAIL*test_this*",
            "*XPASS*test_that*",
        ])
    def test_dynamic_xfail_no_run(self, testdir):
        # Marker applied dynamically via request.applymarker, with run=False.
        p = testdir.makepyfile("""
            import pytest
            def pytest_funcarg__arg(request):
                request.applymarker(pytest.mark.xfail(run=False))
            def test_this(arg):
                assert 0
        """)
        result = testdir.runpytest(p, '-rxX')
        result.stdout.fnmatch_lines([
            "*XFAIL*test_this*",
            "*NOTRUN*",
        ])
    def test_dynamic_xfail_set_during_funcarg_setup(self, testdir):
        p = testdir.makepyfile("""
            import pytest
            def pytest_funcarg__arg(request):
                request.applymarker(pytest.mark.xfail)
            def test_this2(arg):
                assert 0
        """)
        result = testdir.runpytest(p)
        result.stdout.fnmatch_lines([
            "*1 xfailed*",
        ])
    @pytest.mark.parametrize('expected, actual, matchline',
                             [('TypeError', 'TypeError', "*1 xfailed*"),
                              ('(AttributeError, TypeError)', 'TypeError', "*1 xfailed*"),
                              ('TypeError', 'IndexError', "*1 failed*"),
                              ('(AttributeError, TypeError)', 'IndexError', "*1 failed*"),
                              ])
    def test_xfail_raises(self, expected, actual, matchline, testdir):
        # raises= restricts xfail to the listed exception types; anything
        # else remains a real failure.
        p = testdir.makepyfile("""
            import pytest
            @pytest.mark.xfail(raises=%s)
            def test_raises():
                raise %s()
        """ % (expected, actual))
        result = testdir.runpytest(p)
        result.stdout.fnmatch_lines([
            matchline,
        ])
    def test_strict_sanity(self, testdir):
        """sanity check for xfail(strict=True): a failing test should behave
        exactly like a normal xfail.
        """
        p = testdir.makepyfile("""
            import pytest
            @pytest.mark.xfail(reason='unsupported feature', strict=True)
            def test_foo():
                assert 0
        """)
        result = testdir.runpytest(p, '-rxX')
        result.stdout.fnmatch_lines([
            '*XFAIL*',
            '*unsupported feature*',
        ])
        assert result.ret == 0
    @pytest.mark.parametrize('strict', [True, False])
    def test_strict_xfail(self, testdir, strict):
        # strict=True turns an unexpected pass into a failure (XPASS(strict)).
        p = testdir.makepyfile("""
            import pytest

            @pytest.mark.xfail(reason='unsupported feature', strict=%s)
            def test_foo():
                with open('foo_executed', 'w'): pass # make sure test executes
        """ % strict)
        result = testdir.runpytest(p, '-rxX')
        if strict:
            result.stdout.fnmatch_lines([
                '*test_foo*',
                '*XPASS(strict)*unsupported feature*',
            ])
        else:
            result.stdout.fnmatch_lines([
                '*test_strict_xfail*',
                'XPASS test_strict_xfail.py::test_foo unsupported feature',
            ])
        assert result.ret == (1 if strict else 0)
        assert testdir.tmpdir.join('foo_executed').isfile()
    @pytest.mark.parametrize('strict', [True, False])
    def test_strict_xfail_condition(self, testdir, strict):
        # A false condition means the mark does not apply at all.
        p = testdir.makepyfile("""
            import pytest

            @pytest.mark.xfail(False, reason='unsupported feature', strict=%s)
            def test_foo():
                pass
        """ % strict)
        result = testdir.runpytest(p, '-rxX')
        result.stdout.fnmatch_lines('*1 passed*')
        assert result.ret == 0
    @pytest.mark.parametrize('strict_val', ['true', 'false'])
    def test_strict_xfail_default_from_file(self, testdir, strict_val):
        # xfail_strict in the ini file sets the default strictness.
        testdir.makeini('''
            [pytest]
            xfail_strict = %s
        ''' % strict_val)
        p = testdir.makepyfile("""
            import pytest
            @pytest.mark.xfail(reason='unsupported feature')
            def test_foo():
                pass
        """)
        result = testdir.runpytest(p, '-rxX')
        strict = strict_val == 'true'
        result.stdout.fnmatch_lines('*1 failed*' if strict else '*1 xpassed*')
        assert result.ret == (1 if strict else 0)
class TestXFailwithSetupTeardown:
    """xfail must also cover failures raised in setup/teardown (issue #9)."""
    def test_failing_setup_issue9(self, testdir):
        testdir.makepyfile("""
            import pytest
            def setup_function(func):
                assert 0
            @pytest.mark.xfail
            def test_func():
                pass
        """)
        result = testdir.runpytest()
        result.stdout.fnmatch_lines([
            "*1 xfail*",
        ])
    def test_failing_teardown_issue9(self, testdir):
        testdir.makepyfile("""
            import pytest
            def teardown_function(func):
                assert 0
            @pytest.mark.xfail
            def test_func():
                pass
        """)
        result = testdir.runpytest()
        result.stdout.fnmatch_lines([
            "*1 xfail*",
        ])
class TestSkip:
    """Tests for the unconditional @pytest.mark.skip marker."""
    def test_skip_class(self, testdir):
        # A class-level skip applies to all its test methods but not to
        # module-level tests.
        testdir.makepyfile("""
            import pytest
            @pytest.mark.skip
            class TestSomething(object):
                def test_foo(self):
                    pass
                def test_bar(self):
                    pass
            def test_baz():
                pass
        """)
        rec = testdir.inline_run()
        rec.assertoutcome(skipped=2, passed=1)
    def test_skips_on_false_string(self, testdir):
        # Unlike skipif, a string argument to skip is a reason, never a condition.
        testdir.makepyfile("""
            import pytest
            @pytest.mark.skip('False')
            def test_foo():
                pass
        """)
        rec = testdir.inline_run()
        rec.assertoutcome(skipped=1)
    def test_arg_as_reason(self, testdir):
        testdir.makepyfile("""
            import pytest
            @pytest.mark.skip('testing stuff')
            def test_bar():
                pass
        """)
        result = testdir.runpytest('-rs')
        result.stdout.fnmatch_lines([
            "*testing stuff*",
            "*1 skipped*",
        ])
    def test_skip_no_reason(self, testdir):
        testdir.makepyfile("""
            import pytest
            @pytest.mark.skip
            def test_foo():
                pass
        """)
        result = testdir.runpytest('-rs')
        result.stdout.fnmatch_lines([
            "*unconditional skip*",
            "*1 skipped*",
        ])
    def test_skip_with_reason(self, testdir):
        testdir.makepyfile("""
            import pytest
            @pytest.mark.skip(reason="for lolz")
            def test_bar():
                pass
        """)
        result = testdir.runpytest('-rs')
        result.stdout.fnmatch_lines([
            "*for lolz*",
            "*1 skipped*",
        ])
    def test_only_skips_marked_test(self, testdir):
        testdir.makepyfile("""
            import pytest
            @pytest.mark.skip
            def test_foo():
                pass
            @pytest.mark.skip(reason="nothing in particular")
            def test_bar():
                pass
            def test_baz():
                assert True
        """)
        result = testdir.runpytest('-rs')
        result.stdout.fnmatch_lines([
            "*nothing in particular*",
            "*1 passed*2 skipped*",
        ])
class TestSkipif:
    """Tests for the conditional @pytest.mark.skipif marker."""
    def test_skipif_conditional(self, testdir):
        # A true condition makes setup raise skip.Exception with the
        # condition text as the message.
        item = testdir.getitem("""
            import pytest
            @pytest.mark.skipif("hasattr(os, 'sep')")
            def test_func():
                pass
        """) # noqa
        x = pytest.raises(pytest.skip.Exception, lambda:
                          pytest_runtest_setup(item))
        assert x.value.msg == "condition: hasattr(os, 'sep')"
    @pytest.mark.parametrize('params', [
        '"hasattr(sys, \'platform\')"',
        'True, reason="invalid platform"',
    ])
    def test_skipif_reporting(self, testdir, params):
        p = testdir.makepyfile(test_foo="""
            import pytest
            @pytest.mark.skipif(%(params)s)
            def test_that():
                assert 0
        """ % dict(params=params))
        result = testdir.runpytest(p, '-s', '-rs')
        result.stdout.fnmatch_lines([
            "*SKIP*1*test_foo.py*platform*",
            "*1 skipped*"
        ])
        assert result.ret == 0
    @pytest.mark.parametrize('marker, msg1, msg2', [
        ('skipif', 'SKIP', 'skipped'),
        ('xfail', 'XPASS', 'xpassed'),
    ])
    def test_skipif_reporting_multiple(self, testdir, marker, msg1, msg2):
        # With stacked marks the true condition's reason is reported.
        testdir.makepyfile(test_foo="""
            import pytest
            @pytest.mark.{marker}(False, reason='first_condition')
            @pytest.mark.{marker}(True, reason='second_condition')
            def test_foobar():
                assert 1
        """.format(marker=marker))
        result = testdir.runpytest('-s', '-rsxX')
        result.stdout.fnmatch_lines([
            "*{msg1}*test_foo.py*second_condition*".format(msg1=msg1),
            "*1 {msg2}*".format(msg2=msg2),
        ])
        assert result.ret == 0
def test_skip_not_report_default(testdir):
    # Without -r, individual skip reasons are not listed in the summary.
    p = testdir.makepyfile(test_one="""
        import pytest
        def test_this():
            pytest.skip("hello")
    """)
    result = testdir.runpytest(p, '-v')
    result.stdout.fnmatch_lines([
        #"*HINT*use*-r*",
        "*1 skipped*",
    ])
def test_skipif_class(testdir):
    # A class-level pytestmark skipif applies to every test method.
    p = testdir.makepyfile("""
        import pytest
        class TestClass:
            pytestmark = pytest.mark.skipif("True")
            def test_that(self):
                assert 0
            def test_though(self):
                assert 0
    """)
    result = testdir.runpytest(p)
    result.stdout.fnmatch_lines([
        "*2 skipped*"
    ])
def test_skip_reasons_folding():
    """folded_skips() must collapse skip reports sharing the same longrepr
    (path, lineno, message) into one entry with a count.
    """
    path = 'xyz'
    lineno = 3
    message = "justso"
    longrepr = (path, lineno, message)
    class X:
        pass
    ev1 = X()
    ev1.when = "execute"
    ev1.skipped = True
    ev1.longrepr = longrepr
    ev2 = X()
    ev2.longrepr = longrepr
    ev2.skipped = True
    l = folded_skips([ev1, ev2])
    assert len(l) == 1
    # Unpack into fresh names: the original unpacked into 'lineno', which
    # shadowed the expected value and turned the line-number check into the
    # tautology 'assert lineno == lineno' (it could never fail).
    num, reported_fspath, reported_lineno, reason = l[0]
    assert num == 2
    assert reported_fspath == path
    assert reported_lineno == lineno
    assert reason == message
def test_skipped_reasons_functional(testdir):
    # Skips raised from a shared helper fold into one summary line pointing
    # at the helper's location (conftest.py:3).
    testdir.makepyfile(
        test_one="""
            from conftest import doskip
            def setup_function(func):
                doskip()
            def test_func():
                pass
            class TestClass:
                def test_method(self):
                    doskip()
        """,
        test_two = """
            from conftest import doskip
            doskip()
        """,
        conftest = """
            import pytest
            def doskip():
                pytest.skip('test')
        """
    )
    result = testdir.runpytest('--report=skipped')
    result.stdout.fnmatch_lines([
        "*SKIP*3*conftest.py:3: test",
    ])
    assert result.ret == 0
def test_reportchars(testdir):
    # -r letters select which outcome categories get summary lines.
    testdir.makepyfile("""
        import pytest
        def test_1():
            assert 0
        @pytest.mark.xfail
        def test_2():
            assert 0
        @pytest.mark.xfail
        def test_3():
            pass
        def test_4():
            pytest.skip("four")
    """)
    result = testdir.runpytest("-rfxXs")
    result.stdout.fnmatch_lines([
        "FAIL*test_1*",
        "XFAIL*test_2*",
        "XPASS*test_3*",
        "SKIP*four*",
    ])
def test_reportchars_error(testdir):
    # -rE lists errors (here: a teardown hook that fails).
    testdir.makepyfile(
        conftest="""
        def pytest_runtest_teardown():
            assert 0
        """,
        test_simple="""
        def test_foo():
            pass
        """)
    result = testdir.runpytest('-rE')
    result.stdout.fnmatch_lines([
        'ERROR*test_foo*',
    ])
def test_reportchars_all(testdir):
    # -ra enables all summary categories at once.
    testdir.makepyfile("""
        import pytest
        def test_1():
            assert 0
        @pytest.mark.xfail
        def test_2():
            assert 0
        @pytest.mark.xfail
        def test_3():
            pass
        def test_4():
            pytest.skip("four")
    """)
    result = testdir.runpytest("-ra")
    result.stdout.fnmatch_lines([
        "FAIL*test_1*",
        "SKIP*four*",
        "XFAIL*test_2*",
        "XPASS*test_3*",
    ])
def test_reportchars_all_error(testdir):
    # -ra must include errors as well.
    testdir.makepyfile(
        conftest="""
        def pytest_runtest_teardown():
            assert 0
        """,
        test_simple="""
        def test_foo():
            pass
        """)
    result = testdir.runpytest('-ra')
    result.stdout.fnmatch_lines([
        'ERROR*test_foo*',
    ])
@pytest.mark.xfail("hasattr(sys, 'pypy_version_info')")
def test_errors_in_xfail_skip_expressions(testdir):
    # Broken condition strings (NameError, SyntaxError) must surface as
    # collection-time errors rather than being silently ignored.
    testdir.makepyfile("""
        import pytest
        @pytest.mark.skipif("asd")
        def test_nameerror():
            pass
        @pytest.mark.xfail("syntax error")
        def test_syntax():
            pass
        def test_func():
            pass
    """)
    result = testdir.runpytest()
    markline = "                ^"
    if sys.platform.startswith("java"):
        # XXX report this to java
        markline = "*" + markline[8:]
    result.stdout.fnmatch_lines([
        "*ERROR*test_nameerror*",
        "*evaluating*skipif*expression*",
        "*asd*",
        "*ERROR*test_syntax*",
        "*evaluating*xfail*expression*",
        "    syntax error",
        markline,
        "SyntaxError: invalid syntax",
        "*1 pass*2 error*",
    ])
def test_xfail_skipif_with_globals(testdir):
    # Condition strings can reference the test module's globals.
    testdir.makepyfile("""
        import pytest
        x = 3
        @pytest.mark.skipif("x == 3")
        def test_skip1():
            pass
        @pytest.mark.xfail("x == 3")
        def test_boolean():
            assert 0
    """)
    result = testdir.runpytest("-rsx")
    result.stdout.fnmatch_lines([
        "*SKIP*x == 3*",
        "*XFAIL*test_boolean*",
        "*x == 3*",
    ])
def test_direct_gives_error(testdir):
    # A boolean skipif without reason= is a collection error
    # (see TestEvaluator.test_marked_skip_with_not_string).
    testdir.makepyfile("""
        import pytest
        @pytest.mark.skipif(True)
        def test_skip1():
            pass
    """)
    result = testdir.runpytest()
    result.stdout.fnmatch_lines([
        "*1 error*",
    ])
def test_default_markers(testdir):
    # skipif/xfail must be documented in --markers output.
    result = testdir.runpytest("--markers")
    result.stdout.fnmatch_lines([
        "*skipif(*condition)*skip*",
        "*xfail(*condition, reason=None, run=True, raises=None)*expected failure*",
    ])
def test_xfail_test_setup_exception(testdir):
    # An exception in a setup hook still counts as xfail, never as xpass.
    testdir.makeconftest("""
            def pytest_runtest_setup():
                0 / 0
        """)
    p = testdir.makepyfile("""
        import pytest
        @pytest.mark.xfail
        def test_func():
            assert 0
    """)
    result = testdir.runpytest(p)
    assert result.ret == 0
    assert 'xfailed' in result.stdout.str()
    assert 'xpassed' not in result.stdout.str()
def test_imperativeskip_on_xfail_test(testdir):
    # A conftest-level imperative skip overrides both xfail and skipif marks.
    testdir.makepyfile("""
        import pytest
        @pytest.mark.xfail
        def test_that_fails():
            assert 0
        @pytest.mark.skipif("True")
        def test_hello():
            pass
    """)
    testdir.makeconftest("""
        import pytest
        def pytest_runtest_setup(item):
            pytest.skip("abc")
    """)
    result = testdir.runpytest("-rsxX")
    result.stdout.fnmatch_lines_random("""
        *SKIP*abc*
        *SKIP*condition: True*
        *2 skipped*
    """)
class TestBooleanCondition:
    """skipif/xfail given boolean (non-string) conditions."""
    def test_skipif(self, testdir):
        testdir.makepyfile("""
            import pytest
            @pytest.mark.skipif(True, reason="True123")
            def test_func1():
                pass
            @pytest.mark.skipif(False, reason="True123")
            def test_func2():
                pass
        """)
        result = testdir.runpytest()
        result.stdout.fnmatch_lines("""
            *1 passed*1 skipped*
        """)
    def test_skipif_noreason(self, testdir):
        # Booleans require reason=; omitting it is a collection error.
        testdir.makepyfile("""
            import pytest
            @pytest.mark.skipif(True)
            def test_func():
                pass
        """)
        result = testdir.runpytest("-rs")
        result.stdout.fnmatch_lines("""
            *1 error*
        """)
    def test_xfail(self, testdir):
        testdir.makepyfile("""
            import pytest
            @pytest.mark.xfail(True, reason="True123")
            def test_func():
                assert 0
        """)
        result = testdir.runpytest("-rxs")
        result.stdout.fnmatch_lines("""
            *XFAIL*
            *True123*
            *1 xfail*
        """)
def test_xfail_item(testdir):
    # Ensure pytest.xfail works with non-Python Item
    testdir.makeconftest("""
        import pytest
        class MyItem(pytest.Item):
            nodeid = 'foo'
            def runtest(self):
                pytest.xfail("Expected Failure")
        def pytest_collect_file(path, parent):
            return MyItem("foo", parent)
    """)
    result = testdir.inline_run()
    passed, skipped, failed = result.listoutcomes()
    assert not failed
    # The xfail shows up among the skipped reports, flagged via 'wasxfail'.
    xfailed = [r for r in skipped if hasattr(r, 'wasxfail')]
    assert xfailed
| mpl-2.0 |
sestrella/ansible | lib/ansible/modules/network/check_point/cp_mgmt_group_with_exclusion.py | 20 | 5215 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage Check Point Firewall (c) 2019
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: cp_mgmt_group_with_exclusion
short_description: Manages group-with-exclusion objects on Check Point over Web Services API
description:
- Manages group-with-exclusion objects on Check Point devices including creating, updating and removing objects.
- All operations are performed over Web Services API.
version_added: "2.9"
author: "Or Soffer (@chkp-orso)"
options:
name:
description:
- Object name.
type: str
required: True
except:
description:
- Name or UID of an object which the group excludes.
type: str
include:
description:
- Name or UID of an object which the group includes.
type: str
tags:
description:
- Collection of tag identifiers.
type: list
color:
description:
- Color of the object. Should be one of existing colors.
type: str
choices: ['aquamarine', 'black', 'blue', 'crete blue', 'burlywood', 'cyan', 'dark green', 'khaki', 'orchid', 'dark orange', 'dark sea green',
'pink', 'turquoise', 'dark blue', 'firebrick', 'brown', 'forest green', 'gold', 'dark gold', 'gray', 'dark gray', 'light green', 'lemon chiffon',
'coral', 'sea green', 'sky blue', 'magenta', 'purple', 'slate blue', 'violet red', 'navy blue', 'olive', 'orange', 'red', 'sienna', 'yellow']
comments:
description:
- Comments string.
type: str
details_level:
description:
- The level of detail for some of the fields in the response can vary from showing only the UID value of the object to a fully detailed
representation of the object.
type: str
choices: ['uid', 'standard', 'full']
groups:
description:
- Collection of group identifiers.
type: list
ignore_warnings:
description:
- Apply changes ignoring warnings.
type: bool
ignore_errors:
description:
- Apply changes ignoring errors. You won't be able to publish such a changes. If ignore-warnings flag was omitted - warnings will also be ignored.
type: bool
extends_documentation_fragment: checkpoint_objects
"""
EXAMPLES = """
- name: add-group-with-exclusion
cp_mgmt_group_with_exclusion:
except: New Group 2
include: New Group 1
name: Group with exclusion
state: present
- name: set-group-with-exclusion
cp_mgmt_group_with_exclusion:
except: New Group 1
include: New Group 2
name: Group with exclusion
state: present
- name: delete-group-with-exclusion
cp_mgmt_group_with_exclusion:
name: Group with exclusion
state: absent
"""
RETURN = """
cp_mgmt_group_with_exclusion:
description: The checkpoint object created or updated.
returned: always, except when deleting the object.
type: dict
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.checkpoint.checkpoint import checkpoint_argument_spec_for_objects, api_call
def main():
    """Module entry point: build the argument spec, call the Check Point
    management API for 'group-with-exclusion' objects and exit with the
    result."""
    # 'except' is a reserved word in Python, so the spec is written as a
    # dict literal with string keys instead of dict(...) keyword arguments.
    argument_spec = {
        'name': dict(type='str', required=True),
        'except': dict(type='str'),
        'include': dict(type='str'),
        'tags': dict(type='list'),
        'color': dict(type='str', choices=['aquamarine', 'black', 'blue', 'crete blue', 'burlywood', 'cyan', 'dark green',
                                           'khaki', 'orchid', 'dark orange', 'dark sea green', 'pink', 'turquoise', 'dark blue', 'firebrick', 'brown',
                                           'forest green', 'gold', 'dark gold', 'gray', 'dark gray', 'light green', 'lemon chiffon', 'coral', 'sea green',
                                           'sky blue', 'magenta', 'purple', 'slate blue', 'violet red', 'navy blue', 'olive', 'orange', 'red', 'sienna',
                                           'yellow']),
        'comments': dict(type='str'),
        'details_level': dict(type='str', choices=['uid', 'standard', 'full']),
        'groups': dict(type='list'),
        'ignore_warnings': dict(type='bool'),
        'ignore_errors': dict(type='bool'),
    }
    # Mix in the shared object-management parameters (state, version, ...).
    argument_spec.update(checkpoint_argument_spec_for_objects)

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
    result = api_call(module, 'group-with-exclusion')
    module.exit_json(**result)
# Allow the file to run as a standalone Ansible module.
if __name__ == '__main__':
    main()
| gpl-3.0 |
jean/sentry | src/sentry/api/endpoints/organization_release_commits.py | 4 | 1532 | from __future__ import absolute_import
from sentry.api.base import DocSection
from sentry.api.bases.organization import OrganizationReleasesBaseEndpoint
from sentry.api.exceptions import ResourceDoesNotExist
from sentry.api.serializers import serialize
from sentry.models import Release, ReleaseCommit
class OrganizationReleaseCommitsEndpoint(OrganizationReleasesBaseEndpoint):
    # Groups this endpoint under the Releases section of the public API docs;
    # the get() docstring below is the rendered documentation text.
    doc_section = DocSection.RELEASES

    def get(self, request, organization, version):
        """
        List an Organization Release's Commits
        ``````````````````````````````````````

        Retrieve a list of commits for a given release.

        :pparam string organization_slug: the slug of the organization the
                                          release belongs to.
        :pparam string version: the version identifier of the release.
        :auth: required
        """
        # Only match releases attached to projects the requester may access;
        # an out-of-scope release 404s the same way as a missing one, so
        # existence is not leaked.
        try:
            release = Release.objects.get(
                organization_id=organization.id,
                projects__in=self.get_allowed_projects(request, organization),
                version=version,
            )
        except Release.DoesNotExist:
            raise ResourceDoesNotExist

        # select_related pulls the commit and author rows in the same query,
        # avoiding one extra query per commit during serialization.
        queryset = ReleaseCommit.objects.filter(
            release=release,
        ).select_related('commit', 'commit__author')

        return self.paginate(
            request=request,
            queryset=queryset,
            order_by='order',
            on_results=lambda x: serialize([rc.commit for rc in x], request.user),
        )
| bsd-3-clause |
alekLukanen/pyDist | tests/tasks/running_tasks.py | 1 | 1223 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 22 22:03:23 2017
@author: alek
"""
import sys
sys.path.append('../../')
from pyDist import Nodes
import logging
import sys
from pyDist.TaskManager import TaskManager
#change these up for use in other cases
# Shared TaskManager instance for this example script.
taskManager = TaskManager()
#taskManager.executor = concurrent.futures.ThreadPoolExecutor(4)
#logging utility
# Route everything at DEBUG and above to stdout with file/line prefixes.
logging.basicConfig(format='%(filename)-20s:%(lineno)-43s | %(levelname)-8s | %(message)s'
                , stream=sys.stdout, level=logging.DEBUG)
logger = logging.getLogger()
def run_this(i):
    """Toy unit of work submitted to the node's executor; always succeeds."""
    message = 'run_this() was called at index %d' % i
    logger.debug(message)
    return True
def callback(a):
    """Debug done-callback: log whatever object the future hands back."""
    message = 'callback(%s)' % a
    logger.debug(message)
def start_node():
    """Create a ClusterNode, queue three sample tasks on it and return it."""
    logger.debug('starting the node (PROCESS MAIN)')
    node = Nodes.ClusterNode()
    # Submit a small batch of work items; each future gets the node's
    # completion callback before being handed to the task manager.
    for index in range(3):
        future = node.taskManager.executor.submit(run_this, index)
        future.add_done_callback(node.work_item_finished_callback)
        node.taskManager.submit(future)
    #node.start_updating()
    #node.boot('0.0.0.0', 9000)
    #logger.debug('node stopped')
    return node
if __name__ == '__main__':
    # Manual smoke test: build a node and queue the sample tasks.
    logger.debug('basic task running test')
    node = start_node()
| mit |
opentelega/OpenTelega | fileserver/views.py | 1 | 4258 | from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from fileserver.UserWorker import GetUserFromPostRequest, \
GetRecipientsFromPostRequest, GetUploadedFileFromPostRequest, \
GetFileListByUser, GetFilesCountByUser, GetFileResponseFromPostRequest, \
ChangePasswordFromPostRequest, SaveUploadedFile
from fileserver.ResponseWorker import ResponseByType, ResponseType
from fileserver.ResponseFormatWorker import GenerateOutput
from fileserver.OutputTableHeader import OutputTableHeader
@csrf_exempt
def upload_file(request):
    """Store an uploaded file for a user and return the new file id."""
    try:
        code, user = GetUserFromPostRequest(request)
        if code != ResponseType.OK:
            return ResponseByType(code, request)

        code, recipients = GetRecipientsFromPostRequest(request)
        if code != ResponseType.OK:
            return ResponseByType(code, request)

        code, uploaded_file = GetUploadedFileFromPostRequest(request)
        if code != ResponseType.OK:
            return ResponseByType(code, request)

        code, file_id = SaveUploadedFile(uploaded_file, user, recipients)
        if code != ResponseType.OK:
            return ResponseByType(code, request)

        # One-cell table holding the id of the newly stored file.
        header = OutputTableHeader.UploadFile.value
        data = ((str(file_id),),)
        return HttpResponse(GenerateOutput(header, data, request))
    except Exception:
        # Collapse any unexpected failure into a generic error response.
        return ResponseByType(ResponseType.UnknownError, request)
@csrf_exempt
def get_file_list(request):
    """Return the list of files visible to the requesting user."""
    try:
        code, user = GetUserFromPostRequest(request)
        if code != ResponseType.OK:
            return ResponseByType(code, request)

        code, file_list = GetFileListByUser(request, user)
        if code != ResponseType.OK:
            return ResponseByType(code, request)

        # The helper already produced the response body in the requested
        # output format, so it is returned verbatim.
        return HttpResponse(file_list)
    except Exception:
        return ResponseByType(ResponseType.UnknownError, request)
@csrf_exempt
def get_files_count(request):
    """Return how many files are stored for the requesting user."""
    try:
        code, user = GetUserFromPostRequest(request)
        if code != ResponseType.OK:
            return ResponseByType(code, request)

        code, count = GetFilesCountByUser(user)
        if code != ResponseType.OK:
            return ResponseByType(code, request)

        # One-cell table containing the count rendered as text.
        header = OutputTableHeader.GetNumberOfFiles.value
        data = ((str(count),),)
        return HttpResponse(GenerateOutput(header, data, request))
    except Exception:
        return ResponseByType(ResponseType.UnknownError, request)
@csrf_exempt
def download_file_by_id(request):
    """Stream a stored file back to a user allowed to read it."""
    try:
        code, user = GetUserFromPostRequest(request)
        if code != ResponseType.OK:
            return ResponseByType(code, request)

        # The helper builds the complete file download response itself.
        code, file_response = GetFileResponseFromPostRequest(request, user)
        if code != ResponseType.OK:
            return ResponseByType(code, request)

        return file_response
    except Exception:
        return ResponseByType(ResponseType.UnknownError, request)
@csrf_exempt
def change_user_password(request):
    """Update the password of the authenticated user."""
    try:
        code, user = GetUserFromPostRequest(request)
        if code != ResponseType.OK:
            return ResponseByType(code, request)

        # The handler returns a ResponseType describing the outcome, which
        # maps directly onto the HTTP response.
        outcome = ChangePasswordFromPostRequest(request, user)
        return ResponseByType(outcome, request)
    except Exception:
        return ResponseByType(ResponseType.UnknownError, request)
@csrf_exempt
def user_get_version(request):
    """Report the server version ("1.0") to an authenticated user."""
    try:
        code, user = GetUserFromPostRequest(request)
        if code != ResponseType.OK:
            return ResponseByType(code, request)

        # One-cell table containing the fixed version string.
        header = OutputTableHeader.GetVersion.value
        data = (("1.0",),)
        return HttpResponse(GenerateOutput(header, data, request))
    except Exception:
        return ResponseByType(ResponseType.UnknownError, request)
stevenliuit/neon | neon/models/rbm.py | 4 | 3945 | # ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Simple restricted Boltzmann Machine model.
"""
import logging
from neon.backends.backend import Block
from neon.models.model import Model
from neon.util.compat import range
logger = logging.getLogger(__name__)
class RBM(Model):
    """
    Restricted Boltzmann Machine with binary visible and binary hidden units.

    All configuration arrives through ``**kwargs`` and is copied onto the
    instance: ``layers`` and ``batch_size`` are mandatory; ``fit`` also
    reads ``cost``, ``backend``, ``num_epochs`` and ``epochs_complete``
    from the instance, so those are expected to be supplied as well.
    """

    def __init__(self, **kwargs):
        # Every keyword argument becomes an instance attribute verbatim.
        self.__dict__.update(kwargs)
        for req_param in ['layers', 'batch_size']:
            if not hasattr(self, req_param):
                raise ValueError("required parameter: %s not specified" %
                                 req_param)
        self.cost.initialize(kwargs)

    def link(self, initlayer=None):
        """
        To make legacy config files work.
        """
        pass

    def initialize(self):
        """
        To make legacy config files work.
        """
        pass

    def fit(self, dataset):
        """
        Learn model weights on the given datasets.

        Runs contrastive-divergence style training: for each minibatch a
        positive phase, a negative phase, a cost accumulation and a weight
        update, repeated for ``self.num_epochs`` epochs.
        """
        for layer in self.layers:
            logger.info("%s", str(layer))
        inputs = dataset.get_inputs(train=True)['train']
        nin = self.layers[0].nin
        self.nlayers = len(self.layers)
        # temp_dtype is optional configuration; default to the backend's
        # choice when it was not supplied via kwargs.
        if 'temp_dtype' not in self.__dict__:
            self.temp_dtype = None
        # Scratch buffer sized (visible units x batch) on the backend device.
        self.temp = self.backend.empty((nin, self.batch_size), self.temp_dtype)

        # we may include 1 smaller-sized partial batch if num recs is not an
        # exact multiple of batch size.
        num_batches = len(inputs)
        logger.info('commencing model fitting')
        # Scalar accumulator for the per-epoch reconstruction cost.
        error = self.backend.empty((1, 1))
        while self.epochs_complete < self.num_epochs:
            # begin/end calls bracket each phase so instrumented backends
            # can account time per epoch/minibatch/phase.
            self.backend.begin(Block.epoch, self.epochs_complete)
            error.fill(0.0)
            for batch in range(num_batches):
                self.backend.begin(Block.minibatch, batch)
                inputs_batch = dataset.get_batch(inputs, batch)
                self.backend.begin(Block.fprop, batch)
                self.positive(inputs_batch)
                self.backend.end(Block.fprop, batch)
                self.backend.begin(Block.bprop, batch)
                self.negative(inputs_batch)
                self.backend.end(Block.bprop, batch)
                # Accumulate this minibatch's cost into the running total.
                self.backend.add(error, self.cost.apply_function(inputs_batch),
                                 error)
                self.backend.begin(Block.update, batch)
                self.update(self.epochs_complete)
                self.backend.end(Block.update, batch)
                self.backend.end(Block.minibatch, batch)
            logger.info('epoch: %d, total training error: %0.5f',
                        self.epochs_complete,
                        error.asnumpyarray() / num_batches)
            self.backend.end(Block.epoch, self.epochs_complete)
            self.epochs_complete += 1

    def positive(self, inputs):
        """Wrapper for RBMLayer.positive (positive/data-driven phase)."""
        self.layers[0].positive(inputs)
        return None

    def negative(self, inputs):
        """Wrapper for RBMLayer.negative (negative/reconstruction phase)."""
        self.layers[0].negative(inputs)
        return None

    def update(self, epoch):
        """Wrapper for RBMLayer.update (apply the weight updates)."""
        self.layers[0].update(epoch)
| apache-2.0 |
shikil/sympy | sympy/physics/quantum/tests/test_commutator.py | 124 | 1830 | from sympy import symbols, Integer
from sympy.physics.quantum.dagger import Dagger
from sympy.physics.quantum.commutator import Commutator as Comm
from sympy.physics.quantum.operator import Operator
# Commutative scalars and noncommutative operator symbols shared by the tests.
a, b, c = symbols('a,b,c')
A, B, C, D = symbols('A,B,C,D', commutative=False)
def test_commutator():
    """A commutator of noncommutative symbols stays unevaluated."""
    comm = Comm(A, B)
    assert isinstance(comm, Comm)
    assert comm.is_commutative is False
    # Substitution acts on the commutator's arguments.
    assert comm.subs(A, C) == Comm(C, B)
def test_commutator_identities():
    # Scalars factor out; [X, X] and commutators of commuting args vanish.
    assert Comm(a*A, b*B) == a*b*Comm(A, B)
    assert Comm(A, A) == 0
    assert Comm(a, b) == 0
    # Antisymmetry, and the defining expansion [A, B] = A*B - B*A.
    assert Comm(A, B) == -Comm(B, A)
    assert Comm(A, B).doit() == A*B - B*A
    # Leibniz (product) rules for products inside the commutator.
    assert Comm(A, B*C).expand(commutator=True) == Comm(A, B)*C + B*Comm(A, C)
    assert Comm(A*B, C*D).expand(commutator=True) == \
        A*C*Comm(B, D) + A*Comm(B, C)*D + C*Comm(A, D)*B + Comm(A, C)*D*B
    # Bilinearity over sums.
    assert Comm(A + B, C + D).expand(commutator=True) == \
        Comm(A, C) + Comm(A, D) + Comm(B, C) + Comm(B, D)
    assert Comm(A, B + C).expand(commutator=True) == Comm(A, B) + Comm(A, C)
    # Jacobi identity: the cyclic sum of nested commutators is zero.
    e = Comm(A, Comm(B, C)) + Comm(B, Comm(C, A)) + Comm(C, Comm(A, B))
    assert e.doit().expand() == 0
def test_commutator_dagger():
    # Hermitian conjugation reverses products, so Dagger([A*B, C]) expands
    # into commutators of the daggered operators in reversed order.
    comm = Comm(A*B, C)
    assert Dagger(comm).expand(commutator=True) == \
        - Comm(Dagger(B), Dagger(C))*Dagger(A) - \
        Dagger(B)*Comm(Dagger(A), Dagger(C))
class Foo(Operator):
    # Declares [Foo, Bar] == 0; Commutator.doit discovers this hook by name.
    def _eval_commutator_Bar(self, bar):
        return Integer(0)
class Bar(Operator):
    # No commutator rules of its own; partner operator for the tests below.
    pass
class Tam(Operator):
    # Declares [Tam, Foo] == 1.
    def _eval_commutator_Foo(self, foo):
        return Integer(1)
def test_eval_commutator():
    """Class-level _eval_commutator_* hooks drive Commutator.doit()."""
    foo = Foo('F')
    bar = Bar('B')
    tam = Tam('T')
    # Foo declares [Foo, Bar] == 0; the reversed order follows by antisymmetry.
    assert Comm(foo, bar).doit() == 0
    assert Comm(bar, foo).doit() == 0
    # Tam declares [Tam, Foo] == 1, hence [Foo, Tam] == -1.
    assert Comm(foo, tam).doit() == -1
    assert Comm(tam, foo).doit() == 1
    # No rule relates Bar and Tam, so doit falls back to the definition.
    assert Comm(bar, tam).doit() == bar*tam - tam*bar
| bsd-3-clause |
softlayer/softlayer-python | SoftLayer/CLI/sshkey/add.py | 2 | 1272 | """Add a new SSH key."""
# :license: MIT, see LICENSE for more details.
from os import path
import click
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import exceptions
@click.command()
@click.argument('label')
@click.option('--in-file', '-f',
              type=click.Path(exists=True),
              help="The id_rsa.pub file to import for this key")
@click.option('--key', '-k', help="The actual SSH key")
@click.option('--note', help="Extra note that will be associated with key")
@environment.pass_env
def cli(env, label, in_file, key, note):
    """Add a new SSH key.

    Exactly one of --in-file or --key must be supplied; the key material is
    registered under LABEL with an optional NOTE.
    """
    if in_file is None and key is None:
        raise exceptions.ArgumentError(
            'Either [-f | --in-file] or [-k | --key] arguments are required to add a key'
        )

    if in_file and key:
        raise exceptions.ArgumentError(
            '[-f | --in-file] is not allowed with [-k | --key]'
        )

    if key:
        key_text = key
    else:
        # Text mode handles newline translation by default; the previous
        # 'rU' (universal newlines) mode was removed in Python 3.11 and
        # raises ValueError there. The with-statement closes the file, so
        # no explicit close() is needed.
        with open(path.expanduser(in_file), 'r') as key_file:
            key_text = key_file.read().strip()

    mgr = SoftLayer.SshKeyManager(env.client)
    result = mgr.add_key(key_text, label, note)

    env.fout("SSH key added: %s" % result.get('fingerprint'))
| mit |
lakiw/cripts | cripts/config/views.py | 1 | 5837 | import json
import re
from django.contrib.auth.decorators import user_passes_test
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from cripts.config.config import CRIPTsConfig
from cripts.config.forms import ConfigGeneralForm, ConfigLDAPForm, ConfigSecurityForm, ConfigCriptsForm
from cripts.config.forms import ConfigLoggingForm, ConfigServicesForm, ConfigDownloadForm
from cripts.config.handlers import modify_configuration
from cripts.core.user_tools import user_is_admin
@user_passes_test(user_is_admin)
def cripts_config(request):
    """
    Generate the CRIPTs Configuration template.

    :param request: Django request object (Required)
    :type request: :class:`django.http.HttpRequest`
    :returns: :class:`django.http.HttpResponse`
    """
    config = CRIPTsConfig.objects().first()
    if config:
        config = config.to_dict()
        # These settings are stored as lists but edited as comma-separated
        # text inside the form.
        config['allowed_hosts'] = ", ".join(config['allowed_hosts'])
        config['service_dirs'] = ", ".join(config['service_dirs'])
        # Pre-populate every form with the stored configuration.
        form_kwargs = {'initial': config}
    else:
        # No configuration saved yet: render blank forms.
        form_kwargs = {}

    context = {
        'config_general_form': ConfigGeneralForm(**form_kwargs),
        'config_LDAP_form': ConfigLDAPForm(**form_kwargs),
        'config_security_form': ConfigSecurityForm(**form_kwargs),
        'config_logging_form': ConfigLoggingForm(**form_kwargs),
        'config_services_form': ConfigServicesForm(**form_kwargs),
        'config_download_form': ConfigDownloadForm(**form_kwargs),
        'config_CRIPTs_form': ConfigCriptsForm(**form_kwargs),
    }
    return render_to_response('config.html', context, RequestContext(request))
@user_passes_test(user_is_admin)
def modify_config(request):
    """
    Modify the CRIPTs Configuration. Should be an AJAX POST.

    :param request: Django request object (Required)
    :type request: :class:`django.http.HttpRequest`
    :returns: :class:`django.http.HttpResponse`
    """
    if request.method == "POST" and request.is_ajax():
        config_general_form = ConfigGeneralForm(request.POST)
        config_LDAP_form = ConfigLDAPForm(request.POST)
        config_security_form = ConfigSecurityForm(request.POST)
        config_logging_form = ConfigLoggingForm(request.POST)
        config_services_form = ConfigServicesForm(request.POST)
        config_download_form = ConfigDownloadForm(request.POST)
        config_CRIPTs_form = ConfigCriptsForm(request.POST)
        forms = [config_general_form,
                 config_LDAP_form,
                 config_security_form,
                 config_logging_form,
                 config_services_form,
                 config_download_form,
                 config_CRIPTs_form]

        # Maps each form class name to its UI tab label; entries for valid
        # forms are removed below, so only tabs with errors remain.
        errorStringDict = {
            "ConfigGeneralForm": "General",
            "ConfigLDAPForm": "LDAP",
            "ConfigSecurityForm": "Security",
            "ConfigLoggingForm": "Logging",
            "ConfigServicesForm": "Services",
            "ConfigDownloadForm": "Downloading",
            "ConfigCriptsForm": "CRIPTs",
        }

        analyst = request.user.username
        errors = []
        # Validate every form; drop valid ones from the tab-label map and
        # collect field errors from the invalid ones.
        for form in forms:
            if form.is_valid():
                formName = type(form).__name__
                errorStringDict.pop(formName, None)
            else:
                errors.extend(form.errors)

        # Submit only when every tab validated cleanly.
        if not errorStringDict:
            result = modify_configuration(forms, analyst)
            message = result['message']
        elif len(errorStringDict) == 2:
            formsWithErrors = " and ".join(errorStringDict.values())
            message = "Invalid Form: The " + formsWithErrors + " tabs have errors."
        elif len(errorStringDict) > 1:
            # Three or more tabs: comma-join, inserting "and" before the last.
            formsWithErrors = ", ".join(errorStringDict.values())
            lastWhiteSpace = formsWithErrors.rfind(" ")
            formsWithErrors = formsWithErrors[:lastWhiteSpace] + " and " + formsWithErrors[lastWhiteSpace:]
            message = "Invalid Form: The " + formsWithErrors + " tabs have errors."
        else:
            # Exactly one tab with errors: singular message. list() keeps
            # this working on Python 3, where dict.values() returns a
            # non-indexable view (it is a no-op on Python 2).
            formsWithErrors = list(errorStringDict.values())[0]
            message = "Invalid Form: The " + formsWithErrors + " tab has errors."

        message = {'message': message,
                   'errors': errors}
        return HttpResponse(json.dumps(message), content_type="application/json")
    else:
        return render_to_response('error.html',
                                  {'error': 'Expected AJAX POST'},
                                  RequestContext(request))
| mit |
jasonjoh/pythoncontacts | contacts/urls.py | 1 | 2326 | # Copyright (c) Microsoft. All rights reserved. Licensed under the MIT license. See full license at the bottom of this file.
from django.conf.urls import patterns, url
from contacts import views
# URL routes for the contacts app: each entry maps a path under /contacts/
# to a view in contacts.views; route names are used by reverse()/templates.
urlpatterns = patterns('',
                       # The home view ('/contacts/')
                       url(r'^$', views.index, name='index'),
                       # Used to start OAuth2 flow ('/contacts/connect/')
                       url(r'^connect/$', views.connect, name='connect'),
                       # Used as redirect target in OAuth2 flow ('/contacts/authorize/')
                       url(r'^authorize/$', views.authorize, name='authorize'),
                       # Displays a form to create a new contact ('/contacts/new/')
                       url(r'^new/$', views.new, name='new'),
                       # Invoked to create a new contact in Office 365 ('/contacts/create/')
                       url(r'^create/$', views.create, name='create'),
                       # Displays an existing contact in an editable form ('/contacts/edit/<contact_id>/')
                       url(r'^edit/(?P<contact_id>.+)/$', views.edit, name='edit'),
                       # Invoked to update an existing contact ('/contacts/update/<contact_id>/')
                       url(r'^update/(?P<contact_id>.+)/$', views.update, name='update'),
                       # Invoked to delete an existing contact ('/contacts/delete/<contact_id>/')
                       url(r'^delete/(?P<contact_id>.+)/$', views.delete, name='delete'),
                       )
# MIT License:
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# ""Software""), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED ""AS IS"", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | mit |
garverp/gnuradio | gr-trellis/examples/python/test_cpm.py | 24 | 5571 | #!/usr/bin/env python
##################################################
# Gnuradio Python Flow Graph
# Title: CPM test
# Author: Achilleas Anastasopoulos
# Description: gnuradio flow graph
# Generated: Thu Feb 19 23:16:23 2009
##################################################
from gnuradio import gr
from gnuradio import trellis, digital, filter, blocks
from grc_gnuradio import blks2 as grc_blks2
import math
import numpy
from gnuradio import trellis
from gnuradio.trellis import fsm_utils
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
import scipy.stats
except ImportError:
print "Error: Program requires scipy (see: www.scipy.org)."
sys.exit(1)
def run_test(seed,blocksize):
    """Simulate one GMSK/CPM block over an AWGN channel and Viterbi-decode it.

    :param seed: RNG seed for both the data bits and the noise source.
    :param blocksize: number of payload symbols (head/tail padding added).
    :returns: tuple ``(err, perr)`` -- symbol error count within the payload
              and a 0/1 block-error indicator.
    """
    tb = gr.top_block()

    ##################################################
    # Variables
    ##################################################
    # M-ary alphabet, modulation index h = K/P, pulse length L symbols,
    # Q samples per symbol.
    M = 2
    K = 1
    P = 2
    h = (1.0*K)/P
    L = 3
    Q = 4
    frac = 0.99
    f = trellis.fsm(P,M,L)

    # CPFSK signals
    #p = numpy.ones(L*Q)
    #p = p/sum(p)*Q/2.0;
    #q = numpy.cumsum(p)
    #q = q/q[-1]/2.0;

    # GMSK signals
    # Gaussian frequency pulse p and its normalized integral q (phase pulse).
    # NOTE(review): uses scipy.special although only scipy.stats is imported
    # at module level -- appears to rely on scipy loading special as a side
    # effect; confirm with the scipy version in use.
    BT=0.3;
    tt=numpy.arange(0,L*Q)/(1.0*Q)-L/2.0;
    #print tt
    p=(0.5*scipy.special.erfc(2*math.pi*BT*(tt-0.5)/math.sqrt(math.log(2.0))/math.sqrt(2.0))-0.5*scipy.special.erfc(2*math.pi*BT*(tt+0.5)/math.sqrt(math.log(2.0))/math.sqrt(2.0)))/2.0;
    p=p/sum(p)*Q/2.0;
    #print p
    q=numpy.cumsum(p);
    q=q/q[-1]/2.0;
    #print q

    # Decompose the CPM signal set; Sf are the matched-filter signal
    # coordinates, Ff the basis filters, N the number of basis functions.
    (f0T,SS,S,F,Sf,Ff,N) = fsm_utils.make_cpm_signals(K,P,M,L,q,frac)
    #print N
    #print Ff
    Ffa = numpy.insert(Ff,Q,numpy.zeros(N),axis=0)
    #print Ffa
    MF = numpy.fliplr(numpy.transpose(Ffa))
    #print MF
    E = numpy.sum(numpy.abs(Sf)**2,axis=0)
    Es = numpy.sum(E)/f.O()
    #print Es
    constellation = numpy.reshape(numpy.transpose(Sf),N*f.O())
    #print Ff
    #print Sf
    #print constellation
    #print numpy.max(numpy.abs(SS - numpy.dot(Ff , Sf)))

    # Noise level from the target Es/N0 in dB.
    EsN0_db = 10.0
    N0 = Es * 10.0**(-(1.0*EsN0_db)/10.0)
    #N0 = 0.0
    #print N0

    # Random payload with zeroed head/tail padding to flush the trellis.
    head = 4
    tail = 4
    numpy.random.seed(seed*666)
    data = numpy.random.randint(0, M, head+blocksize+tail+1)
    #data = numpy.zeros(blocksize+1+head+tail,'int')
    for i in range(head):
        data[i]=0
    for i in range(tail+1):
        data[-i]=0

    ##################################################
    # Blocks
    ##################################################
    random_source_x_0 = blocks.vector_source_b(data.tolist(), False)
    digital_chunks_to_symbols_xx_0 = digital.chunks_to_symbols_bf((-1, 1), 1)
    filter_interp_fir_filter_xxx_0 = filter.interp_fir_filter_fff(Q, p)
    analog_frequency_modulator_fc_0 = analog.frequency_modulator_fc(2*math.pi*h*(1.0/Q))

    blocks_add_vxx_0 = blocks.add_vcc(1)
    analog_noise_source_x_0 = analog.noise_source_c(analog.GR_GAUSSIAN, (N0/2.0)**0.5, -long(seed))

    # Downconvert by the nominal carrier offset f0T before matched filtering.
    blocks_multiply_vxx_0 = blocks.multiply_vcc(1)
    analog_sig_source_x_0 = analog.sig_source_c(Q, analog.GR_COS_WAVE, -f0T, 1, 0)
    # only works for N=2, do it manually for N>2...
    filter_fir_filter_xxx_0_0 = filter.fir_filter_ccc(Q, MF[0].conjugate())
    filter_fir_filter_xxx_0_0_0 = filter.fir_filter_ccc(Q, MF[1].conjugate())
    blocks_streams_to_stream_0 = blocks.streams_to_stream(gr.sizeof_gr_complex*1, int(N))
    blocks_skiphead_0 = blocks.skiphead(gr.sizeof_gr_complex*1, int(N*(1+0)))
    viterbi = trellis.viterbi_combined_cb(f, head+blocksize+tail, 0, -1, int(N),
                                          constellation, digital.TRELLIS_EUCLIDEAN)

    blocks_vector_sink_x_0 = blocks.vector_sink_b()

    ##################################################
    # Connections
    ##################################################
    tb.connect((random_source_x_0, 0), (digital_chunks_to_symbols_xx_0, 0))
    tb.connect((digital_chunks_to_symbols_xx_0, 0), (filter_interp_fir_filter_xxx_0, 0))
    tb.connect((filter_interp_fir_filter_xxx_0, 0), (analog_frequency_modulator_fc_0, 0))
    tb.connect((analog_frequency_modulator_fc_0, 0), (blocks_add_vxx_0, 0))
    tb.connect((analog_noise_source_x_0, 0), (blocks_add_vxx_0, 1))
    tb.connect((blocks_add_vxx_0, 0), (blocks_multiply_vxx_0, 0))
    tb.connect((analog_sig_source_x_0, 0), (blocks_multiply_vxx_0, 1))
    tb.connect((blocks_multiply_vxx_0, 0), (filter_fir_filter_xxx_0_0, 0))
    tb.connect((blocks_multiply_vxx_0, 0), (filter_fir_filter_xxx_0_0_0, 0))
    tb.connect((filter_fir_filter_xxx_0_0, 0), (blocks_streams_to_stream_0, 0))
    tb.connect((filter_fir_filter_xxx_0_0_0, 0), (blocks_streams_to_stream_0, 1))
    tb.connect((blocks_streams_to_stream_0, 0), (blocks_skiphead_0, 0))
    tb.connect((blocks_skiphead_0, 0), (viterbi, 0))
    tb.connect((viterbi, 0), (blocks_vector_sink_x_0, 0))

    tb.run()
    dataest = blocks_vector_sink_x_0.data()
    #print data
    #print numpy.array(dataest)

    # Count symbol errors inside the payload region only (padding excluded).
    perr = 0
    err = 0
    for i in range(blocksize):
        if data[head+i] != dataest[head+i]:
            #print i
            err += 1
    if err != 0 :
        perr = 1
    return (err,perr)
if __name__ == '__main__':
    # Monte-Carlo driver: 10000 independent blocks, accumulating symbol
    # errors (ss) and block errors (ee); progress printed every 100 blocks
    # as running symbol- and block-error rates.
    blocksize = 1000
    ss=0
    ee=0
    for i in range(10000):
        (s,e) = run_test(i,blocksize)
        ss += s
        ee += e
        if (i+1) % 100 == 0:
            print i+1,ss,ee,(1.0*ss)/(i+1)/(1.0*blocksize),(1.0*ee)/(i+1)
    print i+1,ss,ee,(1.0*ss)/(i+1)/(1.0*blocksize),(1.0*ee)/(i+1)
| gpl-3.0 |
KanoComputing/kano-settings | kano_settings/system/wallpaper.py | 1 | 3383 | #!/usr/bin/env python
# wallpaper.py
#
# Copyright (C) 2014-2016 Kano Computing Ltd.
# License: http://www.gnu.org/licenses/gpl-2.0.txt GNU GPL v2
#
# Backend wallpaper functions
#
from kano.logging import logger
from kano.utils import get_user_unsudoed
import os
# System-wide kdesk configuration shipped with kano-desktop.
KDESKRC_SYSTEM = "/usr/share/kano-desktop/kdesk/.kdeskrc"
# Per-user kdesk configuration; entries here override the system file.
KDESKRC_HOME_TEMPLATE = "/home/{user}/.kdeskrc"
# Template for the config key name, e.g. "Background.File-medium".
PARAM_NAME_TEMPLATE = "Background.File-{size}"
# Wallpaper size keys mapped to the filename suffix for that resolution.
SIZE_SUFFIX_MAP = {
    "medium": "-1024.png",
    "4-3": "-4-3.png",
    "16-9": "-16-9.png"
}
def _get_wallpaper_from_kdeskrc(kdeskrc_path):
    """
    Parses a .kdeskrc file and looks for wallpaper configuration keys.

    :param kdeskrc_path: Location of the file to be parsed.
    :type kdeskrc_path: str
    :return: A map of wallpaper types to file paths.
    :rtype: dict
    """
    wallpapers = {}

    if not os.path.isfile(kdeskrc_path):
        return wallpapers

    with open(kdeskrc_path, 'r') as kdesk_conf:
        for kdesk_conf_line in kdesk_conf:
            # Iterate keys directly: the suffix value was never used here,
            # and plain iteration works on both Python 2 and 3 (the old
            # iteritems() call was Python-2-only).
            for key in SIZE_SUFFIX_MAP:
                conf_param = PARAM_NAME_TEMPLATE.format(size=key)
                token = "{}:".format(conf_param)
                if token in kdesk_conf_line:
                    # Everything after "Background.File-<size>:" is the path.
                    wallpapers[key] = kdesk_conf_line.split(token)[-1].strip()

    return wallpapers
def get_current_wallpapers():
    """
    Returns the current wallpaper configuration.

    :return: A map of wallpaper types to file paths.
    :rtype: dict
    """
    # Start with every size unset, then layer the system config under the
    # user's home config (home entries win where both define a key).
    # Plain dict iteration replaces the Python-2-only iterkeys().
    wallpapers = {key: None for key in SIZE_SUFFIX_MAP}
    wallpapers.update(_get_wallpaper_from_kdeskrc(KDESKRC_SYSTEM))
    home_dir = KDESKRC_HOME_TEMPLATE.format(user=get_user_unsudoed())
    wallpapers.update(_get_wallpaper_from_kdeskrc(home_dir))
    return wallpapers
def change_wallpaper(path, name):
    """Set the desktop wallpaper to the image family ``path/name*``.

    Rewrites (or appends) the ``Background.File-<size>`` entries in the
    user's ~/.kdeskrc, then asks kdesk to refresh. Returns 0.
    """
    logger.info('set_wallpaper / change_wallpaper image_name:{}'.format(name))
    # home directory
    user = get_user_unsudoed()
    deskrc_path = KDESKRC_HOME_TEMPLATE.format(user=user)
    # Build one replacement config line per wallpaper size.
    conf_params = {}
    for size, suffix in SIZE_SUFFIX_MAP.iteritems():
        conf_param = PARAM_NAME_TEMPLATE.format(size=size)
        image = os.path.join(path, "{}{}".format(name, suffix))
        conf_params[conf_param] = " {param}: {image}".format(param=conf_param,
                                                             image=image)
    found = False
    newlines = []
    if os.path.isfile(deskrc_path):
        with open(deskrc_path, 'r') as kdesk_conf:
            for kdesk_conf_line in kdesk_conf:
                # Replace any line that already sets one of our params.
                for conf_param, conf_line in conf_params.iteritems():
                    if conf_param in kdesk_conf_line:
                        kdesk_conf_line = conf_line
                        found = True
                # NOTE(review): lines read from the file already end in
                # '\n', so untouched lines get a second newline here —
                # confirm this doubling is intended/harmless for kdesk.
                newlines.append(kdesk_conf_line + '\n')
    if found:
        # Overwrite config file with new lines
        with open(deskrc_path, 'w') as outfile:
            outfile.writelines(newlines)
    else:
        # Not found so add it
        with open(deskrc_path, 'a') as outfile:
            for conf_line in conf_params.itervalues():
                outfile.write(conf_line + '\n')
    # Refresh the wallpaper
    os.system('sudo -u {user} kdesk -w'.format(user=user))
    return 0
# Module debug: running this file directly dumps the parsed wallpaper config.
if __name__ == '__main__':
    print get_current_wallpapers()
| gpl-2.0 |
adamchainz/ansible | lib/ansible/modules/network/nxos/nxos_vtp_domain.py | 46 | 5924 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nxos_vtp_domain
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages VTP domain configuration.
description:
- Manages VTP domain configuration.
author:
- Gabriele Gerbino (@GGabriele)
notes:
- VTP feature must be active on the device to use this module.
- This module is used to manage only VTP domain names.
- VTP domain names are case-sensible.
- If it's never been configured before, VTP version is set to 1 by default.
Otherwise, it leaves the previous configured version untouched.
Use M(nxos_vtp_version) to change it.
- Use this in combination with M(nxos_vtp_password) and M(nxos_vtp_version)
to fully manage VTP operations.
options:
domain:
description:
- VTP domain name.
required: true
'''
EXAMPLES = '''
# ENSURE VTP DOMAIN IS CONFIGURED
- nxos_vtp_domain:
domain: ntc
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"domain": "ntc"}
existing:
description:
- k/v pairs of existing vtp domain
returned: always
type: dict
sample: {"domain": "testing", "version": "2", "vtp_password": "\"}
end_state:
description: k/v pairs of vtp domain after module execution
returned: always
type: dict
sample: {"domain": "ntc", "version": "2", "vtp_password": "\"}
updates:
description: command sent to the device
returned: always
type: list
sample: ["vtp domain ntc"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
from ansible.module_utils.nxos import get_config, load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
import re
def execute_show_command(command, module, command_type='cli_show'):
    """Run a show command on the device and return the response body list.

    ``command_type`` is accepted for call-site compatibility but is not
    used by this implementation.
    """
    if module.params['transport'] == 'cli':
        if 'status' not in command:
            # Ask the device for structured (JSON) output over the CLI.
            command += ' | json'
        cmds = [command]
        body = run_commands(module, cmds)
    elif module.params['transport'] == 'nxapi':
        cmds = [command]
        body = run_commands(module, cmds)
    # NOTE(review): if transport is neither 'cli' nor 'nxapi', ``body`` is
    # never assigned and this raises UnboundLocalError — confirm transports
    # are constrained upstream by the shared argument spec.
    return body
def flatten_list(command_lists):
    """Flatten a mixed sequence of commands and command lists by one level.

    Elements that are lists are expanded in place; any other element
    (e.g. a plain command string or a tuple) is kept as-is.
    """
    flattened = []
    for entry in command_lists:
        flattened.extend(entry if isinstance(entry, list) else [entry])
    return flattened
def get_vtp_config(module):
    """Return the device's VTP configuration parsed from 'show vtp status'.

    :return: dict with 'domain', 'version' and 'vtp_password' keys, or an
        empty dict when the domain/version could not be parsed.
    """
    command = 'show vtp status'
    body = execute_show_command(
        command, module, command_type='cli_show_ascii')[0]
    vtp_parsed = {}
    if body:
        version_regex = '.*VTP version running\s+:\s+(?P<version>\d).*'
        domain_regex = '.*VTP Domain Name\s+:\s+(?P<domain>\S+).*'
        try:
            match_version = re.match(version_regex, body, re.DOTALL)
            version = match_version.groupdict()['version']
        except AttributeError:
            # re.match returned None: version line missing from the output.
            version = ''
        try:
            match_domain = re.match(domain_regex, body, re.DOTALL)
            domain = match_domain.groupdict()['domain']
        except AttributeError:
            domain = ''
        # Only report a configuration when both values were found.
        if domain and version:
            vtp_parsed['domain'] = domain
            vtp_parsed['version'] = version
            vtp_parsed['vtp_password'] = get_vtp_password(module)
    return vtp_parsed
def get_vtp_password(module):
    """Return the configured VTP password, or '' when none is set."""
    body = execute_show_command('show vtp password', module)[0]
    password = body['passwd']
    return str(password) if password else ''
def main():
    """Ansible module entry point: ensure the VTP domain is configured."""
    argument_spec = dict(
        domain=dict(type='str', required=True),
    )
    argument_spec.update(nxos_argument_spec)
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    warnings = list()
    check_args(module, warnings)
    domain = module.params['domain']
    existing = get_vtp_config(module)
    end_state = existing
    args = dict(domain=domain)
    changed = False
    # Drop unset parameters, then keep only the k/v pairs that differ
    # from what is already configured on the device.
    proposed = dict((k, v) for k, v in args.items() if v is not None)
    delta = dict(set(proposed.items()).difference(existing.items()))
    commands = []
    if delta:
        commands.append(['vtp domain {0}'.format(domain)])
    cmds = flatten_list(commands)
    if cmds:
        if module.check_mode:
            # Check mode: report the would-be change without applying it.
            module.exit_json(changed=True, commands=cmds)
        else:
            changed = True
            load_config(module, cmds)
            # Re-read the device so end_state reflects the applied change.
            end_state = get_vtp_config(module)
            if 'configure' in cmds:
                cmds.pop(0)
    results = {}
    results['proposed'] = proposed
    results['existing'] = existing
    results['end_state'] = end_state
    results['updates'] = cmds
    results['changed'] = changed
    results['warnings'] = warnings
    module.exit_json(**results)
# Standard Ansible module entry point: only run when executed directly.
if __name__ == '__main__':
    main()
| gpl-3.0 |
vizual54/MissionPlanner | Lib/uuid.py | 55 | 21655 | r"""UUID objects (universally unique identifiers) according to RFC 4122.
This module provides immutable UUID objects (class UUID) and the functions
uuid1(), uuid3(), uuid4(), uuid5() for generating version 1, 3, 4, and 5
UUIDs as specified in RFC 4122.
If all you want is a unique ID, you should probably call uuid1() or uuid4().
Note that uuid1() may compromise privacy since it creates a UUID containing
the computer's network address. uuid4() creates a random UUID.
Typical usage:
>>> import uuid
# make a UUID based on the host ID and current time
>>> uuid.uuid1()
UUID('a8098c1a-f86e-11da-bd1a-00112444be1e')
# make a UUID using an MD5 hash of a namespace UUID and a name
>>> uuid.uuid3(uuid.NAMESPACE_DNS, 'python.org')
UUID('6fa459ea-ee8a-3ca4-894e-db77e160355e')
# make a random UUID
>>> uuid.uuid4()
UUID('16fd2706-8baf-433b-82eb-8c7fada847da')
# make a UUID using a SHA-1 hash of a namespace UUID and a name
>>> uuid.uuid5(uuid.NAMESPACE_DNS, 'python.org')
UUID('886313e1-3b8a-5372-9b90-0c9aee199e5d')
# make a UUID from a string of hex digits (braces and hyphens ignored)
>>> x = uuid.UUID('{00010203-0405-0607-0809-0a0b0c0d0e0f}')
# convert a UUID to a string of hex digits in standard form
>>> str(x)
'00010203-0405-0607-0809-0a0b0c0d0e0f'
# get the raw 16 bytes of the UUID
>>> x.bytes
'\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f'
# make a UUID from a 16-byte string
>>> uuid.UUID(bytes=x.bytes)
UUID('00010203-0405-0607-0809-0a0b0c0d0e0f')
"""
__author__ = 'Ka-Ping Yee <ping@zesty.ca>'
RESERVED_NCS, RFC_4122, RESERVED_MICROSOFT, RESERVED_FUTURE = [
'reserved for NCS compatibility', 'specified in RFC 4122',
'reserved for Microsoft compatibility', 'reserved for future definition']
class UUID(object):
    """Instances of the UUID class represent UUIDs as specified in RFC 4122.

    UUID objects are immutable, hashable, and usable as dictionary keys.
    Converting a UUID to a string with str() yields something in the form
    '12345678-1234-1234-1234-123456789abc'. The UUID constructor accepts
    five possible forms: a similar string of hexadecimal digits, or a tuple
    of six integer fields (with 32-bit, 16-bit, 16-bit, 8-bit, 8-bit, and
    48-bit values respectively) as an argument named 'fields', or a string
    of 16 bytes (with all the integer fields in big-endian order) as an
    argument named 'bytes', or a string of 16 bytes (with the first three
    fields in little-endian order) as an argument named 'bytes_le', or a
    single 128-bit integer as an argument named 'int'.

    UUIDs have these read-only attributes:

        bytes       the UUID as a 16-byte string (containing the six
                    integer fields in big-endian byte order)

        bytes_le    the UUID as a 16-byte string (with time_low, time_mid,
                    and time_hi_version in little-endian byte order)

        fields      a tuple of the six integer fields of the UUID,
                    which are also available as six individual attributes
                    and two derived attributes:

            time_low                the first 32 bits of the UUID
            time_mid                the next 16 bits of the UUID
            time_hi_version         the next 16 bits of the UUID
            clock_seq_hi_variant    the next 8 bits of the UUID
            clock_seq_low           the next 8 bits of the UUID
            node                    the last 48 bits of the UUID

            time                    the 60-bit timestamp
            clock_seq               the 14-bit sequence number

        hex         the UUID as a 32-character hexadecimal string

        int         the UUID as a 128-bit integer

        urn         the UUID as a URN as specified in RFC 4122

        variant     the UUID variant (one of the constants RESERVED_NCS,
                    RFC_4122, RESERVED_MICROSOFT, or RESERVED_FUTURE)

        version     the UUID version number (1 through 5, meaningful only
                    when the variant is RFC_4122)
    """

    def __init__(self, hex=None, bytes=None, bytes_le=None, fields=None,
                       int=None, version=None):
        r"""Create a UUID from either a string of 32 hexadecimal digits,
        a string of 16 bytes as the 'bytes' argument, a string of 16 bytes
        in little-endian order as the 'bytes_le' argument, a tuple of six
        integers (32-bit time_low, 16-bit time_mid, 16-bit time_hi_version,
        8-bit clock_seq_hi_variant, 8-bit clock_seq_low, 48-bit node) as
        the 'fields' argument, or a single 128-bit integer as the 'int'
        argument.  When a string of hex digits is given, curly braces,
        hyphens, and a URN prefix are all optional.  For example, these
        expressions all yield the same UUID:

        UUID('{12345678-1234-5678-1234-567812345678}')
        UUID('12345678123456781234567812345678')
        UUID('urn:uuid:12345678-1234-5678-1234-567812345678')
        UUID(bytes='\x12\x34\x56\x78'*4)
        UUID(bytes_le='\x78\x56\x34\x12\x34\x12\x78\x56' +
                      '\x12\x34\x56\x78\x12\x34\x56\x78')
        UUID(fields=(0x12345678, 0x1234, 0x5678, 0x12, 0x34, 0x567812345678))
        UUID(int=0x12345678123456781234567812345678)

        Exactly one of 'hex', 'bytes', 'bytes_le', 'fields', or 'int' must
        be given.  The 'version' argument is optional; if given, the resulting
        UUID will have its variant and version set according to RFC 4122,
        overriding the given 'hex', 'bytes', 'bytes_le', 'fields', or 'int'.
        """

        # Exactly one of the five alternative representations must be given.
        if [hex, bytes, bytes_le, fields, int].count(None) != 4:
            raise TypeError('need one of hex, bytes, bytes_le, fields, or int')
        if hex is not None:
            # Strip optional URN prefix, braces and hyphens before parsing.
            hex = hex.replace('urn:', '').replace('uuid:', '')
            hex = hex.strip('{}').replace('-', '')
            if len(hex) != 32:
                raise ValueError('badly formed hexadecimal UUID string')
            int = long(hex, 16)
        if bytes_le is not None:
            if len(bytes_le) != 16:
                raise ValueError('bytes_le is not a 16-char string')
            # Byte-swap the first three fields into big-endian order.
            bytes = (bytes_le[3] + bytes_le[2] + bytes_le[1] + bytes_le[0] +
                     bytes_le[5] + bytes_le[4] + bytes_le[7] + bytes_le[6] +
                     bytes_le[8:])
        if bytes is not None:
            if len(bytes) != 16:
                raise ValueError('bytes is not a 16-char string')
            int = long(('%02x'*16) % tuple(map(ord, bytes)), 16)
        if fields is not None:
            if len(fields) != 6:
                raise ValueError('fields is not a 6-tuple')
            (time_low, time_mid, time_hi_version,
             clock_seq_hi_variant, clock_seq_low, node) = fields
            if not 0 <= time_low < 1<<32L:
                raise ValueError('field 1 out of range (need a 32-bit value)')
            if not 0 <= time_mid < 1<<16L:
                raise ValueError('field 2 out of range (need a 16-bit value)')
            if not 0 <= time_hi_version < 1<<16L:
                raise ValueError('field 3 out of range (need a 16-bit value)')
            if not 0 <= clock_seq_hi_variant < 1<<8L:
                raise ValueError('field 4 out of range (need an 8-bit value)')
            if not 0 <= clock_seq_low < 1<<8L:
                raise ValueError('field 5 out of range (need an 8-bit value)')
            if not 0 <= node < 1<<48L:
                raise ValueError('field 6 out of range (need a 48-bit value)')
            # Pack the six fields into one 128-bit integer.
            clock_seq = (clock_seq_hi_variant << 8L) | clock_seq_low
            int = ((time_low << 96L) | (time_mid << 80L) |
                   (time_hi_version << 64L) | (clock_seq << 48L) | node)
        if int is not None:
            if not 0 <= int < 1<<128L:
                raise ValueError('int is out of range (need a 128-bit value)')
        if version is not None:
            if not 1 <= version <= 5:
                raise ValueError('illegal version number')
            # Set the variant to RFC 4122.
            int &= ~(0xc000 << 48L)
            int |= 0x8000 << 48L
            # Set the version number.
            int &= ~(0xf000 << 64L)
            int |= version << 76L
        # Write through __dict__ because __setattr__ enforces immutability.
        self.__dict__['int'] = int

    def __cmp__(self, other):
        # Python 2 comparison protocol: order UUIDs by their integer value.
        if isinstance(other, UUID):
            return cmp(self.int, other.int)
        return NotImplemented

    def __hash__(self):
        return hash(self.int)

    def __int__(self):
        return self.int

    def __repr__(self):
        return 'UUID(%r)' % str(self)

    def __setattr__(self, name, value):
        # UUIDs are immutable; the only state is set in __init__ via __dict__.
        raise TypeError('UUID objects are immutable')

    def __str__(self):
        hex = '%032x' % self.int
        return '%s-%s-%s-%s-%s' % (
            hex[:8], hex[8:12], hex[12:16], hex[16:20], hex[20:])

    def get_bytes(self):
        # Serialize the 128-bit integer big-endian, one byte at a time.
        bytes = ''
        for shift in range(0, 128, 8):
            bytes = chr((self.int >> shift) & 0xff) + bytes
        return bytes

    bytes = property(get_bytes)

    def get_bytes_le(self):
        # First three fields little-endian, remainder unchanged.
        bytes = self.bytes
        return (bytes[3] + bytes[2] + bytes[1] + bytes[0] +
                bytes[5] + bytes[4] + bytes[7] + bytes[6] + bytes[8:])

    bytes_le = property(get_bytes_le)

    def get_fields(self):
        return (self.time_low, self.time_mid, self.time_hi_version,
                self.clock_seq_hi_variant, self.clock_seq_low, self.node)

    fields = property(get_fields)

    def get_time_low(self):
        return self.int >> 96L

    time_low = property(get_time_low)

    def get_time_mid(self):
        return (self.int >> 80L) & 0xffff

    time_mid = property(get_time_mid)

    def get_time_hi_version(self):
        return (self.int >> 64L) & 0xffff

    time_hi_version = property(get_time_hi_version)

    def get_clock_seq_hi_variant(self):
        return (self.int >> 56L) & 0xff

    clock_seq_hi_variant = property(get_clock_seq_hi_variant)

    def get_clock_seq_low(self):
        return (self.int >> 48L) & 0xff

    clock_seq_low = property(get_clock_seq_low)

    def get_time(self):
        # Reassemble the 60-bit timestamp from its three fields.
        return (((self.time_hi_version & 0x0fffL) << 48L) |
                (self.time_mid << 32L) | self.time_low)

    time = property(get_time)

    def get_clock_seq(self):
        return (((self.clock_seq_hi_variant & 0x3fL) << 8L) |
                self.clock_seq_low)

    clock_seq = property(get_clock_seq)

    def get_node(self):
        return self.int & 0xffffffffffff

    node = property(get_node)

    def get_hex(self):
        return '%032x' % self.int

    hex = property(get_hex)

    def get_urn(self):
        return 'urn:uuid:' + str(self)

    urn = property(get_urn)

    def get_variant(self):
        # The variant is encoded in the top bits of clock_seq_hi_variant.
        if not self.int & (0x8000 << 48L):
            return RESERVED_NCS
        elif not self.int & (0x4000 << 48L):
            return RFC_4122
        elif not self.int & (0x2000 << 48L):
            return RESERVED_MICROSOFT
        else:
            return RESERVED_FUTURE

    variant = property(get_variant)

    def get_version(self):
        # The version bits are only meaningful for RFC 4122 UUIDs.
        if self.variant == RFC_4122:
            return int((self.int >> 76L) & 0xf)

    version = property(get_version)
def _find_mac(command, args, hw_identifiers, get_index):
    """Scan the output of a system command for a hardware (MAC) address.

    Runs ``command args``, lower-cases and splits each output line, and
    when any word matches one of hw_identifiers returns the word selected
    by get_index(i) parsed as a hex integer.  Returns None on failure.
    """
    import os
    for dir in ['', '/sbin/', '/usr/sbin']:
        executable = os.path.join(dir, command)
        if not os.path.exists(executable):
            continue

        try:
            # LC_ALL to get English output, 2>/dev/null to
            # prevent output on stderr
            cmd = 'LC_ALL=C %s %s 2>/dev/null' % (executable, args)
            with os.popen(cmd) as pipe:
                for line in pipe:
                    words = line.lower().split()
                    for i in range(len(words)):
                        if words[i] in hw_identifiers:
                            return int(
                                words[get_index(i)].replace(':', ''), 16)
        except IOError:
            continue
    return None
def _ifconfig_getnode():
    """Get the hardware address on Unix by running ifconfig."""

    # This works on Linux ('' or '-a'), Tru64 ('-av'), but not all Unixes.
    for args in ('', '-a', '-av'):
        # The MAC follows the 'hwaddr'/'ether' identifier on the same line.
        mac = _find_mac('ifconfig', args, ['hwaddr', 'ether'], lambda i: i+1)
        if mac:
            return mac

    import socket
    ip_addr = socket.gethostbyname(socket.gethostname())

    # Try getting the MAC addr from arp based on our IP address (Solaris).
    mac = _find_mac('arp', '-an', [ip_addr], lambda i: -1)
    if mac:
        return mac

    # This might work on HP-UX.
    mac = _find_mac('lanscan', '-ai', ['lan0'], lambda i: 0)
    if mac:
        return mac

    return None
def _ipconfig_getnode():
    """Get the hardware address on Windows by running ipconfig.exe."""
    import os, re
    dirs = ['', r'c:\windows\system32', r'c:\winnt\system32']
    try:
        import ctypes
        buffer = ctypes.create_string_buffer(300)
        ctypes.windll.kernel32.GetSystemDirectoryA(buffer, 300)
        # Prefer the real system directory over the hard-coded guesses.
        dirs.insert(0, buffer.value.decode('mbcs'))
    except:
        pass
    for dir in dirs:
        try:
            pipe = os.popen(os.path.join(dir, 'ipconfig') + ' /all')
        except IOError:
            # NOTE(review): if os.popen raised here on the first candidate,
            # 'pipe' would be unbound when the finally clause runs —
            # confirm os.popen cannot actually raise IOError.
            continue
        else:
            for line in pipe:
                value = line.split(':')[-1].strip().lower()
                # Physical addresses look like "00-1a-2b-3c-4d-5e".
                if re.match('([0-9a-f][0-9a-f]-){5}[0-9a-f][0-9a-f]', value):
                    return int(value.replace('-', ''), 16)
        finally:
            pipe.close()
def _netbios_getnode():
    """Get the hardware address on Windows using NetBIOS calls.
    See http://support.microsoft.com/kb/118623 for details."""
    import win32wnet, netbios
    # First enumerate all LAN adapters.
    ncb = netbios.NCB()
    ncb.Command = netbios.NCBENUM
    ncb.Buffer = adapters = netbios.LANA_ENUM()
    adapters._pack()
    if win32wnet.Netbios(ncb) != 0:
        return
    adapters._unpack()
    for i in range(adapters.length):
        # Reset the adapter, then query its status to get the MAC bytes.
        ncb.Reset()
        ncb.Command = netbios.NCBRESET
        ncb.Lana_num = ord(adapters.lana[i])
        if win32wnet.Netbios(ncb) != 0:
            continue
        ncb.Reset()
        ncb.Command = netbios.NCBASTAT
        ncb.Lana_num = ord(adapters.lana[i])
        ncb.Callname = '*'.ljust(16)
        ncb.Buffer = status = netbios.ADAPTER_STATUS()
        if win32wnet.Netbios(ncb) != 0:
            continue
        status._unpack()
        # Pack the six address bytes into one 48-bit integer.
        bytes = map(ord, status.adapter_address)
        return ((bytes[0]<<40L) + (bytes[1]<<32L) + (bytes[2]<<24L) +
                (bytes[3]<<16L) + (bytes[4]<<8L) + bytes[5])
# Thanks to Thomas Heller for ctypes and for his help with its use here.
# If ctypes is available, use it to find system routines for UUID generation.
# Probe platform libraries once at import time for native UUID generators;
# any failure leaves the three hooks as None and we fall back to pure Python.
_uuid_generate_random = _uuid_generate_time = _UuidCreate = None
try:
    import ctypes, ctypes.util

    # The uuid_generate_* routines are provided by libuuid on at least
    # Linux and FreeBSD, and provided by libc on Mac OS X.
    for libname in ['uuid', 'c']:
        try:
            lib = ctypes.CDLL(ctypes.util.find_library(libname))
        except:
            continue
        if hasattr(lib, 'uuid_generate_random'):
            _uuid_generate_random = lib.uuid_generate_random
        if hasattr(lib, 'uuid_generate_time'):
            _uuid_generate_time = lib.uuid_generate_time

    # The uuid_generate_* functions are broken on MacOS X 10.5, as noted
    # in issue #8621 the function generates the same sequence of values
    # in the parent process and all children created using fork (unless
    # those children use exec as well).
    #
    # Assume that the uuid_generate functions are broken from 10.5 onward,
    # the test can be adjusted when a later version is fixed.
    import sys
    if sys.platform == 'darwin':
        import os
        if int(os.uname()[2].split('.')[0]) >= 9:
            _uuid_generate_random = _uuid_generate_time = None

    # On Windows prior to 2000, UuidCreate gives a UUID containing the
    # hardware address.  On Windows 2000 and later, UuidCreate makes a
    # random UUID and UuidCreateSequential gives a UUID containing the
    # hardware address.  These routines are provided by the RPC runtime.
    # NOTE:  at least on Tim's WinXP Pro SP2 desktop box, while the last
    # 6 bytes returned by UuidCreateSequential are fixed, they don't appear
    # to bear any relationship to the MAC address of any network device
    # on the box.
    try:
        lib = ctypes.windll.rpcrt4
    except:
        lib = None
    _UuidCreate = getattr(lib, 'UuidCreateSequential',
                          getattr(lib, 'UuidCreate', None))
except:
    pass
def _unixdll_getnode():
    """Get the hardware address on Unix using ctypes."""
    # Generate a time-based UUID natively and extract its node field.
    _buffer = ctypes.create_string_buffer(16)
    _uuid_generate_time(_buffer)
    return UUID(bytes=_buffer.raw).node
def _windll_getnode():
    """Get the hardware address on Windows using ctypes."""
    _buffer = ctypes.create_string_buffer(16)
    # UuidCreate(Sequential) returns 0 on success.
    if _UuidCreate(_buffer) == 0:
        return UUID(bytes=_buffer.raw).node
def _random_getnode():
"""Get a random node ID, with eighth bit set as suggested by RFC 4122."""
import random
return random.randrange(0, 1<<48L) | 0x010000000000L
# Cached node value so the (possibly slow) discovery only runs once.
_node = None

def getnode():
    """Get the hardware address as a 48-bit positive integer.

    The first time this runs, it may launch a separate program, which could
    be quite slow.  If all attempts to obtain the hardware address fail, we
    choose a random 48-bit number with its eighth bit set to 1 as recommended
    in RFC 4122.
    """
    global _node
    if _node is not None:
        return _node

    import sys
    if sys.platform == 'win32':
        getters = [_windll_getnode, _netbios_getnode, _ipconfig_getnode]
    else:
        getters = [_unixdll_getnode, _ifconfig_getnode]

    # Try each platform-specific getter in order; _random_getnode is the
    # guaranteed fallback, so the loop always ends with _node set.
    for getter in getters + [_random_getnode]:
        try:
            _node = getter()
        except:
            continue
        if _node is not None:
            return _node
# Last timestamp handed out by uuid1(), used to guarantee monotonicity.
_last_timestamp = None

def uuid1(node=None, clock_seq=None):
    """Generate a UUID from a host ID, sequence number, and the current time.
    If 'node' is not given, getnode() is used to obtain the hardware
    address.  If 'clock_seq' is given, it is used as the sequence number;
    otherwise a random 14-bit sequence number is chosen."""

    # When the system provides a version-1 UUID generator, use it (but don't
    # use UuidCreate here because its UUIDs don't conform to RFC 4122).
    if _uuid_generate_time and node is clock_seq is None:
        _buffer = ctypes.create_string_buffer(16)
        _uuid_generate_time(_buffer)
        return UUID(bytes=_buffer.raw)

    global _last_timestamp
    import time
    nanoseconds = int(time.time() * 1e9)
    # 0x01b21dd213814000 is the number of 100-ns intervals between the
    # UUID epoch 1582-10-15 00:00:00 and the Unix epoch 1970-01-01 00:00:00.
    timestamp = int(nanoseconds//100) + 0x01b21dd213814000L
    # If the clock didn't advance since the last call, fake a tick so
    # consecutive UUIDs stay distinct.
    if _last_timestamp is not None and timestamp <= _last_timestamp:
        timestamp = _last_timestamp + 1
    _last_timestamp = timestamp
    if clock_seq is None:
        import random
        clock_seq = random.randrange(1<<14L) # instead of stable storage
    # Slice the 60-bit timestamp and 14-bit clock sequence into UUID fields.
    time_low = timestamp & 0xffffffffL
    time_mid = (timestamp >> 32L) & 0xffffL
    time_hi_version = (timestamp >> 48L) & 0x0fffL
    clock_seq_low = clock_seq & 0xffL
    clock_seq_hi_variant = (clock_seq >> 8L) & 0x3fL
    if node is None:
        node = getnode()
    return UUID(fields=(time_low, time_mid, time_hi_version,
                        clock_seq_hi_variant, clock_seq_low, node), version=1)
def uuid3(namespace, name):
    """Generate a UUID from the MD5 hash of a namespace UUID and a name."""
    from hashlib import md5
    # Version 3 is deterministic: the same namespace+name always yields
    # the same UUID (RFC 4122 section 4.3).
    hash = md5(namespace.bytes + name).digest()
    return UUID(bytes=hash[:16], version=3)
def uuid4():
    """Generate a random UUID."""

    # When the system provides a version-4 UUID generator, use it.
    if _uuid_generate_random:
        _buffer = ctypes.create_string_buffer(16)
        _uuid_generate_random(_buffer)
        return UUID(bytes=_buffer.raw)

    # Otherwise, get randomness from urandom or the 'random' module.
    try:
        import os
        return UUID(bytes=os.urandom(16), version=4)
    except:
        # os.urandom unavailable: fall back to the (weaker) random module.
        import random
        bytes = [chr(random.randrange(256)) for i in range(16)]
        return UUID(bytes=bytes, version=4)
def uuid5(namespace, name):
    """Generate a UUID from the SHA-1 hash of a namespace UUID and a name."""
    from hashlib import sha1
    hash = sha1(namespace.bytes + name).digest()
    # SHA-1 yields 20 bytes; only the first 16 are used for the UUID.
    return UUID(bytes=hash[:16], version=5)
# The following standard UUIDs are for use with uuid3() or uuid5().
NAMESPACE_DNS = UUID('6ba7b810-9dad-11d1-80b4-00c04fd430c8')
NAMESPACE_URL = UUID('6ba7b811-9dad-11d1-80b4-00c04fd430c8')
NAMESPACE_OID = UUID('6ba7b812-9dad-11d1-80b4-00c04fd430c8')
NAMESPACE_X500 = UUID('6ba7b814-9dad-11d1-80b4-00c04fd430c8')
| gpl-3.0 |
wemanuel/smry | server-auth/ls/google-cloud-sdk/platform/gsutil/third_party/boto/boto/cloudfront/object.py | 170 | 1798 | # Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.s3.key import Key
class Object(Key):
    """A CloudFront object: an S3 key that belongs to a distribution."""

    def __init__(self, bucket, name=None):
        super(Object, self).__init__(bucket, name=name)
        # Keep a handle on the owning distribution for URL construction.
        self.distribution = bucket.distribution

    def __repr__(self):
        return '<Object: %s/%s>' % (self.distribution.config.origin, self.name)

    def url(self, scheme='http'):
        """Return this object's absolute URL under the given scheme."""
        # RTMP-style schemes require the Flash media path prefix.
        if scheme.lower().startswith('rtmp'):
            path = '/cfx/st/' + self.name
        else:
            path = '/' + self.name
        return '%s://%s%s' % (scheme, self.distribution.domain_name, path)
class StreamingObject(Object):
    # Streaming distributions are served over RTMP rather than HTTP.
    def url(self, scheme='rtmp'):
        """Return the object's URL, defaulting to the RTMP scheme."""
        return super(StreamingObject, self).url(scheme)
| apache-2.0 |
jcasner/nupic | src/nupic/data/inference_shifter.py | 39 | 3485 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""TimeShifter class for shifting ModelResults."""
import collections
import copy
from nupic.frameworks.opf.opfutils import InferenceElement, ModelResult
class InferenceShifter(object):
  """Shifts time for ModelResult objects."""

  def __init__(self):
    # FIFO of recent inference dicts; created lazily on the first shift()
    # call, once the model's maximum temporal delay is known.
    self._inferenceBuffer = None

  def shift(self, modelResult):
    """Shift the model result and return the new instance.

    Queues up the T(i+1) prediction value and emits a T(i)
    input/prediction pair, if possible.  E.g., if the previous T(i-1)
    iteration was learn-only, then we would not have a T(i) prediction in our
    FIFO and would not be able to emit a meaningful input/prediction pair.

    Args:
      modelResult: A ModelResult instance to shift.
    Returns:
      A ModelResult instance.
    """
    inferencesToWrite = {}

    if self._inferenceBuffer is None:
      maxDelay = InferenceElement.getMaxDelay(modelResult.inferences)
      self._inferenceBuffer = collections.deque(maxlen=maxDelay + 1)

    # Newest inferences go on the left, so buffer index == temporal delay.
    self._inferenceBuffer.appendleft(copy.deepcopy(modelResult.inferences))

    for inferenceElement, inference in modelResult.inferences.iteritems():
      if isinstance(inference, dict):
        inferencesToWrite[inferenceElement] = {}
        for key, _ in inference.iteritems():
          delay = InferenceElement.getTemporalDelay(inferenceElement, key)
          if len(self._inferenceBuffer) > delay:
            prevInference = self._inferenceBuffer[delay][inferenceElement][key]
            inferencesToWrite[inferenceElement][key] = prevInference
          else:
            # Not enough history accumulated yet to emit this prediction.
            inferencesToWrite[inferenceElement][key] = None
      else:
        delay = InferenceElement.getTemporalDelay(inferenceElement)
        if len(self._inferenceBuffer) > delay:
          inferencesToWrite[inferenceElement] = (
              self._inferenceBuffer[delay][inferenceElement])
        else:
          if type(inference) in (list, tuple):
            # Preserve the shape of sequence-valued inferences.
            inferencesToWrite[inferenceElement] = [None] * len(inference)
          else:
            inferencesToWrite[inferenceElement] = None

    # Everything except the inferences is passed through unchanged.
    shiftedResult = ModelResult(rawInput=modelResult.rawInput,
                                sensorInput=modelResult.sensorInput,
                                inferences=inferencesToWrite,
                                metrics=modelResult.metrics,
                                predictedFieldIdx=modelResult.predictedFieldIdx,
                                predictedFieldName=modelResult.predictedFieldName)
    return shiftedResult
| agpl-3.0 |
fuziontech/sentry | src/sentry/plugins/bases/issue.py | 25 | 7899 | """
sentry.plugins.bases.issue
~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from django import forms
from django.conf import settings
from django.utils.html import format_html
from social_auth.models import UserSocialAuth
from sentry.models import GroupMeta, Activity
from sentry.plugins import Plugin
from sentry.utils.auth import get_auth_providers
from sentry.utils.http import absolute_uri
from sentry.utils.safe import safe_execute
class NewIssueForm(forms.Form):
    """Default form for creating a remote issue: a short title plus a
    free-form description."""
    title = forms.CharField(max_length=200, widget=forms.TextInput(attrs={'class': 'span9'}))
    description = forms.CharField(widget=forms.Textarea(attrs={'class': 'span9'}))
class IssueTrackingPlugin(Plugin):
# project_conf_form = BaseIssueOptionsForm
new_issue_form = NewIssueForm
create_issue_template = 'sentry/plugins/bases/issue/create_issue.html'
not_configured_template = 'sentry/plugins/bases/issue/not_configured.html'
needs_auth_template = 'sentry/plugins/bases/issue/needs_auth.html'
auth_provider = None
def _get_group_body(self, request, group, event, **kwargs):
result = []
for interface in event.interfaces.itervalues():
output = safe_execute(interface.to_string, event)
if output:
result.append(output)
return '\n\n'.join(result)
def _get_group_description(self, request, group, event):
output = [
absolute_uri(group.get_absolute_url()),
]
body = self._get_group_body(request, group, event)
if body:
output.extend([
'',
'```',
body,
'```',
])
return '\n'.join(output)
def _get_group_title(self, request, group, event):
return event.error()
def is_configured(self, request, project, **kwargs):
raise NotImplementedError
def get_auth_for_user(self, user, **kwargs):
"""
Return a ``UserSocialAuth`` object for the given user based on this plugins ``auth_provider``.
"""
assert self.auth_provider, 'There is no auth provider configured for this plugin.'
if not user.is_authenticated():
return None
try:
return UserSocialAuth.objects.filter(user=user, provider=self.auth_provider)[0]
except IndexError:
return None
def needs_auth(self, request, project, **kwargs):
"""
Return ``True`` if the authenticated user needs to associate an auth service before
performing actions with this plugin.
"""
if self.auth_provider is None:
return False
if not request.user.is_authenticated():
return True
return bool(not UserSocialAuth.objects.filter(user=request.user, provider=self.auth_provider).exists())
def get_new_issue_title(self, **kwargs):
"""
Return a string for the "Create new issue" action label.
"""
return 'Create %s Issue' % self.get_title()
def get_new_issue_form(self, request, group, event, **kwargs):
"""
Return a Form for the "Create new issue" page.
"""
return self.new_issue_form(request.POST or None, initial=self.get_initial_form_data(request, group, event))
def get_issue_url(self, group, issue_id, **kwargs):
"""
Given an issue_id (string) return an absolute URL to the issue's details
page.
"""
raise NotImplementedError
def get_issue_label(self, group, issue_id, **kwargs):
"""
Given an issue_id (string) return a string representing the issue.
e.g. GitHub represents issues as GH-XXX
"""
return '#%s' % issue_id
    def create_issue(self, request, group, form_data, **kwargs):
        """
        Creates the issue on the remote service and returns an issue ID.
        """
        # Abstract: each concrete tracker plugin must implement this.
        raise NotImplementedError
def get_initial_form_data(self, request, group, event, **kwargs):
return {
'description': self._get_group_description(request, group, event),
'title': self._get_group_title(request, group, event),
}
def has_auth_configured(self, **kwargs):
if not self.auth_provider:
return True
return self.auth_provider in get_auth_providers()
    def view(self, request, group, **kwargs):
        """Handle the plugin's "create issue" page for ``group``.

        Renders a not-configured / needs-auth page when prerequisites are
        missing, returns None when an issue is already linked, and otherwise
        processes the new-issue form (creating the remote issue on success).
        """
        has_auth_configured = self.has_auth_configured()
        if not (has_auth_configured and self.is_configured(project=group.project, request=request)):
            # Either the auth provider or the plugin itself is not set up;
            # show a configuration help page instead of the form.
            if self.auth_provider:
                required_auth_settings = settings.AUTH_PROVIDERS[self.auth_provider]
            else:
                required_auth_settings = None
            return self.render(self.not_configured_template, {
                'title': self.get_title(),
                'project': group.project,
                'has_auth_configured': has_auth_configured,
                'required_auth_settings': required_auth_settings,
            })
        if self.needs_auth(project=group.project, request=request):
            # The user must associate their account with the external
            # service before they can create issues.
            return self.render(self.needs_auth_template, {
                'title': self.get_title(),
                'project': group.project,
            })
        if GroupMeta.objects.get_value(group, '%s:tid' % self.get_conf_key(), None):
            # An issue is already linked to this group; nothing to render.
            return None
        prefix = self.get_conf_key()
        event = group.get_latest_event()
        form = self.get_new_issue_form(request, group, event)
        if form.is_valid():
            try:
                issue_id = self.create_issue(
                    group=group,
                    form_data=form.cleaned_data,
                    request=request,
                )
            except forms.ValidationError as e:
                # Surface remote-service failures as a form-level error so
                # the second is_valid() check below fails.
                form.errors['__all__'] = [u'Error creating issue: %s' % e]
        if form.is_valid():
            # Still valid => remote creation succeeded; persist the remote
            # id, record an activity entry, and redirect back to the group.
            GroupMeta.objects.set_value(group, '%s:tid' % prefix, issue_id)
            issue_information = {
                'title': form.cleaned_data['title'],
                'provider': self.get_title(),
                'location': self.get_issue_url(group, issue_id),
                'label': self.get_issue_label(group=group, issue_id=issue_id),
            }
            Activity.objects.create(
                project=group.project,
                group=group,
                type=Activity.CREATE_ISSUE,
                user=request.user,
                data=issue_information,
            )
            return self.redirect(group.get_absolute_url())
        context = {
            'form': form,
            'title': self.get_new_issue_title(),
        }
        return self.render(self.create_issue_template, context)
def actions(self, request, group, action_list, **kwargs):
if not self.is_configured(request=request, project=group.project):
return action_list
prefix = self.get_conf_key()
if not GroupMeta.objects.get_value(group, '%s:tid' % prefix, None):
action_list.append((self.get_new_issue_title(), self.get_url(group)))
return action_list
def tags(self, request, group, tag_list, **kwargs):
if not self.is_configured(request=request, project=group.project):
return tag_list
prefix = self.get_conf_key()
issue_id = GroupMeta.objects.get_value(group, '%s:tid' % prefix)
if not issue_id:
return tag_list
tag_list.append(format_html('<a href="{}">{}</a>',
self.get_issue_url(group=group, issue_id=issue_id),
self.get_issue_label(group=group, issue_id=issue_id),
))
return tag_list
    def get_issue_doc_html(self, **kwargs):
        # Optional extra help HTML for the issue page; empty by default.
        return ""
# Backwards-compatible alias for the historical plugin class name.
IssuePlugin = IssueTrackingPlugin
| bsd-3-clause |
HesselTjeerdsma/Cyber-Physical-Pacman-Game | Algor/flask/lib/python2.7/site-packages/scipy/interpolate/tests/test_rbf.py | 14 | 4604 | # Created by John Travers, Robert Hetland, 2007
""" Test functions for rbf module """
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (assert_, assert_array_almost_equal,
assert_almost_equal, run_module_suite)
from numpy import linspace, sin, random, exp, allclose
from scipy.interpolate.rbf import Rbf
FUNCTIONS = ('multiquadric', 'inverse multiquadric', 'gaussian',
'cubic', 'quintic', 'thin-plate', 'linear')
def check_rbf1d_interpolation(function):
    # The RBF interpolant must reproduce the 1-D data exactly at the nodes.
    nodes = linspace(0, 10, 9)
    values = sin(nodes)
    interpolant = Rbf(nodes, values, function=function)
    assert_array_almost_equal(values, interpolant(nodes))
    # Scalar evaluation must agree with the nodal value as well.
    assert_almost_equal(interpolant(float(nodes[0])), values[0])
def check_rbf2d_interpolation(function):
    # The RBF interpolant must reproduce (complex-valued) 2-D data at the nodes.
    xs = random.rand(50, 1) * 4 - 2
    ys = random.rand(50, 1) * 4 - 2
    zs = xs * exp(-xs ** 2 - 1j * ys ** 2)
    interpolant = Rbf(xs, ys, zs, epsilon=2, function=function)
    fitted = interpolant(xs, ys)
    fitted.shape = xs.shape
    assert_array_almost_equal(zs, fitted)
def check_rbf3d_interpolation(function):
    # The RBF interpolant must reproduce 3-D data exactly at the nodes.
    xs = random.rand(50, 1) * 4 - 2
    ys = random.rand(50, 1) * 4 - 2
    zs = random.rand(50, 1) * 4 - 2
    values = xs * exp(-xs ** 2 - ys ** 2)
    interpolant = Rbf(xs, ys, zs, values, epsilon=2, function=function)
    fitted = interpolant(xs, ys, zs)
    fitted.shape = xs.shape
    assert_array_almost_equal(fitted, values)
def test_rbf_interpolation():
    # Emit one (checker, kernel) pair per dimensionality, per kernel,
    # preserving the original yield order (1d, 2d, 3d for each kernel).
    checkers = (check_rbf1d_interpolation,
                check_rbf2d_interpolation,
                check_rbf3d_interpolation)
    for function in FUNCTIONS:
        for check in checkers:
            yield check, function
def check_rbf1d_regularity(function, atol):
    # Away from the nodes the interpolant should stay close to the smooth
    # target sin(x), within a per-kernel tolerance.
    nodes = linspace(0, 10, 9)
    interpolant = Rbf(nodes, sin(nodes), function=function)
    xi = linspace(0, 10, 100)
    yi = interpolant(xi)
    msg = "abs-diff: %f" % abs(yi - sin(xi)).max()
    assert_(allclose(yi, sin(xi), atol=atol), msg)
def test_rbf_regularity():
    # Per-kernel tolerances for the off-node approximation error; any
    # kernel missing from this table would default to 1e-2 below.
    tolerances = {
        'multiquadric': 0.1,
        'inverse multiquadric': 0.15,
        'gaussian': 0.15,
        'cubic': 0.15,
        'quintic': 0.1,
        'thin-plate': 0.1,
        'linear': 0.2
    }
    for function in FUNCTIONS:
        yield check_rbf1d_regularity, function, tolerances.get(function, 1e-2)
def check_rbf1d_stability(function):
    # With the default epsilon the interpolant must not overshoot wildly
    # between the nodes. Regression test for issue #4523.
    np.random.seed(1234)  # fixed seed: deterministic noisy data
    x = np.linspace(0, 10, 50)
    z = x + 4.0 * np.random.randn(len(x))
    interpolant = Rbf(x, z, function=function)
    fine = np.linspace(0, 10, 1000)
    yi = interpolant(fine)
    # After removing the linear trend, spikes would inflate this ratio.
    assert_(np.abs(yi - fine).max() / np.abs(z - x).max() < 1.1)
def test_rbf_stability():
    # One stability check per supported kernel.
    for kernel in FUNCTIONS:
        yield check_rbf1d_stability, kernel
def test_default_construction():
    # Rbf() without ``function=`` must default to the multiquadric basis
    # and still interpolate through the nodes. Regression for ticket #1228.
    nodes = linspace(0, 10, 9)
    values = sin(nodes)
    assert_array_almost_equal(values, Rbf(nodes, values)(nodes))
def test_function_is_callable():
    # ``function`` may be a one-argument callable of the radius; the
    # identity kernel must still interpolate through the nodes.
    nodes = linspace(0, 10, 9)
    values = sin(nodes)
    identity = lambda r: r
    assert_array_almost_equal(values, Rbf(nodes, values, function=identity)(nodes))
def test_two_arg_function_is_callable():
    # A two-argument callable receives the Rbf instance as ``self``,
    # giving it access to attributes such as ``epsilon``.
    def shifted(self, r):
        return self.epsilon + r
    nodes = linspace(0, 10, 9)
    values = sin(nodes)
    assert_array_almost_equal(values, Rbf(nodes, values, function=shifted)(nodes))
def test_rbf_epsilon_none():
    # epsilon=None must fall back to a data-derived default without error.
    nodes = linspace(0, 10, 9)
    Rbf(nodes, sin(nodes), epsilon=None)
def test_rbf_epsilon_none_collinear():
    # Collinear points in one coordinate must not drive the derived
    # epsilon to zero (which would blow up several kernels).
    rbf = Rbf([1, 2, 3], [4, 4, 4], [5, 6, 7], epsilon=None)
    assert_(rbf.epsilon > 0)
# Allow running this test module directly as a script via the legacy
# numpy test runner.
if __name__ == "__main__":
    run_module_suite()
| apache-2.0 |
dmillington/ansible-modules-core | cloud/rackspace/rax_queue.py | 51 | 3837 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_queue
short_description: create / delete a queue in Rackspace Public Cloud
description:
- creates / deletes a Rackspace Public Cloud queue.
version_added: "1.5"
options:
name:
description:
- Name to give the queue
default: null
state:
description:
- Indicate desired state of the resource
choices:
- present
- absent
default: present
author:
- "Christopher H. Laco (@claco)"
- "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
- name: Build a Queue
gather_facts: False
hosts: local
connection: local
tasks:
- name: Queue create request
local_action:
module: rax_queue
credentials: ~/.raxpub
name: my-queue
region: DFW
state: present
register: my_queue
'''
# pyrax is optional at import time; record its availability so main()
# can emit a clear failure message instead of a raw ImportError.
try:
    import pyrax
    HAS_PYRAX = True
except ImportError:
    HAS_PYRAX = False
def cloud_queue(module, state, name):
    """Ensure a Rackspace Cloud Queue named ``name`` is present or absent.

    Terminates via ``module.exit_json`` (with ``changed`` and the queue
    details) on success, or ``module.fail_json`` on error.
    """
    # Validate required arguments. The original interpolated the (falsy)
    # *value* into the message, producing e.g. "None is required for
    # rax_queue"; report the parameter name instead.
    for param, value in (('state', state), ('name', name)):
        if not value:
            module.fail_json(msg='%s is required for rax_queue' % param)

    changed = False
    queues = []
    instance = {}

    cq = pyrax.queues
    if not cq:
        module.fail_json(msg='Failed to instantiate client. This '
                             'typically indicates an invalid region or an '
                             'incorrectly capitalized region name.')

    # Collect existing queues whose name matches exactly.
    for queue in cq.list():
        if name != queue.name:
            continue
        queues.append(queue)

    if len(queues) > 1:
        module.fail_json(msg='Multiple Queues were matched by name')

    if state == 'present':
        if not queues:
            try:
                queue = cq.create(name)
                changed = True
            except Exception as e:
                module.fail_json(msg='%s' % e.message)
        else:
            queue = queues[0]
        instance = dict(name=queue.name)
        result = dict(changed=changed, queue=instance)
        module.exit_json(**result)
    elif state == 'absent':
        if queues:
            queue = queues[0]
            try:
                queue.delete()
                changed = True
            except Exception as e:
                module.fail_json(msg='%s' % e.message)
    module.exit_json(changed=changed, queue=instance)
def main():
    """Ansible entry point: parse module arguments, authenticate against
    Rackspace, and apply the requested queue state."""
    argument_spec = rax_argument_spec()
    argument_spec.update(
        dict(
            name=dict(),
            state=dict(default='present', choices=['present', 'absent']),
        )
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=rax_required_together()
    )
    if not HAS_PYRAX:
        module.fail_json(msg='pyrax is required for this module')
    name = module.params.get('name')
    state = module.params.get('state')
    # Authenticate / configure pyrax before touching the queues API.
    setup_rax_module(module, pyrax)
    cloud_queue(module, state, name)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
### invoke the module
main()
| gpl-3.0 |
maddyonline/forked-cui | views.py | 1 | 4909 | '''
Copyright (C) 2014 Codility Limited. <https://codility.com>
This file is part of Candidate User Interface (CUI).
CUI is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version accepted in a public statement
by Codility Limited.
CUI is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with CUI. If not, see <http://www.gnu.org/licenses/>.
'''
from django.template.response import SimpleTemplateResponse
from django.conf import settings
from django.template.response import HttpResponse
import json
from django.views.decorators.csrf import csrf_exempt
SERVER_UI_OPTIONS = {
"ticket_id": "TICKET_ID",
"time_elapsed_sec": 15,
"time_remaining_sec": 1800,
"current_human_lang": "en",
"current_prg_lang": "c",
"current_task_name": "task1",
"task_names": ["task1", "task2", "task3"],
"human_langs": {
"en": {"name_in_itself": "English"},
"cn": {"name_in_itself": "\u4e2d\u6587"},
},
"prg_langs": {
"c": {"version": "C", "name": "C"},
"sql": {"version": "SQL", "name": "SQL"},
"cpp": {"version": "C++", "name": "C++"},
},
"show_survey": True,
"show_help": False,
"show_welcome": True,
"sequential": False,
"save_often": True,
"urls": {
"status": "/chk/status/",
"get_task": "/bit/c/_get_task/",
"submit_survey": "/surveys/_ajax_submit_candidate_survey/TICKET_ID/",
"clock": "/chk/clock/",
"close": "/c/close/TICKET_ID",
"verify": "/chk/verify/",
"save": "/chk/save/",
"timeout_action": "/chk/timeout_action/",
"final": "/chk/final/",
"start_ticket": "/bit/c/_start/"
},
}
TASK_RESP = {
"task_status": "open",
"task_description": "Description: task1,en,c",
"task_type": "algo",
"solution_template": "",
"current_solution": "",
"example_input": "",
"prg_lang_list": ["c", "cpp"],
"human_lang_list": ["en", "cn"],
"prg_lang": "c",
"human_lang": "en"
}
XML_RESP = """<?xml version="1.0" encoding="UTF-8"?>
<response>
<current_solution />
<example_input />
<human_lang>en</human_lang>
<human_lang_list>
<element>en</element>
<element>cn</element>
</human_lang_list>
<prg_lang>c</prg_lang>
<prg_lang_list>
<element>c</element>
<element>cpp</element>
</prg_lang_list>
<solution_template />
<task_description>Description: task1,en,c</task_description>
<task_status>open</task_status>
<task_type>algo</task_type>
</response>"""
XML_RESP2 = """<response>
<task_status>open</task_status>
<task_description>Description: task1,en,c</task_description>
<task_type>algo</task_type>
<solution_template></solution_template>
<current_solution></current_solution>
<example_input></example_input>
<prg_lang_list>["c","cpp"]</prg_lang_list>
<human_lang_list>["en","cn"]</human_lang_list>
<prg_lang>c</prg_lang>
<human_lang>en</human_lang>
</response>"""
def render_cui(context):
    """Render the CUI shell template, layering the standard dev-mode
    defaults on top of the caller-supplied context."""
    ctx = dict(context)
    ctx.update({
        'STATIC_URL': settings.STATIC_URL,
        'DEBUG': settings.DEBUG,
        'ticket': {'id': 'TICKET_ID'},
        'in_devel': True,
    })
    return SimpleTemplateResponse('cui/cui.html', ctx)
def cui_test(request):
    """Serve the candidate interface configured for the JS test harness."""
    context = {'in_test': True, 'title': 'CUI tests'}
    return render_cui(context)
def cui_local(request):
    """Serve the candidate interface backed by this mock local server."""
    context = {'in_local': True, 'title': 'BISCUIT'}
    return render_cui(context)
@csrf_exempt
def server_options(request):
    """Return the mock server's UI options as JSON."""
    # Removed stray debug print('Here'); declare the JSON content type
    # explicitly, consistent with the other mock handlers in this module.
    return HttpResponse(json.dumps(SERVER_UI_OPTIONS),
                        content_type="application/json")
@csrf_exempt
def bit_handler(request, cmd):
    """Dispatch the mock /bit/c/ endpoints: ticket start and task fetch."""
    print("In bit_handler, received {cmd}".format(cmd=cmd))
    if cmd == '_start':
        payload = json.dumps({'started': 'OK'})
        return HttpResponse(payload, content_type="application/json")
    if cmd == '_get_task':
        return HttpResponse(XML_RESP2, content_type="text/xml; charset=utf-8")
    return HttpResponse("Not Found")
@csrf_exempt
def clock_handler(request):
    """Mock clock endpoint: always reports OK with a sentinel time limit."""
    print('Clock handler called')
    payload = {"result": "OK", "new_timelimit": -1426973060}
    return HttpResponse(json.dumps(payload), content_type="application/json")
| gpl-3.0 |
andrewcmyers/tensorflow | tensorflow/python/framework/dtypes.py | 30 | 19314 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library of dtypes (Tensor element types)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.framework import types_pb2
class DType(object):
  """Represents the type of the elements in a `Tensor`.
  The following `DType` objects are defined:
  * `tf.float16`: 16-bit half-precision floating-point.
  * `tf.float32`: 32-bit single-precision floating-point.
  * `tf.float64`: 64-bit double-precision floating-point.
  * `tf.bfloat16`: 16-bit truncated floating-point.
  * `tf.complex64`: 64-bit single-precision complex.
  * `tf.complex128`: 128-bit double-precision complex.
  * `tf.int8`: 8-bit signed integer.
  * `tf.uint8`: 8-bit unsigned integer.
  * `tf.uint16`: 16-bit unsigned integer.
  * `tf.int16`: 16-bit signed integer.
  * `tf.int32`: 32-bit signed integer.
  * `tf.int64`: 64-bit signed integer.
  * `tf.bool`: Boolean.
  * `tf.string`: String.
  * `tf.qint8`: Quantized 8-bit signed integer.
  * `tf.quint8`: Quantized 8-bit unsigned integer.
  * `tf.qint16`: Quantized 16-bit signed integer.
  * `tf.quint16`: Quantized 16-bit unsigned integer.
  * `tf.qint32`: Quantized 32-bit signed integer.
  * `tf.resource`: Handle to a mutable resource.
  In addition, variants of these types with the `_ref` suffix are
  defined for reference-typed tensors.
  The `tf.as_dtype()` function converts numpy types and string type
  names to a `DType` object.
  """
  def __init__(self, type_enum):
    """Creates a new `DataType`.
    NOTE(mrry): In normal circumstances, you should not need to
    construct a `DataType` object directly. Instead, use the
    `tf.as_dtype()` function.
    Args:
      type_enum: A `types_pb2.DataType` enum value.
    Raises:
      TypeError: If `type_enum` is not a value `types_pb2.DataType`.
    """
    # TODO(mrry): Make the necessary changes (using __new__) to ensure
    # that calling this returns one of the interned values.
    type_enum = int(type_enum)
    if (type_enum not in types_pb2.DataType.values()
        or type_enum == types_pb2.DT_INVALID):
      raise TypeError(
          "type_enum is not a valid types_pb2.DataType: %s" % type_enum)
    self._type_enum = type_enum
  @property
  def _is_ref_dtype(self):
    """Returns `True` if this `DType` represents a reference type."""
    # Reference variants of each enum are defined as base value + 100
    # (see the intern-table construction below).
    return self._type_enum > 100
  @property
  def _as_ref(self):
    """Returns a reference `DType` based on this `DType`."""
    if self._is_ref_dtype:
      return self
    else:
      return _INTERN_TABLE[self._type_enum + 100]
  @property
  def base_dtype(self):
    """Returns a non-reference `DType` based on this `DType`."""
    if self._is_ref_dtype:
      return _INTERN_TABLE[self._type_enum - 100]
    else:
      return self
  @property
  def real_dtype(self):
    """Returns the dtype correspond to this dtype's real part."""
    base = self.base_dtype
    if base == complex64:
      return float32
    elif base == complex128:
      return float64
    else:
      return self
  @property
  def is_numpy_compatible(self):
    # Resource handles (and their ref variant) are the only dtypes with
    # no numpy equivalent.
    return (self._type_enum != types_pb2.DT_RESOURCE and
            self._type_enum != types_pb2.DT_RESOURCE_REF)
  @property
  def as_numpy_dtype(self):
    """Returns a `numpy.dtype` based on this `DType`."""
    return _TF_TO_NP[self._type_enum]
  @property
  def as_datatype_enum(self):
    """Returns a `types_pb2.DataType` enum value based on this `DType`."""
    return self._type_enum
  @property
  def is_bool(self):
    """Returns whether this is a boolean data type"""
    return self.base_dtype == bool
  @property
  def is_integer(self):
    """Returns whether this is a (non-quantized) integer type."""
    return (self.is_numpy_compatible and not self.is_quantized and
            issubclass(self.as_numpy_dtype, np.integer))
  @property
  def is_floating(self):
    """Returns whether this is a (non-quantized, real) floating point type."""
    return self.is_numpy_compatible and issubclass(self.as_numpy_dtype,
                                                   np.floating)
  @property
  def is_complex(self):
    """Returns whether this is a complex floating point type."""
    return self.base_dtype in (complex64, complex128)
  @property
  def is_quantized(self):
    """Returns whether this is a quantized data type."""
    # NOTE(review): bfloat16 is grouped with the quantized types here,
    # which affects is_integer/is_floating above -- confirm intentional.
    return self.base_dtype in [qint8, quint8, qint16, quint16, qint32, bfloat16]
  @property
  def is_unsigned(self):
    """Returns whether this type is unsigned.
    Non-numeric, unordered, and quantized types are not considered unsigned, and
    this function returns `False`.
    Returns:
      Whether a `DType` is unsigned.
    """
    try:
      return self.min == 0
    except TypeError:
      # `min` raises TypeError for non-numeric/quantized types.
      return False
  @property
  def min(self):
    """Returns the minimum representable value in this data type.
    Raises:
      TypeError: if this is a non-numeric, unordered, or quantized type.
    """
    if (self.is_quantized or self.base_dtype in
        (bool, string, complex64, complex128)):
      raise TypeError("Cannot find minimum value of %s." % self)
    # there is no simple way to get the min value of a dtype, we have to check
    # float and int types separately
    try:
      return np.finfo(self.as_numpy_dtype()).min
    except:  # bare except as possible raises by finfo not documented
      try:
        return np.iinfo(self.as_numpy_dtype()).min
      except:
        # Neither a float nor an int dtype as far as numpy is concerned.
        raise TypeError("Cannot find minimum value of %s." % self)
  @property
  def max(self):
    """Returns the maximum representable value in this data type.
    Raises:
      TypeError: if this is a non-numeric, unordered, or quantized type.
    """
    if (self.is_quantized or self.base_dtype in
        (bool, string, complex64, complex128)):
      raise TypeError("Cannot find maximum value of %s." % self)
    # there is no simple way to get the max value of a dtype, we have to check
    # float and int types separately
    try:
      return np.finfo(self.as_numpy_dtype()).max
    except:  # bare except as possible raises by finfo not documented
      try:
        return np.iinfo(self.as_numpy_dtype()).max
      except:
        # Neither a float nor an int dtype as far as numpy is concerned.
        raise TypeError("Cannot find maximum value of %s." % self)
  @property
  def limits(self, clip_negative=True):
    """Return intensity limits, i.e. (min, max) tuple, of the dtype.
    Args:
      clip_negative : bool, optional
        If True, clip the negative range (i.e. return 0 for min intensity)
        even if the image dtype allows negative values.
    Returns
      min, max : tuple
        Lower and upper intensity limits.
    """
    # NOTE(review): since this is a @property, callers can never actually
    # pass `clip_negative`; it is always True here.
    min, max = dtype_range[self.as_numpy_dtype]
    if clip_negative:
      min = 0
    return min, max
  def is_compatible_with(self, other):
    """Returns True if the `other` DType will be converted to this DType.
    The conversion rules are as follows:
    ```python
    DType(T) .is_compatible_with(DType(T)) == True
    DType(T) .is_compatible_with(DType(T).as_ref) == True
    DType(T).as_ref.is_compatible_with(DType(T)) == False
    DType(T).as_ref.is_compatible_with(DType(T).as_ref) == True
    ```
    Args:
      other: A `DType` (or object that may be converted to a `DType`).
    Returns:
      True if a Tensor of the `other` `DType` will be implicitly converted to
      this `DType`.
    """
    other = as_dtype(other)
    return self._type_enum in (
        other.as_datatype_enum, other.base_dtype.as_datatype_enum)
  def __eq__(self, other):
    """Returns True iff this DType refers to the same type as `other`."""
    if other is None:
      return False
    try:
      dtype = as_dtype(other).as_datatype_enum
      return self._type_enum == dtype  # pylint: disable=protected-access
    except TypeError:
      # `other` is not convertible to a DType; treat as unequal.
      return False
  def __ne__(self, other):
    """Returns True iff self != other."""
    return not self.__eq__(other)
  @property
  def name(self):
    """Returns the string name for this `DType`."""
    return _TYPE_TO_STRING[self._type_enum]
  def __int__(self):
    return self._type_enum
  def __str__(self):
    return "<dtype: %r>" % self.name
  def __repr__(self):
    return "tf." + self.name
  def __hash__(self):
    # Hash on the raw enum so interned and non-interned instances that
    # compare equal also hash equal.
    return self._type_enum
  @property
  def size(self):
    # Resource handles have no numpy dtype; report a nominal size of 1.
    if self._type_enum == types_pb2.DT_RESOURCE:
      return 1
    return np.dtype(self.as_numpy_dtype).itemsize
# Define data type range of numpy dtype
dtype_range = {np.bool_: (False, True),
np.bool8: (False, True),
np.uint8: (0, 255),
np.uint16: (0, 65535),
np.int8: (-128, 127),
np.int16: (-32768, 32767),
np.int64: (-2**63, 2**63 - 1),
np.uint64: (0, 2**64 - 1),
np.int32: (-2**31, 2**31 - 1),
np.uint32: (0, 2**32 - 1),
np.float32: (-1, 1),
np.float64: (-1, 1)}
# Define standard wrappers for the types_pb2.DataType enum.
resource = DType(types_pb2.DT_RESOURCE)
float16 = DType(types_pb2.DT_HALF)
half = float16
float32 = DType(types_pb2.DT_FLOAT)
float64 = DType(types_pb2.DT_DOUBLE)
double = float64
int32 = DType(types_pb2.DT_INT32)
uint8 = DType(types_pb2.DT_UINT8)
uint16 = DType(types_pb2.DT_UINT16)
int16 = DType(types_pb2.DT_INT16)
int8 = DType(types_pb2.DT_INT8)
string = DType(types_pb2.DT_STRING)
complex64 = DType(types_pb2.DT_COMPLEX64)
complex128 = DType(types_pb2.DT_COMPLEX128)
int64 = DType(types_pb2.DT_INT64)
bool = DType(types_pb2.DT_BOOL)
qint8 = DType(types_pb2.DT_QINT8)
quint8 = DType(types_pb2.DT_QUINT8)
qint16 = DType(types_pb2.DT_QINT16)
quint16 = DType(types_pb2.DT_QUINT16)
qint32 = DType(types_pb2.DT_QINT32)
resource_ref = DType(types_pb2.DT_RESOURCE_REF)
bfloat16 = DType(types_pb2.DT_BFLOAT16)
float16_ref = DType(types_pb2.DT_HALF_REF)
half_ref = float16_ref
float32_ref = DType(types_pb2.DT_FLOAT_REF)
float64_ref = DType(types_pb2.DT_DOUBLE_REF)
double_ref = float64_ref
int32_ref = DType(types_pb2.DT_INT32_REF)
uint8_ref = DType(types_pb2.DT_UINT8_REF)
uint16_ref = DType(types_pb2.DT_UINT16_REF)
int16_ref = DType(types_pb2.DT_INT16_REF)
int8_ref = DType(types_pb2.DT_INT8_REF)
string_ref = DType(types_pb2.DT_STRING_REF)
complex64_ref = DType(types_pb2.DT_COMPLEX64_REF)
complex128_ref = DType(types_pb2.DT_COMPLEX128_REF)
int64_ref = DType(types_pb2.DT_INT64_REF)
bool_ref = DType(types_pb2.DT_BOOL_REF)
qint8_ref = DType(types_pb2.DT_QINT8_REF)
quint8_ref = DType(types_pb2.DT_QUINT8_REF)
qint16_ref = DType(types_pb2.DT_QINT16_REF)
quint16_ref = DType(types_pb2.DT_QUINT16_REF)
qint32_ref = DType(types_pb2.DT_QINT32_REF)
bfloat16_ref = DType(types_pb2.DT_BFLOAT16_REF)
# Maintain an intern table so that we don't have to create a large
# number of small objects.
_INTERN_TABLE = {
types_pb2.DT_HALF: float16,
types_pb2.DT_FLOAT: float32,
types_pb2.DT_DOUBLE: float64,
types_pb2.DT_INT32: int32,
types_pb2.DT_UINT8: uint8,
types_pb2.DT_UINT16: uint16,
types_pb2.DT_INT16: int16,
types_pb2.DT_INT8: int8,
types_pb2.DT_STRING: string,
types_pb2.DT_COMPLEX64: complex64,
types_pb2.DT_COMPLEX128: complex128,
types_pb2.DT_INT64: int64,
types_pb2.DT_BOOL: bool,
types_pb2.DT_QINT8: qint8,
types_pb2.DT_QUINT8: quint8,
types_pb2.DT_QINT16: qint16,
types_pb2.DT_QUINT16: quint16,
types_pb2.DT_QINT32: qint32,
types_pb2.DT_BFLOAT16: bfloat16,
types_pb2.DT_RESOURCE: resource,
types_pb2.DT_HALF_REF: float16_ref,
types_pb2.DT_FLOAT_REF: float32_ref,
types_pb2.DT_DOUBLE_REF: float64_ref,
types_pb2.DT_INT32_REF: int32_ref,
types_pb2.DT_UINT8_REF: uint8_ref,
types_pb2.DT_UINT16_REF: uint16_ref,
types_pb2.DT_INT16_REF: int16_ref,
types_pb2.DT_INT8_REF: int8_ref,
types_pb2.DT_STRING_REF: string_ref,
types_pb2.DT_COMPLEX64_REF: complex64_ref,
types_pb2.DT_COMPLEX128_REF: complex128_ref,
types_pb2.DT_INT64_REF: int64_ref,
types_pb2.DT_BOOL_REF: bool_ref,
types_pb2.DT_QINT8_REF: qint8_ref,
types_pb2.DT_QUINT8_REF: quint8_ref,
types_pb2.DT_QINT16_REF: qint16_ref,
types_pb2.DT_QUINT16_REF: quint16_ref,
types_pb2.DT_QINT32_REF: qint32_ref,
types_pb2.DT_BFLOAT16_REF: bfloat16_ref,
types_pb2.DT_RESOURCE_REF: resource_ref,
}
# Standard mappings between types_pb2.DataType values and string names.
_TYPE_TO_STRING = {
types_pb2.DT_HALF: "float16",
types_pb2.DT_FLOAT: "float32",
types_pb2.DT_DOUBLE: "float64",
types_pb2.DT_INT32: "int32",
types_pb2.DT_UINT8: "uint8",
types_pb2.DT_UINT16: "uint16",
types_pb2.DT_INT16: "int16",
types_pb2.DT_INT8: "int8",
types_pb2.DT_STRING: "string",
types_pb2.DT_COMPLEX64: "complex64",
types_pb2.DT_COMPLEX128: "complex128",
types_pb2.DT_INT64: "int64",
types_pb2.DT_BOOL: "bool",
types_pb2.DT_QINT8: "qint8",
types_pb2.DT_QUINT8: "quint8",
types_pb2.DT_QINT16: "qint16",
types_pb2.DT_QUINT16: "quint16",
types_pb2.DT_QINT32: "qint32",
types_pb2.DT_BFLOAT16: "bfloat16",
types_pb2.DT_RESOURCE: "resource",
types_pb2.DT_HALF_REF: "float16_ref",
types_pb2.DT_FLOAT_REF: "float32_ref",
types_pb2.DT_DOUBLE_REF: "float64_ref",
types_pb2.DT_INT32_REF: "int32_ref",
types_pb2.DT_UINT8_REF: "uint8_ref",
types_pb2.DT_UINT16_REF: "uint16_ref",
types_pb2.DT_INT16_REF: "int16_ref",
types_pb2.DT_INT8_REF: "int8_ref",
types_pb2.DT_STRING_REF: "string_ref",
types_pb2.DT_COMPLEX64_REF: "complex64_ref",
types_pb2.DT_COMPLEX128_REF: "complex128_ref",
types_pb2.DT_INT64_REF: "int64_ref",
types_pb2.DT_BOOL_REF: "bool_ref",
types_pb2.DT_QINT8_REF: "qint8_ref",
types_pb2.DT_QUINT8_REF: "quint8_ref",
types_pb2.DT_QINT16_REF: "qint16_ref",
types_pb2.DT_QUINT16_REF: "quint16_ref",
types_pb2.DT_QINT32_REF: "qint32_ref",
types_pb2.DT_BFLOAT16_REF: "bfloat16_ref",
types_pb2.DT_RESOURCE_REF: "resource_ref",
}
_STRING_TO_TF = {value: _INTERN_TABLE[key]
for key, value in _TYPE_TO_STRING.items()}
# Add non-canonical aliases.
_STRING_TO_TF["half"] = float16
_STRING_TO_TF["half_ref"] = float16_ref
_STRING_TO_TF["float"] = float32
_STRING_TO_TF["float_ref"] = float32_ref
_STRING_TO_TF["double"] = float64
_STRING_TO_TF["double_ref"] = float64_ref
# Numpy representation for quantized dtypes.
#
# These are magic strings that are used in the swig wrapper to identify
# quantized types.
# TODO(mrry,keveman): Investigate Numpy type registration to replace this
# hard-coding of names.
_np_qint8 = np.dtype([("qint8", np.int8, 1)])
_np_quint8 = np.dtype([("quint8", np.uint8, 1)])
_np_qint16 = np.dtype([("qint16", np.int16, 1)])
_np_quint16 = np.dtype([("quint16", np.uint16, 1)])
_np_qint32 = np.dtype([("qint32", np.int32, 1)])
# Custom struct dtype for directly-fed ResourceHandles of supported type(s).
np_resource = np.dtype([("resource", np.ubyte, 1)])
# Standard mappings between types_pb2.DataType values and numpy.dtypes.
_NP_TO_TF = frozenset([
(np.float16, float16),
(np.float32, float32),
(np.float64, float64),
(np.int32, int32),
(np.int64, int64),
(np.uint8, uint8),
(np.uint16, uint16),
(np.int16, int16),
(np.int8, int8),
(np.complex64, complex64),
(np.complex128, complex128),
(np.object, string),
(np.bool, bool),
(_np_qint8, qint8),
(_np_quint8, quint8),
(_np_qint16, qint16),
(_np_quint16, quint16),
(_np_qint32, qint32),
# NOTE(touts): Intentionally no way to feed a DT_BFLOAT16.
])
_TF_TO_NP = {
types_pb2.DT_HALF: np.float16,
types_pb2.DT_FLOAT: np.float32,
types_pb2.DT_DOUBLE: np.float64,
types_pb2.DT_INT32: np.int32,
types_pb2.DT_UINT8: np.uint8,
types_pb2.DT_UINT16: np.uint16,
types_pb2.DT_INT16: np.int16,
types_pb2.DT_INT8: np.int8,
# NOTE(touts): For strings we use np.object as it supports variable length
# strings.
types_pb2.DT_STRING: np.object,
types_pb2.DT_COMPLEX64: np.complex64,
types_pb2.DT_COMPLEX128: np.complex128,
types_pb2.DT_INT64: np.int64,
types_pb2.DT_BOOL: np.bool,
types_pb2.DT_QINT8: _np_qint8,
types_pb2.DT_QUINT8: _np_quint8,
types_pb2.DT_QINT16: _np_qint16,
types_pb2.DT_QUINT16: _np_quint16,
types_pb2.DT_QINT32: _np_qint32,
types_pb2.DT_BFLOAT16: np.uint16,
# Ref types
types_pb2.DT_HALF_REF: np.float16,
types_pb2.DT_FLOAT_REF: np.float32,
types_pb2.DT_DOUBLE_REF: np.float64,
types_pb2.DT_INT32_REF: np.int32,
types_pb2.DT_UINT8_REF: np.uint8,
types_pb2.DT_UINT16_REF: np.uint16,
types_pb2.DT_INT16_REF: np.int16,
types_pb2.DT_INT8_REF: np.int8,
types_pb2.DT_STRING_REF: np.object,
types_pb2.DT_COMPLEX64_REF: np.complex64,
types_pb2.DT_COMPLEX128_REF: np.complex128,
types_pb2.DT_INT64_REF: np.int64,
types_pb2.DT_BOOL_REF: np.bool,
types_pb2.DT_QINT8_REF: _np_qint8,
types_pb2.DT_QUINT8_REF: _np_quint8,
types_pb2.DT_QINT16_REF: _np_qint16,
types_pb2.DT_QUINT16_REF: _np_quint16,
types_pb2.DT_QINT32_REF: _np_qint32,
types_pb2.DT_BFLOAT16_REF: np.uint16,
}
QUANTIZED_DTYPES = frozenset(
[qint8, quint8, qint16, quint16, qint32, qint8_ref, quint8_ref, qint16_ref,
quint16_ref, qint32_ref])
def as_dtype(type_value):
  """Converts the given `type_value` to a `DType`.

  Args:
    type_value: A value that can be converted to a `tf.DType`
      object. This may currently be a `tf.DType` object, a
      [`DataType` enum](https://www.tensorflow.org/code/tensorflow/core/framework/types.proto),
      a string type name, or a `numpy.dtype`.

  Returns:
    A `DType` corresponding to `type_value`.

  Raises:
    TypeError: If `type_value` cannot be converted to a `DType`.
  """
  # Already a DType: nothing to do.
  if isinstance(type_value, DType):
    return type_value

  # Fast paths: interned enum values, then string type names. A missing key
  # simply falls through to the next lookup strategy.
  for table in (_INTERN_TABLE, _STRING_TO_TF):
    try:
      return table[type_value]
    except KeyError:
      pass

  if isinstance(type_value, np.dtype):
    # The numpy dtype for strings is variable length. We can not compare
    # dtype with a single constant (np.string does not exist) to decide
    # dtype is a "string" type. We need to compare the dtype.type to be
    # sure it's a string type.
    if type_value.type in (np.string_, np.unicode_):
      return string

  # Fall back to a linear scan using numpy's (loose) equality on types.
  for np_type, tf_type in _NP_TO_TF:
    try:
      if np_type == type_value:
        return tf_type
    except TypeError as e:
      raise TypeError("Cannot convert {} to a dtype. {}".format(type_value, e))

  raise TypeError(
      "Cannot convert value %r to a TensorFlow DType." % type_value)
| apache-2.0 |
torcolvin/openbabel | scripts/python/pybel.py | 2 | 33936 | # -*- coding: utf-8 -*-
# Copyright (c) 2008-2012, Noel O'Boyle; 2012, Adrià Cereto-Massagué
# All rights reserved.
#
# This file is part of Cinfony.
# The contents are covered by the terms of the GPL v2 license
# which is included in the file LICENSE_GPLv2.txt.
"""
pybel - A Cinfony module for accessing Open Babel
Global variables:
ob - the underlying SWIG bindings for Open Babel
informats - a dictionary of supported input formats
outformats - a dictionary of supported output formats
descs - a list of supported descriptors
fps - a list of supported fingerprint types
forcefields - a list of supported forcefields
"""
import sys
import os.path
import tempfile
import json
import uuid
import xml.etree.ElementTree as ET
if sys.platform[:4] == "java":
import org.openbabel as ob
import java.lang.System
java.lang.System.loadLibrary("openbabel_java")
_obfuncs = ob.openbabel_java
_obconsts = ob.openbabel_javaConstants
import javax
elif sys.platform[:3] == "cli":
import System
import clr
clr.AddReference('System.Windows.Forms')
clr.AddReference('System.Drawing')
from System.Windows.Forms import Application, DockStyle, Form, PictureBox
from System.Windows.Forms import PictureBoxSizeMode
from System.Drawing import Image, Size
_obdotnet = os.environ["OBDOTNET"]
if _obdotnet[0] == '"': # Remove trailing quotes
_obdotnet = _obdotnet[1:-1]
clr.AddReferenceToFileAndPath(os.path.join(_obdotnet, "OBDotNet.dll"))
import OpenBabel as ob
_obfuncs = ob.openbabel_csharp
_obconsts = ob.openbabel_csharp
else:
import openbabel as ob
_obfuncs = _obconsts = ob
try:
import Tkinter as tk
import Image as PIL
import ImageTk as piltk
except ImportError: # pragma: no cover
tk = None
def _formatstodict(list):
if sys.platform[:4] == "java":
list = [list.get(i) for i in range(list.size())]
broken = [x.replace("[Read-only]", "").replace("[Write-only]", "").split(
" -- ") for x in list]
broken = [(x, y.strip()) for x, y in broken]
return dict(broken)
def _getplugins(findplugin, names):
return dict([(x, findplugin(x)) for x in names if findplugin(x)])
def _getpluginnames(ptype):
    """Return the names of all Open Babel plugins of the given type.

    Parameters:
       ptype -- a plugin type string, e.g. "descriptors" or "forcefields"
    """
    # Consistency fix: the rest of this module tests the IronPython platform
    # with sys.platform[:3] == "cli"; this function used [:4].
    if sys.platform[:3] == "cli":
        plugins = ob.VectorString()
    else:
        plugins = ob.vectorString()
    ob.OBPlugin.ListAsVector(ptype, None, plugins)
    if sys.platform[:4] == "java":
        # Java collections are not iterable by index; copy into a Python list.
        plugins = [plugins.get(i) for i in range(plugins.size())]
    # Each entry is "name  description"; keep only the name, skip blanks.
    return [x.split()[0] for x in plugins if x.strip()]
# Module-level capability discovery: shared OBConversion/OBBuilder instances
# are used to query the formats, descriptors, fingerprints, forcefields,
# charge models and operations compiled into this Open Babel build.
_obconv = ob.OBConversion()
_builder = ob.OBBuilder()

informats = _formatstodict(_obconv.GetSupportedInputFormat())
"""A dictionary of supported input formats"""
outformats = _formatstodict(_obconv.GetSupportedOutputFormat())
"""A dictionary of supported output formats"""

descs = _getpluginnames("descriptors")
"""A list of supported descriptors"""
_descdict = _getplugins(ob.OBDescriptor.FindType, descs)

fps = [_x.lower() for _x in _getpluginnames("fingerprints")]
"""A list of supported fingerprint types"""
_fingerprinters = _getplugins(ob.OBFingerprint.FindFingerprint, fps)

forcefields = [_x.lower() for _x in _getpluginnames("forcefields")]
"""A list of supported forcefields"""
_forcefields = _getplugins(ob.OBForceField.FindType, forcefields)

charges = [_x.lower() for _x in _getpluginnames("charges")]
"""A list of supported charge models"""
_charges = _getplugins(ob.OBChargeModel.FindType, charges)

operations = _getpluginnames("ops")
"""A list of supported operations"""
_operations = _getplugins(ob.OBOp.FindType, operations)

ipython_3d = False
"""Toggles 2D vs 3D molecule representations in IPython notebook"""
def readfile(format, filename, opt=None):
    """Iterate over the molecules in a file.

    Required parameters:
       format - see the informats variable for a list of available
                input formats
       filename

    Optional parameters:
       opt    - a dictionary of format-specific options
                For format options with no parameters, specify the
                value as None.

    You can access the first molecule in a file using the next() method
    of the iterator (or the next() keyword in Python 3):
        mol = readfile("smi", "myfile.smi").next() # Python 2
        mol = next(readfile("smi", "myfile.smi")) # Python 3

    You can make a list of the molecules in a file using:
        mols = list(readfile("smi", "myfile.smi"))

    You can iterate over the molecules in a file as shown in the
    following code snippet:
    >>> atomtotal = 0
    >>> for mol in readfile("sdf", "head.sdf"):
    ...     atomtotal += len(mol.atoms)
    ...
    >>> print atomtotal
    43
    """
    if opt is None:
        opt = {}
    obconversion = ob.OBConversion()
    formatok = obconversion.SetInFormat(format)
    # Fail fast on an unknown format, before any options are applied.
    # (Previously options were added first; readstring() already checked
    # the format up front -- this makes the two consistent.)
    if not formatok:
        raise ValueError("%s is not a recognised Open Babel format" % format)
    for k, v in opt.items():
        if v is None:
            obconversion.AddOption(k, obconversion.INOPTIONS)
        else:
            obconversion.AddOption(k, obconversion.INOPTIONS, str(v))
    if not os.path.isfile(filename):
        raise IOError("No such file: '%s'" % filename)

    def filereader():
        # Lazily yield one Molecule per record until the converter reports
        # the end of the file.
        obmol = ob.OBMol()
        notatend = obconversion.ReadFile(obmol, filename)
        while notatend:
            yield Molecule(obmol)
            obmol = ob.OBMol()
            notatend = obconversion.Read(obmol)
    return filereader()
def readstring(format, string, opt=None):
    """Read in a molecule from a string.

    Required parameters:
       format - see the informats variable for a list of available
                input formats
       string

    Optional parameters:
       opt    - a dictionary of format-specific options
                For format options with no parameters, specify the
                value as None.

    Example:
    >>> input = "C1=CC=CS1"
    >>> mymol = readstring("smi", input)
    >>> len(mymol.atoms)
    5
    """
    opt = {} if opt is None else opt
    obmol = ob.OBMol()
    conv = ob.OBConversion()
    if not conv.SetInFormat(format):
        raise ValueError("%s is not a recognised Open Babel format" % format)
    # Apply any format-specific input options before parsing.
    for key, value in opt.items():
        if value is None:
            conv.AddOption(key, conv.INOPTIONS)
        else:
            conv.AddOption(key, conv.INOPTIONS, str(value))
    if not conv.ReadString(obmol, string):
        raise IOError("Failed to convert '%s' to format '%s'" % (
            string, format))
    return Molecule(obmol)
class Outputfile(object):
    """Represent a file to which *output* is to be sent.

    Although it's possible to write a single molecule to a file by
    calling the write() method of a molecule, if multiple molecules
    are to be written to the same file you should use the Outputfile
    class.

    Required parameters:
       format - see the outformats variable for a list of available
                output formats
       filename

    Optional parameters:
       overwrite -- if the output file already exists, should it
                   be overwritten? (default is False)
       opt -- a dictionary of format-specific options
              For format options with no parameters, specify the
              value as None.

    Methods:
       write(molecule)
       close()
    """

    def __init__(self, format, filename, overwrite=False, opt=None):
        if opt is None:
            opt = {}
        self.format = format
        self.filename = filename
        if not overwrite and os.path.isfile(self.filename):
            raise IOError(
                "%s already exists. Use 'overwrite=True' to overwrite it." %
                self.filename)
        self.obConversion = ob.OBConversion()
        formatok = self.obConversion.SetOutFormat(self.format)
        if not formatok:
            raise ValueError("%s is not a recognised Open Babel format" %
                             format)
        if filename:
            # The extension comparison must use the same (str/bytes) type
            # as the filename itself.
            if isinstance(filename, bytes):
                gzextension = b'.gz'
            else:
                gzextension = '.gz'
            if os.path.splitext(filename)[1] == gzextension:
                # Bug fix: this previously read self.obconversion (lowercase
                # 'c'), an attribute that does not exist, so writing to a
                # .gz filename raised AttributeError.
                self.obConversion.AddOption('z', self.obConversion.GENOPTIONS)
        for k, v in opt.items():
            if v is None:
                self.obConversion.AddOption(k, self.obConversion.OUTOPTIONS)
            else:
                self.obConversion.AddOption(k, self.obConversion.OUTOPTIONS, str(v))
        self.total = 0  # The total number of molecules written to the file

    def write(self, molecule):
        """Write a molecule to the output file.

        Required parameters:
           molecule
        """
        if not self.filename:
            raise IOError("Outputfile instance is closed.")
        # The first write opens the file; subsequent writes append records.
        if self.total == 0:
            self.obConversion.WriteFile(molecule.OBMol, self.filename)
        else:
            self.obConversion.Write(molecule.OBMol)
        self.total += 1

    def close(self):
        """Close the Outputfile to further writing."""
        self.obConversion.CloseOutFile()
        self.filename = None
class Molecule(object):
    """Represent a Pybel Molecule.

    Required parameter:
       OBMol -- an Open Babel OBMol or any type of cinfony Molecule

    Attributes:
       atoms, charge, conformers, data, dim, energy, exactmass, formula,
       molwt, spin, sssr, title, unitcell.
    (refer to the Open Babel library documentation for more info).

    Methods:
       addh(), calcfp(), calcdesc(), draw(), localopt(), make2D(), make3D()
       calccharges(), removeh(), write()

    The underlying Open Babel molecule can be accessed using the attribute:
       OBMol
    """
    _cinfony = True

    def __init__(self, OBMol):
        # Accept any cinfony molecule (from another toolkit wrapper) by
        # round-tripping it through its preferred exchange format:
        # 0 -> SMILES string, otherwise -> MOL block.
        if hasattr(OBMol, "_cinfony"):
            a, b = OBMol._exchange
            if a == 0:
                mol = readstring("smi", b)
            else:
                mol = readstring("mol", b)
            OBMol = mol.OBMol
        self.OBMol = OBMol

    @property
    def atoms(self):
        # Open Babel atom indices are 1-based.
        return [Atom(self.OBMol.GetAtom(i + 1))
                for i in range(self.OBMol.NumAtoms())]

    @property
    def residues(self):
        return [Residue(res) for res in ob.OBResidueIter(self.OBMol)]

    @property
    def charge(self):
        return self.OBMol.GetTotalCharge()

    @property
    def conformers(self):
        return self.OBMol.GetConformers()

    @property
    def data(self):
        return MoleculeData(self.OBMol)

    @property
    def dim(self):
        return self.OBMol.GetDimension()

    @property
    def energy(self):
        return self.OBMol.GetEnergy()

    @property
    def exactmass(self):
        return self.OBMol.GetExactMass()

    @property
    def formula(self):
        return self.OBMol.GetFormula()

    @property
    def molwt(self):
        return self.OBMol.GetMolWt()

    @property
    def spin(self):
        return self.OBMol.GetTotalSpinMultiplicity()

    @property
    def sssr(self):
        return self.OBMol.GetSSSR()

    def _gettitle(self):
        return self.OBMol.GetTitle()

    def _settitle(self, val):
        self.OBMol.SetTitle(val)

    title = property(_gettitle, _settitle)

    @property
    def unitcell(self):
        unitcell_index = _obconsts.UnitCell
        if sys.platform[:3] == "cli":
            # The .NET bindings require an explicit unsigned index type.
            unitcell_index = System.UInt32(unitcell_index)
        unitcell = self.OBMol.GetData(unitcell_index)
        if unitcell:
            if sys.platform[:3] != "cli":
                return _obfuncs.toUnitCell(unitcell)
            else:
                return unitcell.Downcast[ob.OBUnitCell]()
        else:
            raise AttributeError("Molecule has no attribute 'unitcell'")

    @property
    def clone(self):
        """A deep copy of this Molecule."""
        return Molecule(ob.OBMol(self.OBMol))

    @property
    def _exchange(self):
        # Cinfony interchange: (1, molblock) if 3D/2D coordinates exist,
        # otherwise (0, canonical SMILES without the title).
        if self.OBMol.HasNonZeroCoords():
            return (1, self.write("mol"))
        else:
            return (0, self.write("can").split()[0])

    def __iter__(self):
        """Iterate over the Atoms of the Molecule.

        This allows constructions such as the following:
           for atom in mymol:
               print atom
        """
        return iter(self.atoms)

    def _repr_svg_(self):
        """For IPython notebook, renders 2D pybel.Molecule SVGs."""
        # Returning None defers to _repr_javascript_
        if ipython_3d:
            return None
        # Open babel returns a nested svg, which IPython unpacks and treats as
        # two SVGs, messing with the display location. This parses out the
        # inner svg before handing over to IPython.
        namespace = "http://www.w3.org/2000/svg"
        ET.register_namespace("", namespace)
        obsvg = self.clone.write("svg")
        tree = ET.fromstring(obsvg)
        svg = tree.find("{{{ns}}}g/{{{ns}}}svg".format(ns=namespace))
        return ET.tostring(svg).decode("utf-8")

    def _repr_html_(self):
        """For IPython notebook, renders 3D pybel.Molecule webGL objects."""
        # Returning None defers to _repr_svg_
        if not ipython_3d:
            return None
        try:
            import imolecule
        except ImportError:
            raise ImportError("Cannot import 3D rendering. Please install "
                              "with `pip install imolecule`.")
        return imolecule.draw(self.clone, format="pybel", display_html=False)

    def calcdesc(self, descnames=None):
        """Calculate descriptor values.

        Optional parameter:
           descnames -- a list of names of descriptors

        If descnames is not specified, all available descriptors are
        calculated. See the descs variable for a list of available
        descriptors.
        """
        # The default was previously a mutable list ([]); None is safer and
        # backward-compatible since any false value means "all descriptors".
        if not descnames:
            descnames = descs
        ans = {}
        for descname in descnames:
            try:
                desc = _descdict[descname]
            except KeyError:
                raise ValueError(("%s is not a recognised Open Babel "
                                  "descriptor type") % descname)
            ans[descname] = desc.Predict(self.OBMol)
        return ans

    def calcfp(self, fptype="FP2"):
        """Calculate a molecular fingerprint.

        Optional parameters:
           fptype -- the fingerprint type (default is "FP2"). See the
                     fps variable for a list of of available fingerprint
                     types.
        """
        if sys.platform[:3] == "cli":
            fp = ob.VectorUInt()
        else:
            fp = ob.vectorUnsignedInt()
        # Fingerprint names are registered in lowercase.
        fptype = fptype.lower()
        try:
            fingerprinter = _fingerprinters[fptype]
        except KeyError:
            raise ValueError(
                "%s is not a recognised Open Babel Fingerprint type" % fptype)
        fingerprinter.GetFingerprint(self.OBMol, fp)
        return Fingerprint(fp)

    def calccharges(self, model="mmff94"):
        """Estimates atomic partial charges in the molecule.

        Optional parameters:
           model -- default is "mmff94". See the charges variable for a list
                    of available charge models (in shell, `obabel -L charges`)

        This method populates the `partialcharge` attribute of each atom
        in the molecule in place.
        """
        model = model.lower()
        try:
            charge_model = _charges[model]
        except KeyError:
            raise ValueError(
                "%s is not a recognised Open Babel Charge Model type" % model)
        success = charge_model.ComputeCharges(self.OBMol)
        if not success:
            # Surface Open Babel's own error message when available.
            errors = ob.obErrorLog.GetMessagesOfLevel(ob.obError)
            error = errors[-1] if errors else "Molecule failed to charge."
            raise Exception(error)
        return [atom.partialcharge for atom in self.atoms]

    def write(self, format="smi", filename=None, overwrite=False, opt=None):
        """Write the molecule to a file or return a string.

        Optional parameters:
           format -- see the informats variable for a list of available
                     output formats (default is "smi")
           filename -- default is None
           overwrite -- if the output file already exists, should it
                       be overwritten? (default is False)
           opt -- a dictionary of format specific options
                  For format options with no parameters, specify the
                  value as None.

        If a filename is specified, the result is written to a file.
        Otherwise, a string is returned containing the result.

        To write multiple molecules to the same file you should use
        the Outputfile class.
        """
        if opt is None:
            opt = {}
        obconversion = ob.OBConversion()
        formatok = obconversion.SetOutFormat(format)
        if not formatok:
            raise ValueError("%s is not a recognised Open Babel format" %
                             format)
        if filename:
            if isinstance(filename, bytes):
                gzextension = b'.gz'
            else:
                gzextension = '.gz'
            if os.path.splitext(filename)[1] == gzextension:
                # Bug fix: this previously referenced self.obConversion,
                # which does not exist on Molecule (it is an Outputfile
                # attribute), so writing a .gz file raised AttributeError.
                # The local converter is the correct object.
                obconversion.AddOption('z', obconversion.GENOPTIONS)
        for k, v in opt.items():
            if v is None:
                obconversion.AddOption(k, obconversion.OUTOPTIONS)
            else:
                obconversion.AddOption(k, obconversion.OUTOPTIONS, str(v))
        if filename:
            if not overwrite and os.path.isfile(filename):
                raise IOError(("%s already exists. Use 'overwrite=True' to "
                               "overwrite it.") % filename)
            obconversion.WriteFile(self.OBMol, filename)
            obconversion.CloseOutFile()
        else:
            return obconversion.WriteString(self.OBMol)

    def localopt(self, forcefield="mmff94", steps=500):
        """Locally optimize the coordinates.

        Optional parameters:
           forcefield -- default is "mmff94". See the forcefields variable
                         for a list of available forcefields.
           steps -- default is 500

        If the molecule does not have any coordinates, make3D() is
        called before the optimization. Note that the molecule needs
        to have explicit hydrogens. If not, call addh().
        """
        forcefield = forcefield.lower()
        if self.dim != 3:
            self.make3D(forcefield)
        ff = _forcefields[forcefield]
        success = ff.Setup(self.OBMol)
        if not success:
            # Forcefield setup can fail (e.g. missing parameters); in that
            # case the coordinates are left unchanged.
            return
        ff.SteepestDescent(steps)
        ff.GetCoordinates(self.OBMol)

    def make2D(self):
        """Generate 2D coordinates."""
        _operations['gen2D'].Do(self.OBMol)

    def make3D(self, forcefield="mmff94", steps=50):
        """Generate 3D coordinates.

        Optional parameters:
           forcefield -- default is "mmff94". See the forcefields variable
                         for a list of available forcefields.
           steps -- default is 50

        Once coordinates are generated, hydrogens are added and a quick
        local optimization is carried out with 50 steps and the
        MMFF94 forcefield. Call localopt() if you want
        to improve the coordinates further.
        """
        forcefield = forcefield.lower()
        _builder.Build(self.OBMol)
        self.addh()
        self.localopt(forcefield, steps)

    def addh(self):
        """Add hydrogens."""
        self.OBMol.AddHydrogens()

    def removeh(self):
        """Remove hydrogens."""
        self.OBMol.DeleteHydrogens()

    def convertdbonds(self):
        """Convert Dative Bonds."""
        self.OBMol.ConvertDativeBonds()

    def __str__(self):
        return self.write()

    def draw(self, show=True, filename=None, update=False, usecoords=False):
        """Create a 2D depiction of the molecule.

        Optional parameters:
          show -- display on screen (default is True)
          filename -- write to file (default is None)
          update -- update the coordinates of the atoms to those
                    determined by the structure diagram generator
                    (default is False)
          usecoords -- don't calculate 2D coordinates, just use
                       the current coordinates (default is False)

        Tkinter and Python Imaging Library are required for image display.
        """
        obconversion = ob.OBConversion()
        formatok = obconversion.SetOutFormat("_png2")
        if not formatok:
            raise ImportError("PNG depiction support not found. You should "
                              "compile Open Babel with support for Cairo. See "
                              "installation instructions for more "
                              "information.")
        # Need to copy to avoid removing hydrogens from self
        workingmol = Molecule(ob.OBMol(self.OBMol))
        workingmol.removeh()
        if not usecoords:
            _operations['gen2D'].Do(workingmol.OBMol)
        if update:
            if workingmol.OBMol.NumAtoms() != self.OBMol.NumAtoms():
                raise RuntimeError("It is not possible to update the original "
                                   "molecule with the calculated coordinates, "
                                   "as the original molecule contains "
                                   "explicit hydrogens for which no "
                                   "coordinates have been calculated.")
            else:
                for i in range(workingmol.OBMol.NumAtoms()):
                    self.OBMol.GetAtom(i + 1).SetVector(
                        workingmol.OBMol.GetAtom(i + 1).GetVector())
        if filename:
            filedes = None
        else:
            if sys.platform[:3] == "cli" and show:
                raise RuntimeError("It is only possible to show the molecule "
                                   "if you provide a filename. The reason for "
                                   "this is that I kept having problems "
                                   "when using temporary files.")
            filedes, filename = tempfile.mkstemp()
        workingmol.write("_png2", filename=filename, overwrite=True)
        if show:
            if sys.platform[:4] == "java":
                image = javax.imageio.ImageIO.read(java.io.File(filename))
                frame = javax.swing.JFrame(visible=1)
                frame.getContentPane().add(
                    javax.swing.JLabel(javax.swing.ImageIcon(image)))
                frame.setSize(300, 300)
                frame.setDefaultCloseOperation(
                    javax.swing.WindowConstants.DISPOSE_ON_CLOSE)
                frame.show()
            elif sys.platform[:3] == "cli":
                form = _MyForm()
                form.setup(filename, self.title)
                Application.Run(form)
            else:
                if not tk:
                    raise ImportError("Tkinter or Python Imaging Library not "
                                      "found, but is required for image "
                                      "display. See installation instructions "
                                      "for more information.")
                root = tk.Tk()
                root.title((hasattr(self, "title") and self.title)
                           or self.__str__().rstrip())
                frame = tk.Frame(root, colormap="new",
                                 visual='truecolor').pack()
                image = PIL.open(filename)
                imagedata = piltk.PhotoImage(image)
                tk.Label(frame, image=imagedata).pack()
                tk.Button(root, text="Close", command=root.destroy).pack(
                    fill=tk.X)
                root.mainloop()
        if filedes:
            # Only clean up the temporary file we created ourselves.
            os.close(filedes)
            os.remove(filename)
class Atom(object):
    """Represent a Pybel atom.

    Required parameter:
       OBAtom -- an Open Babel OBAtom

    Attributes:
       atomicmass, atomicnum, cidx, coords, coordidx, exactmass,
       formalcharge, heavyvalence, heterovalence, hyb, idx,
       implicitvalence, isotope, partialcharge, residue, spin, type,
       valence, vector.
    (refer to the Open Babel library documentation for more info).

    The original Open Babel atom can be accessed using the attribute:
       OBAtom
    """

    def __init__(self, OBAtom):
        self.OBAtom = OBAtom

    # Each property below simply forwards to the corresponding OBAtom getter.
    @property
    def coords(self):
        obatom = self.OBAtom
        return (obatom.GetX(), obatom.GetY(), obatom.GetZ())

    @property
    def atomicmass(self):
        return self.OBAtom.GetAtomicMass()

    @property
    def atomicnum(self):
        return self.OBAtom.GetAtomicNum()

    @property
    def cidx(self):
        return self.OBAtom.GetCIdx()

    @property
    def coordidx(self):
        return self.OBAtom.GetCoordinateIdx()

    @property
    def exactmass(self):
        return self.OBAtom.GetExactMass()

    @property
    def formalcharge(self):
        return self.OBAtom.GetFormalCharge()

    @property
    def heavyvalence(self):
        return self.OBAtom.GetHvyValence()

    @property
    def heterovalence(self):
        return self.OBAtom.GetHeteroValence()

    @property
    def hyb(self):
        return self.OBAtom.GetHyb()

    @property
    def idx(self):
        return self.OBAtom.GetIdx()

    @property
    def implicitvalence(self):
        return self.OBAtom.GetImplicitValence()

    @property
    def isotope(self):
        return self.OBAtom.GetIsotope()

    @property
    def partialcharge(self):
        return self.OBAtom.GetPartialCharge()

    @property
    def residue(self):
        # Wrap the owning residue in the pybel Residue class.
        return Residue(self.OBAtom.GetResidue())

    @property
    def spin(self):
        return self.OBAtom.GetSpinMultiplicity()

    @property
    def type(self):
        return self.OBAtom.GetType()

    @property
    def valence(self):
        return self.OBAtom.GetValence()

    @property
    def vector(self):
        return self.OBAtom.GetVector()

    def __str__(self):
        x, y, z = self.coords
        return "Atom: %d (%.2f %.2f %.2f)" % (self.atomicnum, x, y, z)
class Residue(object):
    """Represent a Pybel residue.

    Required parameter:
       OBResidue -- an Open Babel OBResidue

    Attributes:
       atoms, idx, name.
    (refer to the Open Babel library documentation for more info).

    The original Open Babel atom can be accessed using the attribute:
       OBResidue
    """

    def __init__(self, OBResidue):
        self.OBResidue = OBResidue

    @property
    def atoms(self):
        # Wrap every member atom of this residue.
        return [Atom(obatom) for obatom in ob.OBResidueAtomIter(self.OBResidue)]

    @property
    def idx(self):
        return self.OBResidue.GetIdx()

    @property
    def name(self):
        return self.OBResidue.GetName()

    def __iter__(self):
        """Iterate over the Atoms of the Residue.

        This allows constructions such as the following:
           for atom in residue:
               print atom
        """
        return iter(self.atoms)
def _findbits(fp, bitsperint):
"""Find which bits are set in a list/vector.
This function is used by the Fingerprint class.
>>> _findbits([13, 71], 8)
[1, 3, 4, 9, 10, 11, 15]
"""
ans = []
start = 1
if sys.platform[:4] == "java":
fp = [fp.get(i) for i in range(fp.size())]
for x in fp:
i = start
while x > 0:
if x % 2:
ans.append(i)
x >>= 1
i += 1
start += bitsperint
return ans
class Fingerprint(object):
    """A Molecular Fingerprint.

    Required parameters:
       fingerprint -- a vector calculated by OBFingerprint.FindFingerprint()

    Attributes:
       fp -- the underlying fingerprint object
       bits -- a list of bits set in the Fingerprint

    Methods:
       The "|" operator can be used to calculate the Tanimoto coeff. For
       example, given two Fingerprints 'a', and 'b', the Tanimoto coefficient
       is given by:
          tanimoto = a | b
    """

    def __init__(self, fingerprint):
        self.fp = fingerprint

    def __or__(self, other):
        # Tanimoto similarity between the two bit vectors.
        return ob.OBFingerprint.Tanimoto(self.fp, other.fp)

    @property
    def bits(self):
        return _findbits(self.fp, ob.OBFingerprint.Getbitsperint())

    def __str__(self):
        words = self.fp
        if sys.platform[:4] == "java":
            words = [self.fp.get(i) for i in range(self.fp.size())]
        return ", ".join(str(word) for word in words)
class Smarts(object):
    """A Smarts Pattern Matcher

    Required parameters:
       smartspattern

    Methods:
       findall(molecule)

    Example:
    >>> mol = readstring("smi","CCN(CC)CC") # triethylamine
    >>> smarts = Smarts("[#6][#6]") # Matches an ethyl group
    >>> print smarts.findall(mol)
    [(1, 2), (4, 5), (6, 7)]

    The numbers returned are the indices (starting from 1) of the atoms
    that match the SMARTS pattern. In this case, there are three matches
    for each of the three ethyl groups in the molecule.
    """

    def __init__(self, smartspattern):
        """Initialise with a SMARTS pattern."""
        self.obsmarts = ob.OBSmartsPattern()
        if not self.obsmarts.Init(smartspattern):
            raise IOError("Invalid SMARTS pattern")

    def findall(self, molecule):
        """Find all matches of the SMARTS pattern to a particular molecule.

        Required parameters:
           molecule
        """
        self.obsmarts.Match(molecule.OBMol)
        matches = self.obsmarts.GetUMapList()
        if sys.platform[:4] == "java":
            # Java vectors need explicit element-by-element copying.
            matches = [matches.get(i) for i in range(matches.size())]
        return list(matches)
class MoleculeData(object):
    """Store molecule data in a dictionary-type object

    Required parameters:
      obmol -- an Open Babel OBMol

    Methods and accessor methods are like those of a dictionary except
    that the data is retrieved on-the-fly from the underlying OBMol.

    Example:
    >>> mol = readfile("sdf", 'head.sdf').next() # Python 2
    >>> # mol = next(readfile("sdf", 'head.sdf')) # Python 3
    >>> data = mol.data
    >>> print data
    {'Comment': 'CORINA 2.61 0041 25.10.2001', 'NSC': '1'}
    >>> print len(data), data.keys(), data.has_key("NSC")
    2 ['Comment', 'NSC'] True
    >>> print data['Comment']
    CORINA 2.61 0041 25.10.2001
    >>> data['Comment'] = 'This is a new comment'
    >>> for k,v in data.items():
    ...    print k, "-->", v
    Comment --> This is a new comment
    NSC --> 1
    >>> del data['NSC']
    >>> print len(data), data.keys(), data.has_key("NSC")
    1 ['Comment'] False
    """

    def __init__(self, obmol):
        self._mol = obmol

    def _data(self):
        # Collect the OBMol's generic data, keeping only pair/comment
        # entries. On Jython the SWIG vector must be copied element by
        # element; on CPython each entry is downcast to OBPairData.
        data = self._mol.GetData()
        if sys.platform[:4] == "java":
            data = [data.get(i) for i in range(data.size())]
        answer = [x for x in data if
                  x.GetDataType() == _obconsts.PairData or
                  x.GetDataType() == _obconsts.CommentData]
        if sys.platform[:3] != "cli":
            answer = [_obfuncs.toPairData(x) for x in answer]
        return answer

    def _testforkey(self, key):
        # Raise KeyError (dict semantics) for a missing attribute name.
        if key not in self:
            raise KeyError("'%s'" % key)

    def keys(self):
        return [x.GetAttribute() for x in self._data()]

    def values(self):
        return [x.GetValue() for x in self._data()]

    def items(self):
        return iter(zip(self.keys(), self.values()))

    def __iter__(self):
        return iter(self.keys())

    def iteritems(self):  # Can remove for Python 3
        return self.items()

    def __len__(self):
        return len(self._data())

    def __contains__(self, key):
        return self._mol.HasData(key)

    def __delitem__(self, key):
        self._testforkey(key)
        self._mol.DeleteData(self._mol.GetData(key))

    def clear(self):
        for key in self:
            del self[key]

    def has_key(self, key):
        return key in self

    def update(self, dictionary):
        for k, v in dictionary.items():
            self[k] = v

    def __getitem__(self, key):
        self._testforkey(key)
        answer = self._mol.GetData(key)
        if sys.platform[:3] != "cli":
            answer = _obfuncs.toPairData(answer)
        return answer.GetValue()

    def __setitem__(self, key, value):
        # Update an existing pair in place, or clone a new OBPairData onto
        # the molecule. The downcast path differs between CPython and .NET.
        if key in self:
            if sys.platform[:3] != "cli":
                pairdata = _obfuncs.toPairData(self._mol.GetData(key))
            else:
                pairdata = self._mol.GetData(key).Downcast[ob.OBPairData]()
            pairdata.SetValue(str(value))
        else:
            pairdata = ob.OBPairData()
            pairdata.SetAttribute(key)
            pairdata.SetValue(str(value))
            self._mol.CloneData(pairdata)

    def __repr__(self):
        return dict(self.items()).__repr__()
if sys.platform[:3] == "cli":
    # IronPython only: a minimal Windows Forms window used by
    # Molecule.draw() to display the rendered PNG.
    class _MyForm(Form):
        def __init__(self):
            Form.__init__(self)

        def setup(self, filename, title):
            # adjust the form's client area size to the picture
            self.ClientSize = Size(300, 300)
            self.Text = title
            self.filename = filename
            self.image = Image.FromFile(self.filename)
            pictureBox = PictureBox()
            # this will fit the image to the form
            pictureBox.SizeMode = PictureBoxSizeMode.StretchImage
            pictureBox.Image = self.image
            # fit the picture box to the frame
            pictureBox.Dock = DockStyle.Fill
            self.Controls.Add(pictureBox)
            self.Show()


if __name__ == "__main__":  # pragma: no cover
    # Run the doctests embedded in the docstrings above.
    import doctest
    doctest.testmod(verbose=True)
| gpl-2.0 |
baroquebobcat/pants | src/python/pants/engine/round_manager.py | 13 | 3680 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from collections import defaultdict, namedtuple
from pants.goal.goal import Goal
class ProducerInfo(namedtuple('ProducerInfo', ['product_type', 'task_type', 'goal'])):
    """Describes the producer of a given product type.

    Fields:
      product_type: the product type produced.
      task_type: the task class that produces it.
      goal: the goal the producing task is registered under.
    """
class RoundManager(object):
    """Tracks the product types tasks require and resolves their producers.

    :API: public
    """

    class MissingProductError(KeyError):
        """Indicates a required product type is provided by no-one."""

    @staticmethod
    def _index_products():
        # Build a map from product type to the set of (product, task, goal)
        # triples that can produce it, across all registered goals.
        producer_info_by_product_type = defaultdict(set)
        for goal in Goal.all():
            for task_type in goal.task_types():
                for product_type in task_type.product_types():
                    producer_info = ProducerInfo(product_type, task_type, goal)
                    producer_info_by_product_type[product_type].add(producer_info)
        return producer_info_by_product_type

    def __init__(self, context):
        # Product types this round's tasks require; optional ones may have
        # no producer without it being an error.
        self._dependencies = set()
        self._optional_dependencies = set()
        self._context = context
        # Lazily built by _get_producer_infos_by_product_type().
        self._producer_infos_by_product_type = None

    def require(self, product_type):
        """Schedules the tasks that produce product_type to be executed before the requesting task.

        There must be at least one task that produces the required product type, or the
        dependencies will not be satisfied.

        :API: public
        """
        self._dependencies.add(product_type)
        self._context.products.require(product_type)

    def optional_product(self, product_type):
        """Schedules tasks, if any, that produce product_type to be executed before the requesting task.

        There need not be any tasks that produce the required product type. All this method
        guarantees is that if there are any then they will be executed before the requesting task.

        :API: public
        """
        self._optional_dependencies.add(product_type)
        self.require(product_type)

    def require_data(self, product_type):
        """Schedules the tasks that produce product_type to be executed before the requesting task.

        There must be at least one task that produces the required product type, or the
        dependencies will not be satisfied.

        :API: public
        """
        self._dependencies.add(product_type)
        self._context.products.require_data(product_type)

    def optional_data(self, product_type):
        """Schedules tasks, if any, that produce product_type to be executed before the requesting task.

        There need not be any tasks that produce the required product type. All this method
        guarantees is that if there are any then they will be executed before the requesting task.

        :API: public
        """
        self._optional_dependencies.add(product_type)
        self.require_data(product_type)

    def get_dependencies(self):
        """Returns the set of data dependencies as producer infos corresponding to data requirements."""
        producer_infos = set()
        for product_type in self._dependencies:
            producer_infos.update(self._get_producer_infos_by_product_type(product_type))
        return producer_infos

    def _get_producer_infos_by_product_type(self, product_type):
        # Index lazily on first use; the goal registry must be fully
        # populated by then.
        if self._producer_infos_by_product_type is None:
            self._producer_infos_by_product_type = self._index_products()
        producer_infos = self._producer_infos_by_product_type[product_type]
        if not producer_infos and product_type not in self._optional_dependencies:
            raise self.MissingProductError("No producers registered for '{0}'".format(product_type))
        return producer_infos
| apache-2.0 |
steventimberman/masterDebater | venv/lib/python2.7/site-packages/wheel/install.py | 472 | 18070 | """
Operations on existing wheel files, including basic installation.
"""
# XXX see patched pip to install
import sys
import warnings
import os.path
import re
import zipfile
import hashlib
import csv
import shutil
# sys.maxsize exists on Python 3 (and 2.6+); very old Python 2 versions
# only provide sys.maxint.
try:
    _big_number = sys.maxsize
except NameError:
    _big_number = sys.maxint
from wheel.decorator import reify
from wheel.util import (urlsafe_b64encode, from_json, urlsafe_b64decode,
native, binary, HashingFile)
from wheel import signatures
from wheel.pkginfo import read_pkg_info_bytes
from wheel.util import open_for_csv
from .pep425tags import get_supported
from .paths import get_install_paths
# The next major version after this version of the 'wheel' tool:
VERSION_TOO_HIGH = (1, 0)
# Non-greedy matching of an optional build number may be too clever (more
# invalid wheel filenames will match). Separate regex for .dist-info?
WHEEL_INFO_RE = re.compile(
r"""^(?P<namever>(?P<name>.+?)(-(?P<ver>\d.+?))?)
((-(?P<build>\d.*?))?-(?P<pyver>.+?)-(?P<abi>.+?)-(?P<plat>.+?)
\.whl|\.dist-info)$""",
re.VERBOSE).match
def parse_version(version):
    """Use parse_version from pkg_resources or distutils as available.

    On the first call, the chosen implementation permanently replaces this
    function's global name, so the import cost is only paid once.
    """
    global parse_version
    try:
        from pkg_resources import parse_version
    except ImportError:
        from distutils.version import LooseVersion as parse_version
    return parse_version(version)
class BadWheelFile(ValueError):
    """Raised when a wheel filename or the archive's contents are invalid."""
class WheelFile(object):
    """Parse wheel-specific attributes from a wheel (.whl) file and offer
    basic installation and verification support.

    WheelFile can be used to simply parse a wheel filename by avoiding the
    methods that require the actual file contents."""

    # Well-known file names inside the .dist-info directory.
    WHEEL_INFO = "WHEEL"
    RECORD = "RECORD"

    def __init__(self,
                 filename,
                 fp=None,
                 append=False,
                 context=get_supported):
        """
        :param fp: A seekable file-like object or None to open(filename).
        :param append: Open archive in append mode.
        :param context: Function returning list of supported tags. Wheels
            must have the same context to be sortable.
        :raises BadWheelFile: if the filename does not match WHEEL_INFO_RE.
        """
        self.filename = filename
        self.fp = fp
        self.append = append
        self.context = context
        basename = os.path.basename(filename)
        self.parsed_filename = WHEEL_INFO_RE(basename)
        if not basename.endswith('.whl') or self.parsed_filename is None:
            raise BadWheelFile("Bad filename '%s'" % filename)

    def __repr__(self):
        return self.filename

    @property
    def distinfo_name(self):
        # e.g. "name-1.0.dist-info"
        return "%s.dist-info" % self.parsed_filename.group('namever')

    @property
    def datadir_name(self):
        # e.g. "name-1.0.data"
        return "%s.data" % self.parsed_filename.group('namever')

    @property
    def record_name(self):
        # Archive path of the RECORD manifest.
        return "%s/%s" % (self.distinfo_name, self.RECORD)

    @property
    def wheelinfo_name(self):
        # Archive path of the WHEEL metadata file.
        return "%s/%s" % (self.distinfo_name, self.WHEEL_INFO)

    @property
    def tags(self):
        """A wheel file is compatible with the Cartesian product of the
        period-delimited tags in its filename.

        To choose a wheel file among several candidates having the same
        distribution version 'ver', an installer ranks each triple of
        (pyver, abi, plat) that its Python installation can run, sorting
        the wheels by the best-ranked tag it supports and then by their
        arity which is just len(list(compatibility_tags)).
        """
        tags = self.parsed_filename.groupdict()
        for pyver in tags['pyver'].split('.'):
            for abi in tags['abi'].split('.'):
                for plat in tags['plat'].split('.'):
                    yield (pyver, abi, plat)

    # Alias: binds the same property object under a second name.
    compatibility_tags = tags

    @property
    def arity(self):
        """The number of compatibility tags the wheel declares."""
        return len(list(self.compatibility_tags))

    @property
    def rank(self):
        """
        Lowest index of any of this wheel's tags in self.context(), and the
        arity e.g. (0, 1)
        """
        return self.compatibility_rank(self.context())

    @property
    def compatible(self):
        # _big_number means no tag matched the context at all.
        return self.rank[0] != _big_number  # bad API!

    # deprecated:
    def compatibility_rank(self, supported):
        """Rank the wheel against the supported tags. Smaller ranks are more
        compatible!

        :param supported: A list of compatibility tags that the current
            Python implemenation can run.
        """
        preferences = []
        for tag in self.compatibility_tags:
            try:
                preferences.append(supported.index(tag))
            # Tag not present
            except ValueError:
                pass
        if len(preferences):
            return (min(preferences), self.arity)
        return (_big_number, 0)

    # deprecated
    def supports_current_python(self, x):
        assert self.context == x, 'context mismatch'
        return self.compatible

    # Comparability.
    # Wheels are equal if they refer to the same file.
    # If two wheels are not equal, compare based on (in this order):
    # 1. Name
    # 2. Version
    # 3. Compatibility rank
    # 4. Filename (as a tiebreaker)

    @property
    def _sort_key(self):
        # rank is negated so that "smaller rank = more compatible" sorts
        # later (better wheels compare greater).
        return (self.parsed_filename.group('name'),
                parse_version(self.parsed_filename.group('ver')),
                tuple(-x for x in self.rank),
                self.filename)

    def __eq__(self, other):
        return self.filename == other.filename

    def __ne__(self, other):
        return self.filename != other.filename

    def __lt__(self, other):
        if self.context != other.context:
            raise TypeError("{0}.context != {1}.context".format(self, other))
        return self._sort_key < other._sort_key

        # NOTE(review): everything below the return above is unreachable
        # dead code from an earlier implementation (hence "XXX prune").
        # XXX prune
        sn = self.parsed_filename.group('name')
        on = other.parsed_filename.group('name')
        if sn != on:
            return sn < on
        sv = parse_version(self.parsed_filename.group('ver'))
        ov = parse_version(other.parsed_filename.group('ver'))
        if sv != ov:
            return sv < ov
        # Compatibility
        if self.context != other.context:
            raise TypeError("{0}.context != {1}.context".format(self, other))
        sc = self.rank
        oc = other.rank
        if sc != None and oc != None and sc != oc:
            # Smaller compatibility ranks are "better" than larger ones,
            # so we have to reverse the sense of the comparison here!
            return sc > oc
        elif sc == None and oc != None:
            return False
        return self.filename < other.filename

    def __gt__(self, other):
        return other < self

    def __le__(self, other):
        return self == other or self < other

    def __ge__(self, other):
        return self == other or other < self

    #
    # Methods using the file's contents:
    #

    @reify
    def zipfile(self):
        # Opened lazily (reify caches the result). In read mode the archive
        # is verified against RECORD before being handed back.
        mode = "r"
        if self.append:
            mode = "a"
        vzf = VerifyingZipFile(self.fp if self.fp else self.filename, mode)
        if not self.append:
            self.verify(vzf)
        return vzf

    @reify
    def parsed_wheel_info(self):
        """Parse wheel metadata (the .data/WHEEL file)"""
        return read_pkg_info_bytes(self.zipfile.read(self.wheelinfo_name))

    def check_version(self):
        # Refuse wheels written by a future, incompatible 'wheel' major version.
        version = self.parsed_wheel_info['Wheel-Version']
        if tuple(map(int, version.split('.'))) >= VERSION_TOO_HIGH:
            raise ValueError("Wheel version is too high")

    @reify
    def install_paths(self):
        """
        Consult distutils to get the install paths for our dist. A dict with
        ('purelib', 'platlib', 'headers', 'scripts', 'data').

        We use the name from our filename as the dist name, which means headers
        could be installed in the wrong place if the filesystem-escaped name
        is different than the Name. Who cares?
        """
        name = self.parsed_filename.group('name')
        return get_install_paths(name)

    # NOTE(review): mutable default argument 'overrides={}' — harmless here
    # because it is only read, never mutated, but fragile if that changes.
    def install(self, force=False, overrides={}):
        """
        Install the wheel into site-packages.

        :param force: If False, raise ValueError rather than overwrite any
            existing file.
        :param overrides: Optional mapping overriding the install path for a
            given scheme key ('purelib', 'scripts', 'data', ...).
        """

        # Utility to get the target directory for a particular key
        def get_path(key):
            return overrides.get(key) or self.install_paths[key]

        # The base target location is either purelib or platlib
        if self.parsed_wheel_info['Root-Is-Purelib'] == 'true':
            root = get_path('purelib')
        else:
            root = get_path('platlib')

        # Parse all the names in the archive
        name_trans = {}
        for info in self.zipfile.infolist():
            name = info.filename
            # Zip files can contain entries representing directories.
            # These end in a '/'.
            # We ignore these, as we create directories on demand.
            if name.endswith('/'):
                continue
            # Pathnames in a zipfile namelist are always /-separated.
            # In theory, paths could start with ./ or have other oddities
            # but this won't happen in practical cases of well-formed wheels.
            # We'll cover the simple case of an initial './' as it's both easy
            # to do and more common than most other oddities.
            if name.startswith('./'):
                name = name[2:]
            # Split off the base directory to identify files that are to be
            # installed in non-root locations
            basedir, sep, filename = name.partition('/')
            if sep and basedir == self.datadir_name:
                # Data file. Target destination is elsewhere
                key, sep, filename = filename.partition('/')
                if not sep:
                    raise ValueError("Invalid filename in wheel: {0}".format(name))
                target = get_path(key)
            else:
                # Normal file. Target destination is root
                key = ''
                target = root
                filename = name
            # Map the actual filename from the zipfile to its intended target
            # directory and the pathname relative to that directory.
            dest = os.path.normpath(os.path.join(target, filename))
            name_trans[info] = (key, target, filename, dest)

        # We're now ready to start processing the actual install. The process
        # is as follows:
        # 1. Prechecks - is the wheel valid, is its declared architecture
        #    OK, etc. [[Responsibility of the caller]]
        # 2. Overwrite check - do any of the files to be installed already
        #    exist?
        # 3. Actual install - put the files in their target locations.
        # 4. Update RECORD - write a suitably modified RECORD file to
        #    reflect the actual installed paths.

        if not force:
            for info, v in name_trans.items():
                k = info.filename
                key, target, filename, dest = v
                if os.path.exists(dest):
                    raise ValueError("Wheel file {0} would overwrite {1}. Use force if this is intended".format(k, dest))

        # Get the name of our executable, for use when replacing script
        # wrapper hashbang lines.
        # We encode it using getfilesystemencoding, as that is "the name of
        # the encoding used to convert Unicode filenames into system file
        # names".
        exename = sys.executable.encode(sys.getfilesystemencoding())
        record_data = []
        record_name = self.distinfo_name + '/RECORD'
        for info, (key, target, filename, dest) in name_trans.items():
            name = info.filename
            source = self.zipfile.open(info)
            # Skip the RECORD file
            if name == record_name:
                continue
            ddir = os.path.dirname(dest)
            if not os.path.isdir(ddir):
                os.makedirs(ddir)
            destination = HashingFile(open(dest, 'wb'))
            if key == 'scripts':
                # Rewrite the generic '#!python' shebang to this interpreter.
                hashbang = source.readline()
                if hashbang.startswith(b'#!python'):
                    hashbang = b'#!' + exename + binary(os.linesep)
                destination.write(hashbang)
            shutil.copyfileobj(source, destination)
            reldest = os.path.relpath(dest, root)
            # NOTE(review): str.replace returns a new string and this result
            # is discarded, so RECORD entries keep os.sep on Windows — looks
            # like a latent bug; TODO confirm intended.
            reldest.replace(os.sep, '/')
            record_data.append((reldest, destination.digest(), destination.length))
            destination.close()
            source.close()
            # preserve attributes (especially +x bit for scripts)
            attrs = info.external_attr >> 16
            if attrs:  # tends to be 0 if Windows.
                os.chmod(dest, info.external_attr >> 16)

        # Rewrite RECORD with the hashes/lengths of the files as installed.
        record_name = os.path.join(root, self.record_name)
        writer = csv.writer(open_for_csv(record_name, 'w+'))
        for reldest, digest, length in sorted(record_data):
            writer.writerow((reldest, digest, length))
        writer.writerow((self.record_name, '', ''))

    def verify(self, zipfile=None):
        """Configure the VerifyingZipFile `zipfile` by verifying its signature
        and setting expected hashes for every hash in RECORD.

        Caller must complete the verification process by completely reading
        every file in the archive (e.g. with extractall).

        :raises BadWheelFile: if a RECORD.jws signature is present but does
            not match the computed RECORD digest.
        """
        sig = None
        if zipfile is None:
            zipfile = self.zipfile
        zipfile.strict = True

        record_name = '/'.join((self.distinfo_name, 'RECORD'))
        sig_name = '/'.join((self.distinfo_name, 'RECORD.jws'))
        # tolerate s/mime signatures:
        smime_sig_name = '/'.join((self.distinfo_name, 'RECORD.p7s'))
        # RECORD and the signature files cannot carry their own hashes.
        zipfile.set_expected_hash(record_name, None)
        zipfile.set_expected_hash(sig_name, None)
        zipfile.set_expected_hash(smime_sig_name, None)
        record = zipfile.read(record_name)

        record_digest = urlsafe_b64encode(hashlib.sha256(record).digest())
        try:
            sig = from_json(native(zipfile.read(sig_name)))
        except KeyError:  # no signature
            pass
        if sig:
            headers, payload = signatures.verify(sig)
            if payload['hash'] != "sha256=" + native(record_digest):
                msg = "RECORD.sig claimed RECORD hash {0} != computed hash {1}."
                raise BadWheelFile(msg.format(payload['hash'],
                                              native(record_digest)))

        # Register the expected sha256 of every hashed entry in RECORD.
        reader = csv.reader((native(r) for r in record.splitlines()))
        for row in reader:
            filename = row[0]
            hash = row[1]
            if not hash:
                if filename not in (record_name, sig_name):
                    sys.stderr.write("%s has no hash!\n" % filename)
                continue
            algo, data = row[1].split('=', 1)
            assert algo == "sha256", "Unsupported hash algorithm"
            zipfile.set_expected_hash(filename, urlsafe_b64decode(binary(data)))
class VerifyingZipFile(zipfile.ZipFile):
    """ZipFile that can assert that each of its extracted contents matches
    an expected sha256 hash. Note that each file must be completly read in
    order for its hash to be checked."""

    def __init__(self, file, mode="r",
                 compression=zipfile.ZIP_STORED,
                 allowZip64=False):
        zipfile.ZipFile.__init__(self, file, mode, compression, allowZip64)
        # When strict, open() refuses entries that have no registered hash.
        self.strict = False
        # entry name -> expected digest bytes (None means "don't verify").
        self._expected_hashes = {}
        self._hash_algorithm = hashlib.sha256

    def set_expected_hash(self, name, hash):
        """
        :param name: name of zip entry
        :param hash: bytes of hash (or None for "don't care")
        """
        self._expected_hashes[name] = hash

    def open(self, name_or_info, mode="r", pwd=None):
        """Return file-like object for 'name'."""
        # A non-monkey-patched version would contain most of zipfile.py
        ef = zipfile.ZipFile.open(self, name_or_info, mode, pwd)
        if isinstance(name_or_info, zipfile.ZipInfo):
            name = name_or_info.filename
        else:
            name = name_or_info
        if (name in self._expected_hashes
                and self._expected_hashes[name] != None):
            expected_hash = self._expected_hashes[name]
            try:
                _update_crc_orig = ef._update_crc
            except AttributeError:
                # _update_crc is a CPython implementation detail; without it
                # we can only fall back to no verification.
                warnings.warn('Need ZipExtFile._update_crc to implement '
                              'file hash verification (in Python >= 2.7)')
                return ef
            running_hash = self._hash_algorithm()
            # Piggy-back the hash update on the CRC update so the digest is
            # complete exactly when the entry has been fully read.
            if hasattr(ef, '_eof'):  # py33
                def _update_crc(data):
                    _update_crc_orig(data)
                    running_hash.update(data)
                    if ef._eof and running_hash.digest() != expected_hash:
                        raise BadWheelFile("Bad hash for file %r" % ef.name)
            else:
                def _update_crc(data, eof=None):
                    _update_crc_orig(data, eof=eof)
                    running_hash.update(data)
                    if eof and running_hash.digest() != expected_hash:
                        raise BadWheelFile("Bad hash for file %r" % ef.name)
            ef._update_crc = _update_crc
        elif self.strict and name not in self._expected_hashes:
            raise BadWheelFile("No expected hash for file %r" % ef.name)
        return ef

    def pop(self):
        """Truncate the last file off this zipfile.

        Assumes infolist() is in the same order as the files (true for
        ordinary zip files created by Python)"""
        if not self.fp:
            raise RuntimeError(
                "Attempt to pop from ZIP archive that was already closed")
        last = self.infolist().pop()
        del self.NameToInfo[last.filename]
        # Rewind to where the last entry's local header began and cut there.
        self.fp.seek(last.header_offset, os.SEEK_SET)
        self.fp.truncate()
        self._didModify = True
| mit |
eaas-framework/virtualbox | src/VBox/Devices/EFI/Firmware/BaseTools/Source/Python/Ecc/Xml/XmlRoutines.py | 11 | 7215 | ## @file
# This is an XML API that uses a syntax similar to XPath, but it is written in
# standard python so that no extra python packages are required to use it.
#
# Copyright (c) 2007, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
import xml.dom.minidom
## Create an element of XML
#
# @param Name           Tag name of the new element.
# @param String         Text content of the new element ('' or None for none).
# @param NodeList       List of children: already-built XML nodes, or
#                       [Key, Value] pairs converted to simple text elements.
# @param AttributeList  List of [Key, Value] attribute pairs.
#
# @retval Element       The created XML element.
#
def CreateXmlElement(Name, String, NodeList, AttributeList):
    Doc = xml.dom.minidom.Document()
    Element = Doc.createElement(Name)
    # Optional text content ('is not None' instead of '!= None': identity
    # comparison is the correct test for None).
    if String != '' and String is not None:
        Element.appendChild(Doc.createTextNode(String))
    for Item in NodeList:
        # isinstance() replaces the fragile 'type(Item) == type([])' check.
        if isinstance(Item, list):
            Key = Item[0]
            Value = Item[1]
            if Key != '' and Key is not None and Value != '' and Value is not None:
                Node = Doc.createElement(Key)
                Node.appendChild(Doc.createTextNode(Value))
                Element.appendChild(Node)
        else:
            # Already a constructed XML node; attach it directly.
            Element.appendChild(Item)
    for Item in AttributeList:
        Key = Item[0]
        Value = Item[1]
        if Key != '' and Key is not None and Value != '' and Value is not None:
            Element.setAttribute(Key, Value)
    return Element
## Get a list of XML nodes using XPath style syntax.
#
# Return a list of XML DOM nodes from the root Dom specified by XPath String.
# If the input Dom or String is not valid, then an empty list is returned.
#
# @param Dom    The root XML DOM node.
# @param String A XPath style path.
#
# @retval Nodes A list of XML nodes matching XPath style String.
#
def XmlList(Dom, String):
    if String == None or String == "" or Dom == None or Dom == "":
        return []
    # Start the search from the document element when given a whole document.
    if Dom.nodeType == Dom.DOCUMENT_NODE:
        Dom = Dom.documentElement
    # Accept an optional leading "/".
    Path = String[1:] if String[0] == "/" else String
    Tags = Path.split('/')
    LastDepth = len(Tags) - 1
    Candidates = [Dom]
    # Breadth-first walk: at each path depth keep only element nodes whose
    # tag matches, descending into their children until the last segment.
    for Depth, Tag in enumerate(Tags):
        Matched = []
        for Candidate in Candidates:
            if Candidate.nodeType == Candidate.ELEMENT_NODE and Candidate.tagName == Tag:
                if Depth < LastDepth:
                    Matched.extend(Candidate.childNodes)
                else:
                    Matched.append(Candidate)
        Candidates = Matched
    return Candidates
## Get a single XML node using XPath style syntax.
#
# Return a single XML DOM node from the root Dom specified by XPath String.
# If the input Dom or String is not valid, then an empty string is returned.
#
# @param Dom    The root XML DOM node.
# @param String A XPath style path.
#
# @retval Node A single XML node matching XPath style String.
#
def XmlNode(Dom, String):
    if String == None or String == "" or Dom == None or Dom == "":
        return ""
    if Dom.nodeType == Dom.DOCUMENT_NODE:
        Dom = Dom.documentElement
    # Accept an optional leading "/".
    Path = String[1:] if String[0] == "/" else String
    Tags = Path.split('/')
    Last = len(Tags) - 1
    Frontier = [Dom]
    # Depth-first along the first matching branch: take the first element
    # matching each path segment, descending until the last segment.
    for Depth in range(len(Tags)):
        for Candidate in Frontier:
            if Candidate.nodeType == Candidate.ELEMENT_NODE and Candidate.tagName == Tags[Depth]:
                if Depth < Last:
                    Frontier = Candidate.childNodes
                else:
                    return Candidate
                break
    return ""
## Get a single XML element using XPath style syntax.
#
# Return the stripped text of a single XML element from the root Dom
# specified by XPath String.
# If the input Dom or String is not valid, then an empty string is returned.
#
# @param Dom    The root XML DOM object.
# @param String A XPath style path.
#
# @retval Element Text of the XML element matching the XPath style String.
#
def XmlElement(Dom, String):
    try:
        return XmlNode(Dom, String).firstChild.data.strip()
    except Exception:
        # Was a bare 'except:', which also swallowed SystemExit and
        # KeyboardInterrupt. The expected miss is AttributeError (XmlNode
        # returned "" or the node has no text child).
        return ""
## Get a single XML element of the current node.
#
# Return the stripped text content of the current root Dom node.
# If the input Dom is not valid, then an empty string is returned.
#
# @param Dom The root XML DOM object.
#
# @retval Element Text content of the current root Dom.
#
def XmlElementData(Dom):
    try:
        return Dom.firstChild.data.strip()
    except Exception:
        # Was a bare 'except:', which also swallowed SystemExit and
        # KeyboardInterrupt; AttributeError is the expected miss.
        return ""
## Get a list of XML elements using XPath style syntax.
#
# Return a list of XML element texts from the root Dom specified by XPath
# String.
# If the input Dom or String is not valid, then an empty list is returned.
#
# @param Dom    The root XML DOM object.
# @param String A XPath style path.
#
# @retval Elements A list of XML element texts matching the XPath style String.
#
def XmlElementList(Dom, String):
    # A list comprehension (rather than map()) returns a real list on both
    # Python 2 and Python 3; Python 3's map() would hand callers a lazy
    # iterator, breaking len()/indexing at call sites.
    return [XmlElementData(Node) for Node in XmlList(Dom, String)]
## Get the XML attribute of the current node.
#
# Return a single XML attribute named Attribute from the current root Dom.
# If the input Dom or Attribute is not valid, then an empty string is returned.
#
# @param Dom       The root XML DOM object.
# @param Attribute The name of Attribute.
#
# @retval Value    The stripped attribute value, or '' when absent.
#
def XmlAttribute(Dom, Attribute):
    try:
        return Dom.getAttribute(Attribute).strip()
    except Exception:
        # Was a bare 'except:', which also swallowed SystemExit and
        # KeyboardInterrupt; AttributeError is the expected miss.
        return ''
## Get the XML node name of the current node.
#
# Return a single XML node name from the current root Dom.
# If the input Dom is not valid, then an empty string is returned.
#
# @param Dom The root XML DOM object.
#
# @retval Name The stripped node name, or '' when unavailable.
#
def XmlNodeName(Dom):
    try:
        return Dom.nodeName.strip()
    except Exception:
        # Was a bare 'except:', which also swallowed SystemExit and
        # KeyboardInterrupt; AttributeError is the expected miss.
        return ''
## Parse an XML file.
#
# Parse the input XML file named FileName and return a XML DOM it stands for.
# If the input File is not a valid XML file, then an empty string is returned.
#
# @param FileName The XML file name.
#
# @retval Dom The Dom object achieved from the XML file.
#
def XmlParseFile(FileName):
    try:
        # 'with' guarantees the handle is closed even when parsing raises;
        # the original open/parse/close sequence leaked the handle on error.
        with open(FileName) as XmlFile:
            return xml.dom.minidom.parse(XmlFile)
    except Exception as X:
        # 'except Exception, X' was Python 2-only syntax and a SyntaxError on
        # Python 3; 'as' works on 2.6+ and 3. Best-effort API: report the
        # problem and return "" like the other helpers in this module.
        print(X)
        return ""
# This acts like the main() function for the script, unless it is 'import'ed
# into another script.
if __name__ == '__main__':
    # Nothing to do here. Could do some unit tests.
    A = CreateXmlElement('AAA', 'CCC', [['AAA', '111'], ['BBB', '222']], [['A', '1'], ['B', '2']])
    B = CreateXmlElement('ZZZ', 'CCC', [['XXX', '111'], ['YYY', '222']], [['A', '1'], ['B', '2']])
    # BUG FIX: the original called CreateXmlList, a name that is not defined
    # anywhere in this module, so running the script always raised NameError.
    # Build the wrapper element with CreateXmlElement instead.
    C = CreateXmlElement('DDD', 'EEE', [A, B], [['FFF', 'GGG']])
    # print as a function works on both Python 2 and Python 3.
    print(C.toprettyxml(indent=" "))
| gpl-2.0 |
rizumu/django | tests/m2o_recursive/models.py | 282 | 1047 | """
Relating an object to itself, many-to-one
To define a many-to-one relationship between a model and itself, use
``ForeignKey('self', ...)``.
In this example, a ``Category`` is related to itself. That is, each
``Category`` has a parent ``Category``.
Set ``related_name`` to designate what the reverse relationship is called.
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Category(models.Model):
    # Human-readable category name; also used as the string representation.
    name = models.CharField(max_length=20)
    # Self-referential FK: the parent category, or None for a root category.
    # SET_NULL detaches children when the parent is deleted; the reverse
    # accessor is ``child_set``.
    parent = models.ForeignKey('self', models.SET_NULL, blank=True, null=True, related_name='child_set')

    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Person(models.Model):
    # Full display name; also used as the string representation.
    full_name = models.CharField(max_length=20)
    # Two independent self-referential FKs with distinct reverse names so the
    # reverse relationships don't clash; SET_NULL on delete, NULL allowed.
    mother = models.ForeignKey('self', models.SET_NULL, null=True, related_name='mothers_child_set')
    father = models.ForeignKey('self', models.SET_NULL, null=True, related_name='fathers_child_set')

    def __str__(self):
        return self.full_name
| bsd-3-clause |
calebtrahan/KujiIn_Python | utils/Exporter.py | 1 | 8869 | import math
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from PyQt4 import QtCore, QtGui
from PyQt4.phonon import Phonon
from pydub import AudioSegment
from main_const import *
class ExportSession():
    """Drive the export of a created session to a single mp3 file.

    If a session is playing or paused, asks the user to stop it first; if no
    session has been created, tells the user to create one; otherwise gathers
    the entrainment/ambience cut lists, asks for a destination path and opens
    the progress dialog that performs the export.
    """

    def __init__(self, main, gui, player):
        self.main = main      # main program state (session/cut bookkeeping)
        self.gui = gui        # main window, used as parent for all dialogs
        self.player = player  # playback controller owning the Phonon objects
        self.exportsession()

    def exportsession(self):
        """Validate playback/session state, then launch the export dialog."""
        playing = (self.player.entrainmentObject.state() == Phonon.PlayingState)
        paused = (self.player.entrainmentObject.state() == Phonon.PausedState)
        if playing or paused:
            quit_msg = "Stop The Loaded Session To Export The Session?"
            # BUG FIX: a QMessageBox parent must be a QWidget; this class is a
            # plain object, so the original 'self' raised a TypeError. Use the
            # main window, matching the getSaveFileName call below.
            reply = QtGui.QMessageBox.question(self.gui, 'Confirmation To Stop Session For Export',
                                               quit_msg, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
            if reply == QtGui.QMessageBox.Yes:
                self.player.entrainmentObject.stop()
                if self.gui.AmbienceOption.isChecked():
                    self.player.ambienceObject.stop()
                self.gui.statusBar.showMessage("Session Stopped For Export")
        elif not self.main.sessioncreated:
            QtGui.QMessageBox.information(None, "No Session Created",
                                          "A Session Must Be Created In Order To Export",
                                          QtGui.QMessageBox.Ok | QtGui.QMessageBox.Default,
                                          QtGui.QMessageBox.NoButton)
        elif self.main.sessioncreated:
            self.finalentrainmentlist = self.main.entrainmentplayer.cutstoplay
            # Ambience is optional: None tells the export thread to mix
            # entrainment only.
            if self.gui.AmbienceOption.isChecked():
                self.finalambiencelist = self.main.ambienceplayer.finalambience
            else:
                self.finalambiencelist = None
            # One unit of work per cut plus one for the final file write.
            self.totalshittodo = (len(self.main.cutsinsession) + 1)
            self.exportfilename = QtGui.QFileDialog.getSaveFileName(self.gui, "Where Do You Want Me To Export This Session?",
                                                                    '', "Music Files (*.mp3);;All Files (*)")
            ExportSessionDialog(self.gui, self.main, self)
class ExportThread(QThread):
    """Background worker that mixes the session's audio cuts into one mp3.

    Emits the custom ``asignal`` with either the name of the cut currently
    being processed (str) or a bool: True when writing the final file,
    False on successful completion.
    """

    def __init__(self, entrainmentfiles, ambiencefiles, cutsinsession, exportfile):
        QThread.__init__(self)
        self.entrainmentfiles = entrainmentfiles  # mp3 path per cut
        self.ambiencefiles = ambiencefiles        # matching ambience mp3s, or None
        self.exportfile = exportfile              # destination chosen by the user
        self.cutsinsession = cutsinsession        # cut metadata dicts (uses 'name')
        self.finallist = list()
        # NOTE(review): settoexit is set by the dialog on cancel but never
        # checked inside run(), so cancelling does not stop the export —
        # TODO confirm intended.
        self.settoexit = False

    def run(self):
        # Retry the whole export until the written file's length matches the
        # expected total (guards against a truncated/failed encode).
        while True:
            totallength = int()
            if self.ambiencefiles is not None:  # Ambience And Entrainment
                # The alert tone padded with silence separates the cuts.
                alertactual = AudioSegment.from_mp3(ALERTFILE)
                alertnotactual = AudioSegment.from_mp3(ALERTSILENCE)
                alertogg = alertactual.overlay(alertnotactual)
                for x, i in enumerate(self.entrainmentfiles):
                    self.emit(SIGNAL("asignal"), self.cutsinsession[x]["name"])
                    nextentrainment = AudioSegment.from_mp3(i)
                    nextambience = AudioSegment.from_mp3(self.ambiencefiles[x])
                    # Mix the cut's entrainment with its ambience track.
                    output = nextentrainment.overlay(nextambience)
                    tempoutputname = os.path.join(TEMPDIRECTORY, "Export", str(x) + ".mp3")
                    output.export(tempoutputname, format="mp3")
                    if x == 0:
                        finallist = output
                        finallist += alertogg
                        totallength += len(alertogg)
                    else:
                        finallist += output
                        # No trailing alert after the very last cut.
                        if i != self.entrainmentfiles[-1]:
                            finallist += alertogg
                            totallength += len(alertogg)
                    # Running total of mixed audio, in milliseconds.
                    totallength += len(output)
                self.emit(SIGNAL("asignal"), True)
                exportfilename = str(self.exportfile)
                finallist.export(exportfilename, format="mp3")
            else:  # Entrainment Only
                alertactual = AudioSegment.from_mp3(ALERTFILE)
                alertnotactual = AudioSegment.from_mp3(ALERTSILENCE)
                alertogg = alertactual.overlay(alertnotactual)
                for x, i in enumerate(self.entrainmentfiles):
                    self.emit(SIGNAL("asignal"), self.cutsinsession[x]["name"])
                    if x == 0:
                        nextentrainment = AudioSegment.from_mp3(i)
                        nextentrainment += alertogg
                        #totallength += len(alertogg)
                    else:
                        nextentrainment += AudioSegment.from_mp3(i)
                        if i != self.entrainmentfiles[-1]:
                            nextentrainment += alertogg
                            #totallength += len(alertogg))
                # self.emit(SIGNAL("asignal"))
                self.emit(SIGNAL("asignal"), True)
                # In this branch the expected length is simply the length of
                # the concatenated segment.
                totallength = len(nextentrainment)
                exportfilename = str(self.exportfile)
                nextentrainment.export(exportfilename, format="mp3")
            # Sanity check: compare exported duration with the expected one,
            # at whole-minute granularity.
            exportfiled = AudioSegment.from_mp3(exportfilename)
            exportfileminutes = math.trunc((len(exportfiled) / 1000) / 60)
            totallengthminutes = math.trunc((totallength / 1000) / 60)
            if exportfileminutes == totallengthminutes:
                # False tells the dialog the export finished successfully.
                self.emit(SIGNAL("asignal"), False)
                # Clean up the per-cut temp files.
                [os.remove(os.path.join(TEMPDIRECTORY, 'Export', x)) for x in os.listdir(os.path.join(TEMPDIRECTORY, 'Export'))]
                break
            else:
                print('The Exported File With Length %s Does Not Match The Totallength Variable With Length %s' %
                      (exportfileminutes, totallengthminutes))
                print("Trying Export Again...")
                continue
class ExportSessionDialog(QDialog):
    """Modal progress dialog that owns the ExportThread and reports progress."""

    def __init__(self, maingui, mainprogram, exporter):
        QDialog.__init__(self, maingui)
        self.percentsessioncreated = int()
        self.totalshittodo = int()
        self.resize(391, 114)
        self.main = mainprogram
        self.main.sessioncreated = False
        self.gui = maingui
        self.progress = int()  # accumulated percentage (float-ish counter)
        self.exporter = exporter
        # Widget layout (fixed geometry, no layout manager).
        self.exportsessionProgressBar = QtGui.QProgressBar(self)
        self.exportsessionProgressBar.setGeometry(QtCore.QRect(20, 40, 361, 23))
        self.exportsessionCancelButton = QtGui.QPushButton(self)
        self.exportsessionCancelButton.setGeometry(QtCore.QRect(300, 70, 84, 30))
        self.topLabel = QtGui.QLabel(self)
        self.topLabel.setGeometry(QtCore.QRect(20, 10, 351, 20))
        self.exportStatusUpdate = QtGui.QLabel(self)
        self.exportStatusUpdate.setGeometry(QtCore.QRect(20, 70, 271, 16))
        self.setWindowTitle("Exporting Session")
        self.topLabel.setText("(This May Take A While)")
        self.exportsessionCancelButton.setText("Cancel")
        self.exportsessionProgressBar.setValue(0)
        # Old-style PyQt4 signal wiring.
        QtCore.QObject.connect(self.exportsessionCancelButton, QtCore.SIGNAL("clicked()"), self.cancelsessioncreation)
        QtCore.QObject.connect(self.exportsessionProgressBar, QtCore.SIGNAL("valueChanged(int)"), self.seeifexportdone)
        # Blocks here; the worker thread starts from showEvent().
        self.exec_()

    def cancelsessioncreation(self):
        # Triggers closeEvent(), which flags the worker to exit.
        self.close()

    def seeifexportdone(self):
        # Completion is detected via the progress bar reaching 100%.
        if self.exportsessionProgressBar.value() == 100:
            self.gui.statusBar.showMessage("Session Exported To: ( %s )" % self.exporter.exportfilename)
            self.accept()

    def callthreads(self):
        """Create, wire up and start the export worker thread."""
        self.sessionexporter = ExportThread(self.exporter.finalentrainmentlist,
                                            self.exporter.finalambiencelist,
                                            self.main.cutsinsession,
                                            self.exporter.exportfilename)
        # Queued connection: the worker emits from its own thread.
        QObject.connect(self.sessionexporter, SIGNAL("asignal"), self.sessionexporterupdates, Qt.QueuedConnection)
        self.sessionexporter.start()
        self.gui.statusBar.showMessage("Exporting Session...")

    def sessionexporterupdates(self, cutname):
        """Advance the progress bar; ``cutname`` is a cut name (str) or a
        bool (True = writing final file, False = done)."""
        percent = 100 / (self.exporter.totalshittodo + 1)
        self.progress += percent
        self.exportsessionProgressBar.setValue(int(self.progress))
        if isinstance(cutname, str):
            self.exportStatusUpdate.setText("Building %s..." % cutname)
        else:
            if cutname:
                self.exportStatusUpdate.setText("Building Final Session File...")
            else:
                pass
        # Reset To 0
        # Print To Label...An Error Occured, Retrying

    def showEvent(self, QShowEvent):
        # Start the worker only once the dialog is actually visible.
        self.callthreads()

    def closeEvent(self, QCloseEvent):
        # NOTE(review): the worker never reads settoexit, so this flag does
        # not actually stop the export — TODO confirm.
        self.sessionexporter.settoexit = True
        self.gui.statusBar.showMessage("Session Exporter Cancelled", 3000)
        self.accept()
highweb-project/highweb-webcl-html5spec | build/android/devil/android/device_temp_file.py | 13 | 1868 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A temp file that automatically gets pushed and deleted from a device."""
# pylint: disable=W0622
import posixpath
import random
import threading
from devil.android import device_errors
from devil.utils import cmd_helper
class DeviceTempFile(object):
    """A temporary file path on an Android device, deleted on close().

    Usable as a context manager; only the *path* is reserved — nothing is
    created on the device until a caller writes to it.
    """

    def __init__(self, adb, suffix='', prefix='temp_file', dir='/data/local/tmp'):
        """Find an unused temporary file path on the device.

        When this object is closed, the file will be deleted on the device.

        Args:
          adb: An instance of AdbWrapper
          suffix: The suffix of the name of the temp file.
          prefix: The prefix of the name of the temp file.
          dir: The directory on the device where to place the temp file.
        """
        self._adb = adb
        # Python's random module use 52-bit numbers according to its docs.
        # A 52-bit random hex component makes collisions effectively
        # impossible without having to probe the device for existing names.
        random_hex = hex(random.randint(0, 2 ** 52))[2:]
        self.name = posixpath.join(dir, '%s-%s%s' % (prefix, random_hex, suffix))
        # Pre-quoted form for safe interpolation into shell commands.
        self.name_quoted = cmd_helper.SingleQuote(self.name)

    def close(self):
        """Deletes the temporary file from the device."""
        # ignore exception if the file is already gone.
        def delete_temporary_file():
            try:
                self._adb.Shell('rm -f %s' % self.name_quoted, expect_status=None)
            except device_errors.AdbCommandFailedError:
                # file does not exist on Android version without 'rm -f' support (ICS)
                pass

        # It shouldn't matter when the temp file gets deleted, so do so
        # asynchronously.
        threading.Thread(
            target=delete_temporary_file,
            name='delete_temporary_file(%s)' % self._adb.GetDeviceSerial()).start()

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.close()
| bsd-3-clause |
kevalds51/sympy | sympy/sets/sets.py | 17 | 57589 | from __future__ import print_function, division
from itertools import product
from sympy.core.sympify import _sympify, sympify
from sympy.core.basic import Basic
from sympy.core.singleton import Singleton, S
from sympy.core.evalf import EvalfMixin
from sympy.core.numbers import Float
from sympy.core.compatibility import iterable, with_metaclass, ordered, range
from sympy.core.evaluate import global_evaluate
from sympy.core.decorators import deprecated
from sympy.core.mul import Mul
from sympy.core.relational import Eq
from sympy.sets.contains import Contains
from mpmath import mpi, mpf
from sympy.logic.boolalg import And, Or, Not, true, false
from sympy.utilities import subsets
class Set(Basic):
    """
    The base class for any kind of set.
    This is not meant to be used directly as a container of items. It does not
    behave like the builtin ``set``; see :class:`FiniteSet` for that.
    Real intervals are represented by the :class:`Interval` class and unions of
    sets by the :class:`Union` class. The empty set is represented by the
    :class:`EmptySet` class and available as a singleton as ``S.EmptySet``.
    """
    # Dispatch flags; subclasses override the ones that apply to them.
    # ``None`` means "unknown / not applicable".
    is_number = False
    is_iterable = False
    is_interval = False
    is_FiniteSet = False
    is_Interval = False
    is_ProductSet = False
    is_Union = False
    is_Intersection = None
    is_EmptySet = None
    is_UniversalSet = None
    is_Complement = None
    is_ComplexRegion = False
    @staticmethod
    def _infimum_key(expr):
        """
        Return infimum (if possible) else S.Infinity.
        """
        try:
            infimum = expr.inf
            assert infimum.is_comparable
        except (NotImplementedError,
                AttributeError, AssertionError, ValueError):
            infimum = S.Infinity
        return infimum
    def union(self, other):
        """
        Returns the union of 'self' and 'other'.
        Examples
        ========
        As a shortcut it is possible to use the '+' operator:
        >>> from sympy import Interval, FiniteSet
        >>> Interval(0, 1).union(Interval(2, 3))
        [0, 1] U [2, 3]
        >>> Interval(0, 1) + Interval(2, 3)
        [0, 1] U [2, 3]
        >>> Interval(1, 2, True, True) + FiniteSet(2, 3)
        (1, 2] U {3}
        Similarly it is possible to use the '-' operator for set differences:
        >>> Interval(0, 2) - Interval(0, 1)
        (1, 2]
        >>> Interval(1, 3) - FiniteSet(2)
        [1, 2) U (2, 3]
        """
        return Union(self, other)
    def intersect(self, other):
        """
        Returns the intersection of 'self' and 'other'.
        >>> from sympy import Interval
        >>> Interval(1, 3).intersect(Interval(1, 2))
        [1, 2]
        >>> from sympy import imageset, Lambda, symbols, S
        >>> n, m = symbols('n m')
        >>> a = imageset(Lambda(n, 2*n), S.Integers)
        >>> a.intersect(imageset(Lambda(m, 2*m + 1), S.Integers))
        EmptySet()
        """
        return Intersection(self, other)
    def intersection(self, other):
        """
        Alias for :meth:`intersect()`
        """
        return self.intersect(other)
    def _intersect(self, other):
        """
        This function should only be used internally
        self._intersect(other) returns a new, intersected set if self knows how
        to intersect itself with other, otherwise it returns ``None``
        When making a new set class you can be assured that other will not
        be a :class:`Union`, :class:`FiniteSet`, or :class:`EmptySet`
        Used within the :class:`Intersection` class
        """
        return None
    def is_disjoint(self, other):
        """
        Returns True if 'self' and 'other' are disjoint
        Examples
        ========
        >>> from sympy import Interval
        >>> Interval(0, 2).is_disjoint(Interval(1, 2))
        False
        >>> Interval(0, 2).is_disjoint(Interval(3, 4))
        True
        References
        ==========
        .. [1] http://en.wikipedia.org/wiki/Disjoint_sets
        """
        return self.intersect(other) == S.EmptySet
    def isdisjoint(self, other):
        """
        Alias for :meth:`is_disjoint()`
        """
        return self.is_disjoint(other)
    def _union(self, other):
        """
        This function should only be used internally
        self._union(other) returns a new, joined set if self knows how
        to join itself with other, otherwise it returns ``None``.
        It may also return a python set of SymPy Sets if they are somehow
        simpler. If it does this it must be idempotent i.e. the sets returned
        must return ``None`` with _union'ed with each other
        Used within the :class:`Union` class
        """
        return None
    def complement(self, universe):
        """
        The complement of 'self' w.r.t the given the universe.
        Examples
        ========
        >>> from sympy import Interval, S
        >>> Interval(0, 1).complement(S.Reals)
        (-oo, 0) U (1, oo)
        >>> Interval(0, 1).complement(S.UniversalSet)
        UniversalSet() \ [0, 1]
        """
        return Complement(universe, self)
    def _complement(self, other):
        # this behaves as other - self
        if isinstance(other, ProductSet):
            # For each set consider it or it's complement
            # We need at least one of the sets to be complemented
            # Consider all 2^n combinations.
            # We can conveniently represent these options easily using a
            # ProductSet
            # XXX: this doesn't work if the dimentions of the sets isn't same.
            # A - B is essentially same as A if B has a different
            # dimentionality than A
            switch_sets = ProductSet(FiniteSet(o, o - s) for s, o in
                                     zip(self.sets, other.sets))
            product_sets = (ProductSet(*set) for set in switch_sets)
            # Union of all combinations but this one
            return Union(p for p in product_sets if p != other)
        elif isinstance(other, Interval):
            if isinstance(self, Interval) or isinstance(self, FiniteSet):
                return Intersection(other, self.complement(S.Reals))
        elif isinstance(other, Union):
            # (X U Y) - self == (X - self) U (Y - self)
            return Union(o - self for o in other.args)
        elif isinstance(other, Complement):
            # (A - B) - self == A - (B U self)
            return Complement(other.args[0], Union(other.args[1], self))
        elif isinstance(other, EmptySet):
            return S.EmptySet
        elif isinstance(other, FiniteSet):
            # Keep only the elements not provably contained in self.
            return FiniteSet(*[el for el in other if self.contains(el) != True])
    def symmetric_difference(self, other):
        return SymmetricDifference(self, other)
    def _symmetric_difference(self, other):
        return Union(Complement(self, other), Complement(other, self))
    @property
    def inf(self):
        """
        The infimum of 'self'
        Examples
        ========
        >>> from sympy import Interval, Union
        >>> Interval(0, 1).inf
        0
        >>> Union(Interval(0, 1), Interval(2, 3)).inf
        0
        """
        return self._inf
    @property
    def _inf(self):
        raise NotImplementedError("(%s)._inf" % self)
    @property
    def sup(self):
        """
        The supremum of 'self'
        Examples
        ========
        >>> from sympy import Interval, Union
        >>> Interval(0, 1).sup
        1
        >>> Union(Interval(0, 1), Interval(2, 3)).sup
        3
        """
        return self._sup
    @property
    def _sup(self):
        raise NotImplementedError("(%s)._sup" % self)
    def contains(self, other):
        """
        Returns True if 'other' is contained in 'self' as an element.
        As a shortcut it is possible to use the 'in' operator:
        Examples
        ========
        >>> from sympy import Interval
        >>> Interval(0, 1).contains(0.5)
        True
        >>> 0.5 in Interval(0, 1)
        True
        """
        other = sympify(other, strict=True)
        ret = self._contains(other)
        if ret is None:
            # Fallback: if every element compares provably unequal to
            # ``other``, membership is False (only works for iterable sets).
            if all(Eq(i, other) == False for i in self):
                return False
            ret = Contains(other, self, evaluate=False)
        return ret
    def _contains(self, other):
        raise NotImplementedError("(%s)._contains(%s)" % (self, other))
    @deprecated(useinstead="is_subset", issue=7460, deprecated_since_version="0.7.6")
    def subset(self, other):
        """
        Returns True if 'other' is a subset of 'self'.
        """
        return other.is_subset(self)
    def is_subset(self, other):
        """
        Returns True if 'self' is a subset of 'other'.
        Examples
        ========
        >>> from sympy import Interval
        >>> Interval(0, 0.5).is_subset(Interval(0, 1))
        True
        >>> Interval(0, 1).is_subset(Interval(0, 1, left_open=True))
        False
        """
        if isinstance(other, Set):
            return self.intersect(other) == self
        else:
            raise ValueError("Unknown argument '%s'" % other)
    def issubset(self, other):
        """
        Alias for :meth:`is_subset()`
        """
        return self.is_subset(other)
    def is_proper_subset(self, other):
        """
        Returns True if 'self' is a proper subset of 'other'.
        Examples
        ========
        >>> from sympy import Interval
        >>> Interval(0, 0.5).is_proper_subset(Interval(0, 1))
        True
        >>> Interval(0, 1).is_proper_subset(Interval(0, 1))
        False
        """
        if isinstance(other, Set):
            return self != other and self.is_subset(other)
        else:
            raise ValueError("Unknown argument '%s'" % other)
    def is_superset(self, other):
        """
        Returns True if 'self' is a superset of 'other'.
        Examples
        ========
        >>> from sympy import Interval
        >>> Interval(0, 0.5).is_superset(Interval(0, 1))
        False
        >>> Interval(0, 1).is_superset(Interval(0, 1, left_open=True))
        True
        """
        if isinstance(other, Set):
            return other.is_subset(self)
        else:
            raise ValueError("Unknown argument '%s'" % other)
    def issuperset(self, other):
        """
        Alias for :meth:`is_superset()`
        """
        return self.is_superset(other)
    def is_proper_superset(self, other):
        """
        Returns True if 'self' is a proper superset of 'other'.
        Examples
        ========
        >>> from sympy import Interval
        >>> Interval(0, 1).is_proper_superset(Interval(0, 0.5))
        True
        >>> Interval(0, 1).is_proper_superset(Interval(0, 1))
        False
        """
        if isinstance(other, Set):
            return self != other and self.is_superset(other)
        else:
            raise ValueError("Unknown argument '%s'" % other)
    def _eval_powerset(self):
        raise NotImplementedError('Power set not defined for: %s' % self.func)
    def powerset(self):
        """
        Find the Power set of 'self'.
        Examples
        ========
        >>> from sympy import FiniteSet, EmptySet
        >>> A = EmptySet()
        >>> A.powerset()
        {EmptySet()}
        >>> A = FiniteSet(1, 2)
        >>> a, b, c = FiniteSet(1), FiniteSet(2), FiniteSet(1, 2)
        >>> A.powerset() == FiniteSet(a, b, c, EmptySet())
        True
        References
        ==========
        .. [1] http://en.wikipedia.org/wiki/Power_set
        """
        return self._eval_powerset()
    @property
    def measure(self):
        """
        The (Lebesgue) measure of 'self'
        Examples
        ========
        >>> from sympy import Interval, Union
        >>> Interval(0, 1).measure
        1
        >>> Union(Interval(0, 1), Interval(2, 3)).measure
        2
        """
        return self._measure
    @property
    def boundary(self):
        """
        The boundary or frontier of a set
        A point x is on the boundary of a set S if
        1.  x is in the closure of S.
            I.e. Every neighborhood of x contains a point in S.
        2.  x is not in the interior of S.
            I.e. There does not exist an open set centered on x contained
            entirely within S.
        There are the points on the outer rim of S.  If S is open then these
        points need not actually be contained within S.
        For example, the boundary of an interval is its start and end points.
        This is true regardless of whether or not the interval is open.
        Examples
        ========
        >>> from sympy import Interval
        >>> Interval(0, 1).boundary
        {0, 1}
        >>> Interval(0, 1, True, False).boundary
        {0, 1}
        """
        return self._boundary
    @property
    def is_open(self):
        # A set is open iff it does not meet its own boundary.
        if not Intersection(self, self.boundary):
            return True
        # We can't confidently claim that an intersection exists
        return None
    @property
    def is_closed(self):
        return self.boundary.is_subset(self)
    @property
    def closure(self):
        return self + self.boundary
    @property
    def interior(self):
        return self - self.boundary
    @property
    def _boundary(self):
        raise NotImplementedError()
    def _eval_imageset(self, f):
        from sympy.sets.fancysets import ImageSet
        return ImageSet(f, self)
    @property
    def _measure(self):
        raise NotImplementedError("(%s)._measure" % self)
    def __add__(self, other):
        return self.union(other)
    def __or__(self, other):
        return self.union(other)
    def __and__(self, other):
        return self.intersect(other)
    def __mul__(self, other):
        return ProductSet(self, other)
    def __xor__(self, other):
        return SymmetricDifference(self, other)
    def __pow__(self, exp):
        # ``A**n`` is the n-fold Cartesian product A x A x ... x A.
        # BUGFIX: the previous test ``not sympify(exp).is_Integer and exp >= 0``
        # parsed as ``(not is_Integer) and (exp >= 0)``, so negative exponents
        # were never rejected.  Reject anything that is not a non-negative
        # Integer (n == 0 yields ProductSet([]) == EmptySet).
        exp = sympify(exp)
        if not (exp.is_Integer and exp >= 0):
            raise ValueError("%s: Exponent must be a positive Integer" % exp)
        return ProductSet([self]*exp)
    def __sub__(self, other):
        return Complement(self, other)
    def __contains__(self, other):
        # The builtin ``in`` must return a plain bool; refuse to guess when
        # containment stays symbolic.
        symb = self.contains(other)
        if symb not in (true, false):
            raise TypeError('contains did not evaluate to a bool: %r' % symb)
        return bool(symb)
    @property
    @deprecated(useinstead="is_subset(S.Reals)", issue=6212, deprecated_since_version="0.7.6")
    def is_real(self):
        return None
class ProductSet(Set):
    """
    Represents a Cartesian Product of Sets.
    Returns a Cartesian product given several sets as either an iterable
    or individual arguments.
    Can use '*' operator on any sets for convenient shorthand.
    Examples
    ========
    >>> from sympy import Interval, FiniteSet, ProductSet
    >>> I = Interval(0, 5); S = FiniteSet(1, 2, 3)
    >>> ProductSet(I, S)
    [0, 5] x {1, 2, 3}
    >>> (2, 2) in ProductSet(I, S)
    True
    >>> Interval(0, 1) * Interval(0, 1) # The unit square
    [0, 1] x [0, 1]
    >>> coin = FiniteSet('H', 'T')
    >>> set(coin**2)
    set([(H, H), (H, T), (T, H), (T, T)])
    Notes
    =====
    - Passes most operations down to the argument sets
    - Flattens Products of ProductSets
    References
    ==========
    .. [1] http://en.wikipedia.org/wiki/Cartesian_product
    """
    is_ProductSet = True
    def __new__(cls, *sets, **assumptions):
        # Recursively flatten nested ProductSets and plain iterables of Sets
        # into one flat list of factor sets.
        def flatten(arg):
            if isinstance(arg, Set):
                if arg.is_ProductSet:
                    return sum(map(flatten, arg.args), [])
                else:
                    return [arg]
            elif iterable(arg):
                return sum(map(flatten, arg), [])
            raise TypeError("Input must be Sets or iterables of Sets")
        sets = flatten(list(sets))
        # Degenerate cases: any empty factor (or no factors) gives the empty
        # set; a single factor is just that set.
        if EmptySet() in sets or len(sets) == 0:
            return EmptySet()
        if len(sets) == 1:
            return sets[0]
        return Basic.__new__(cls, *sets, **assumptions)
    def _eval_Eq(self, other):
        # Equality of products: same arity and factor-wise equality.
        # Returning None leaves Eq unevaluated.
        if not other.is_ProductSet:
            return
        if len(self.args) != len(other.args):
            return false
        return And(*(Eq(x, y) for x, y in zip(self.args, other.args)))
    def _contains(self, element):
        """
        'in' operator for ProductSets
        Examples
        ========
        >>> from sympy import Interval
        >>> (2, 3) in Interval(0, 5) * Interval(0, 5)
        True
        >>> (10, 10) in Interval(0, 5) * Interval(0, 5)
        False
        Passes operation on to constituent sets
        """
        try:
            if len(element) != len(self.args):
                return false
        except TypeError:  # maybe element isn't an iterable
            return false
        # Element-wise containment: each coordinate must lie in its factor.
        return And(*
            [set.contains(item) for set, item in zip(self.sets, element)])
    def _intersect(self, other):
        """
        This function should only be used internally
        See Set._intersect for docstring
        """
        # Products of different arity are disjoint; otherwise intersect
        # factor-wise.
        if not other.is_ProductSet:
            return None
        if len(other.args) != len(self.args):
            return S.EmptySet
        return ProductSet(a.intersect(b)
                for a, b in zip(self.sets, other.sets))
    def _union(self, other):
        # A product union only simplifies when the two products share their
        # first or last factor: A x B U A x C == A x (B U C), and likewise
        # on the right.  Otherwise defer (return None).
        if not other.is_ProductSet:
            return None
        if len(other.args) != len(self.args):
            return None
        if self.args[0] == other.args[0]:
            return self.args[0] * Union(ProductSet(self.args[1:]),
                                        ProductSet(other.args[1:]))
        if self.args[-1] == other.args[-1]:
            return Union(ProductSet(self.args[:-1]),
                         ProductSet(other.args[:-1])) * self.args[-1]
        return None
    @property
    def sets(self):
        return self.args
    @property
    def _boundary(self):
        # Boundary of a product: union over each coordinate i of the product
        # that takes the boundary in coordinate i and the closure (b + its
        # boundary) in every other coordinate.
        return Union(ProductSet(b + b.boundary if i != j else b.boundary
                                for j, b in enumerate(self.sets))
                                for i, a in enumerate(self.sets))
    @property
    @deprecated(useinstead="is_subset(S.Reals)", issue=6212, deprecated_since_version="0.7.6")
    def is_real(self):
        return all(set.is_real for set in self.sets)
    @property
    def is_iterable(self):
        # Iterable only when every factor can be enumerated.
        return all(set.is_iterable for set in self.sets)
    def __iter__(self):
        # ``product`` is expected to be itertools.product, imported earlier
        # in the module (not visible in this chunk) -- TODO confirm.
        if self.is_iterable:
            return product(*self.sets)
        else:
            raise TypeError("Not all constituent sets are iterable")
    @property
    def _measure(self):
        # Lebesgue measure of a product is the product of the measures.
        measure = 1
        for set in self.sets:
            measure *= set.measure
        return measure
    def __len__(self):
        # Cardinality of a finite product is the product of cardinalities.
        return Mul(*[len(s) for s in self.args])
class Interval(Set, EvalfMixin):
    """
    Represents a real interval as a Set.
    Usage:
        Returns an interval with end points "start" and "end".
        For left_open=True (default left_open is False) the interval
        will be open on the left. Similarly, for right_open=True the interval
        will be open on the right.
    Examples
    ========
    >>> from sympy import Symbol, Interval
    >>> Interval(0, 1)
    [0, 1]
    >>> Interval(0, 1, False, True)
    [0, 1)
    >>> Interval.Ropen(0, 1)
    [0, 1)
    >>> Interval.Lopen(0, 1)
    (0, 1]
    >>> Interval.open(0, 1)
    (0, 1)
    >>> a = Symbol('a', real=True)
    >>> Interval(0, a)
    [0, a]
    Notes
    =====
    - Only real end points are supported
    - Interval(a, b) with a > b will return the empty set
    - Use the evalf() method to turn an Interval into an mpmath
      'mpi' interval instance
    References
    ==========
    .. [1] http://en.wikipedia.org/wiki/Interval_%28mathematics%29
    """
    is_Interval = True
    @property
    @deprecated(useinstead="is_subset(S.Reals)", issue=6212, deprecated_since_version="0.7.6")
    def is_real(self):
        return True
    def __new__(cls, start, end, left_open=False, right_open=False):
        start = _sympify(start)
        end = _sympify(end)
        left_open = _sympify(left_open)
        right_open = _sympify(right_open)
        # Openness flags must sympify to the boolean singletons true/false.
        if not all(isinstance(a, (type(true), type(false)))
            for a in [left_open, right_open]):
            raise NotImplementedError(
                "left_open and right_open can have only true/false values, "
                "got %s and %s" % (left_open, right_open))
        inftys = [S.Infinity, S.NegativeInfinity]
        # Only allow real intervals (use symbols with 'is_real=True').
        if not all(i.is_real is not False or i in inftys for i in (start, end)):
            raise ValueError("Non-real intervals are not supported")
        # evaluate if possible
        if (end < start) == True:
            return S.EmptySet
        elif (end - start).is_negative:
            return S.EmptySet
        # A degenerate interval is either empty (if open) or a single point.
        if end == start and (left_open or right_open):
            return S.EmptySet
        if end == start and not (left_open or right_open):
            return FiniteSet(end)
        # Make sure infinite interval end points are open.
        if start == S.NegativeInfinity:
            left_open = true
        if end == S.Infinity:
            right_open = true
        return Basic.__new__(cls, start, end, left_open, right_open)
    @property
    def start(self):
        """
        The left end point of 'self'.
        This property takes the same value as the 'inf' property.
        Examples
        ========
        >>> from sympy import Interval
        >>> Interval(0, 1).start
        0
        """
        return self._args[0]
    _inf = left = start
    @classmethod
    def open(cls, a, b):
        """Return an interval including neither boundary."""
        return cls(a, b, True, True)
    @classmethod
    def Lopen(cls, a, b):
        """Return an interval not including the left boundary."""
        return cls(a, b, True, False)
    @classmethod
    def Ropen(cls, a, b):
        """Return an interval not including the right boundary."""
        return cls(a, b, False, True)
    @property
    def end(self):
        """
        The right end point of 'self'.
        This property takes the same value as the 'sup' property.
        Examples
        ========
        >>> from sympy import Interval
        >>> Interval(0, 1).end
        1
        """
        return self._args[1]
    _sup = right = end
    @property
    def left_open(self):
        """
        True if 'self' is left-open.
        Examples
        ========
        >>> from sympy import Interval
        >>> Interval(0, 1, left_open=True).left_open
        True
        >>> Interval(0, 1, left_open=False).left_open
        False
        """
        return self._args[2]
    @property
    def right_open(self):
        """
        True if 'self' is right-open.
        Examples
        ========
        >>> from sympy import Interval
        >>> Interval(0, 1, right_open=True).right_open
        True
        >>> Interval(0, 1, right_open=False).right_open
        False
        """
        return self._args[3]
    def _intersect(self, other):
        """
        This function should only be used internally
        See Set._intersect for docstring
        """
        # We only know how to intersect with other intervals
        if not other.is_Interval:
            return None
        # handle (-oo, oo)
        infty = S.NegativeInfinity, S.Infinity
        if self == Interval(*infty):
            l, r = self.left, self.right
            if l.is_real or l in infty or r.is_real or r in infty:
                return other
        # We can't intersect [0,3] with [x,6] -- we don't know if x>0 or x<0
        if not self._is_comparable(other):
            return None
        empty = False
        if self.start <= other.end and other.start <= self.end:
            # Get topology right.
            if self.start < other.start:
                start = other.start
                left_open = other.left_open
            elif self.start > other.start:
                start = self.start
                left_open = self.left_open
            else:
                # Shared left endpoint: open if either side is open there.
                start = self.start
                left_open = self.left_open or other.left_open
            if self.end < other.end:
                end = self.end
                right_open = self.right_open
            elif self.end > other.end:
                end = other.end
                right_open = other.right_open
            else:
                end = self.end
                right_open = self.right_open or other.right_open
            if end - start == 0 and (left_open or right_open):
                empty = True
        else:
            empty = True
        if empty:
            return S.EmptySet
        return Interval(start, end, left_open, right_open)
    def _complement(self, other):
        # Reals - self splits into the two outer rays, with openness flipped
        # at this interval's endpoints.
        if other == S.Reals:
            a = Interval(S.NegativeInfinity, self.start,
                         True, not self.left_open)
            b = Interval(self.end, S.Infinity, not self.right_open, True)
            return Union(a, b)
        if isinstance(other, FiniteSet):
            nums = [m for m in other.args if m.is_number]
            if nums == []:
                # Purely symbolic FiniteSet: nothing can be decided here.
                return None
        return Set._complement(self, other)
    def _union(self, other):
        """
        This function should only be used internally
        See Set._union for docstring
        """
        if other.is_Interval and self._is_comparable(other):
            from sympy.functions.elementary.miscellaneous import Min, Max
            # Non-overlapping intervals
            end = Min(self.end, other.end)
            start = Max(self.start, other.start)
            if (end < start or
               (end == start and (end not in self and end not in other))):
                return None
            else:
                start = Min(self.start, other.start)
                end = Max(self.end, other.end)
                # An endpoint of the union is open only if it is open (or
                # absent) in both operands.
                left_open = ((self.start != start or self.left_open) and
                             (other.start != start or other.left_open))
                right_open = ((self.end != end or self.right_open) and
                              (other.end != end or other.right_open))
                return Interval(start, end, left_open, right_open)
        # If I have open end points and these endpoints are contained in other
        if ((self.left_open and other.contains(self.start) is true) or
                (self.right_open and other.contains(self.end) is true)):
            # Fill in my end points and return
            open_left = self.left_open and self.start not in other
            open_right = self.right_open and self.end not in other
            new_self = Interval(self.start, self.end, open_left, open_right)
            return set((new_self, other))
        return None
    @property
    def _boundary(self):
        return FiniteSet(self.start, self.end)
    def _contains(self, other):
        if other.is_real is False:
            return false
        if self.start is S.NegativeInfinity and self.end is S.Infinity:
            # (-oo, oo) contains exactly the reals; defer when unknown.
            if other.is_real is not None:
                return other.is_real
        if self.left_open:
            expr = other > self.start
        else:
            expr = other >= self.start
        if self.right_open:
            expr = And(expr, other < self.end)
        else:
            expr = And(expr, other <= self.end)
        return _sympify(expr)
    def _eval_imageset(self, f):
        from sympy.functions.elementary.miscellaneous import Min, Max
        from sympy.solvers.solveset import solveset
        from sympy.core.function import diff, Lambda
        from sympy.series import limit
        from sympy.calculus.singularities import singularities
        # TODO: handle functions with infinitely many solutions (eg, sin, tan)
        # TODO: handle multivariate functions
        expr = f.expr
        if len(expr.free_symbols) > 1 or len(f.variables) != 1:
            return
        var = f.variables[0]
        if expr.is_Piecewise:
            # Image each piece over the part of the domain where its
            # condition holds, then union the results.
            result = S.EmptySet
            domain_set = self
            for (p_expr, p_cond) in expr.args:
                if p_cond is S.true:
                    intrvl = domain_set
                else:
                    intrvl = p_cond.as_set()
                    intrvl = Intersection(domain_set, intrvl)
                if p_expr.is_Number:
                    image = FiniteSet(p_expr)
                else:
                    image = imageset(Lambda(var, p_expr), intrvl)
                result = Union(result, image)
                # remove the part which has been `imaged`
                domain_set = Complement(domain_set, intrvl)
                if domain_set.is_EmptySet:
                    break
            return result
        if not self.start.is_comparable or not self.end.is_comparable:
            return
        try:
            sing = [x for x in singularities(expr, var)
                if x.is_real and x in self]
        except NotImplementedError:
            return
        # Edge values: take limits at open endpoints, function values at
        # closed, non-singular endpoints.
        # NOTE(review): if an endpoint is closed AND singular, _start/_end
        # stay unbound and the code below raises NameError -- pre-existing
        # limitation, TODO confirm intended.
        if self.left_open:
            _start = limit(expr, var, self.start, dir="+")
        elif self.start not in sing:
            _start = f(self.start)
        if self.right_open:
            _end = limit(expr, var, self.end, dir="-")
        elif self.end not in sing:
            _end = f(self.end)
        if len(sing) == 0:
            # No singularities: extrema occur at endpoints or at critical
            # points of f inside the interval.
            solns = list(solveset(diff(expr, var), var))
            extr = [_start, _end] + [f(x) for x in solns
                                     if x.is_real and x in self]
            start, end = Min(*extr), Max(*extr)
            left_open, right_open = False, False
            if _start <= _end:
                # the minimum or maximum value can occur simultaneously
                # on both the edge of the interval and in some interior
                # point
                if start == _start and start not in solns:
                    left_open = self.left_open
                if end == _end and end not in solns:
                    right_open = self.right_open
            else:
                if start == _end and start not in solns:
                    left_open = self.right_open
                if end == _start and end not in solns:
                    right_open = self.left_open
            return Interval(start, end, left_open, right_open)
        else:
            # Split the interval at the singularities and image each piece.
            # BUGFIX: the open/closed flags now go to Interval (they were
            # previously passed as stray extra arguments to imageset), and
            # the pair loop starts at 0 so the piece (sing[0], sing[1]) is
            # no longer skipped.
            return imageset(f, Interval(self.start, sing[0],
                                        self.left_open, True)) + \
                Union(*[imageset(f, Interval(sing[i], sing[i + 1], True, True))
                        for i in range(0, len(sing) - 1)]) + \
                imageset(f, Interval(sing[-1], self.end, True, self.right_open))
    @property
    def _measure(self):
        return self.end - self.start
    def to_mpi(self, prec=53):
        """Return an mpmath 'mpi' interval for this Interval at ``prec`` bits."""
        return mpi(mpf(self.start._eval_evalf(prec)),
            mpf(self.end._eval_evalf(prec)))
    def _eval_evalf(self, prec):
        return Interval(self.left._eval_evalf(prec),
            self.right._eval_evalf(prec),
                        left_open=self.left_open, right_open=self.right_open)
    def _is_comparable(self, other):
        # True only when all four endpoints are numerically comparable.
        is_comparable = self.start.is_comparable
        is_comparable &= self.end.is_comparable
        is_comparable &= other.start.is_comparable
        is_comparable &= other.end.is_comparable
        return is_comparable
    @property
    def is_left_unbounded(self):
        """Return ``True`` if the left endpoint is negative infinity. """
        return self.left is S.NegativeInfinity or self.left == Float("-inf")
    @property
    def is_right_unbounded(self):
        """Return ``True`` if the right endpoint is positive infinity. """
        return self.right is S.Infinity or self.right == Float("+inf")
    def as_relational(self, x):
        """Rewrite an interval in terms of inequalities and logic operators."""
        x = sympify(x)
        if self.right_open:
            right = x < self.end
        else:
            right = x <= self.end
        if self.left_open:
            left = self.start < x
        else:
            left = self.start <= x
        return And(left, right)
    def _eval_Eq(self, other):
        # Interval equality requires matching endpoints and openness;
        # defer (return None) against compound set types.
        if not other.is_Interval:
            if (other.is_Union or other.is_Complement or
                other.is_Intersection or other.is_ProductSet):
                return
            return false
        return And(Eq(self.left, other.left),
                   Eq(self.right, other.right),
                   self.left_open == other.left_open,
                   self.right_open == other.right_open)
class Union(Set, EvalfMixin):
    """
    Represents a union of sets as a :class:`Set`.
    Examples
    ========
    >>> from sympy import Union, Interval
    >>> Union(Interval(1, 2), Interval(3, 4))
    [1, 2] U [3, 4]
    The Union constructor will always try to merge overlapping intervals,
    if possible. For example:
    >>> Union(Interval(1, 2), Interval(2, 3))
    [1, 3]
    See Also
    ========
    Intersection
    References
    ==========
    .. [1] http://en.wikipedia.org/wiki/Union_%28set_theory%29
    """
    is_Union = True
    def __new__(cls, *args, **kwargs):
        evaluate = kwargs.get('evaluate', global_evaluate[0])
        # flatten inputs to merge intersections and iterables
        args = list(args)
        def flatten(arg):
            if isinstance(arg, Set):
                if arg.is_Union:
                    return sum(map(flatten, arg.args), [])
                else:
                    return [arg]
            if iterable(arg):  # and not isinstance(arg, Set) (implicit)
                return sum(map(flatten, arg), [])
            raise TypeError("Input must be Sets or iterables of Sets")
        args = flatten(args)
        # Union of no sets is EmptySet
        if len(args) == 0:
            return S.EmptySet
        # Reduce sets using known rules
        if evaluate:
            return Union.reduce(args)
        args = list(ordered(args, Set._infimum_key))
        return Basic.__new__(cls, *args)
    @staticmethod
    def reduce(args):
        """
        Simplify a :class:`Union` using known rules
        We first start with global rules like
        'Merge all FiniteSets'
        Then we iterate through all pairs and ask the constituent sets if they
        can simplify themselves with any other constituent
        """
        # ===== Global Rules =====
        # Merge all finite sets
        finite_sets = [x for x in args if x.is_FiniteSet]
        if len(finite_sets) > 1:
            a = (x for set in finite_sets for x in set)
            finite_set = FiniteSet(*a)
            args = [finite_set] + [x for x in args if not x.is_FiniteSet]
        # ===== Pair-wise Rules =====
        # Here we depend on rules built into the constituent sets
        args = set(args)
        new_args = True
        while(new_args):
            for s in args:
                new_args = False
                for t in args - set((s,)):
                    new_set = s._union(t)
                    # This returns None if s does not know how to union
                    # with t. Returns the newly unioned set otherwise
                    if new_set is not None:
                        if not isinstance(new_set, set):
                            new_set = set((new_set, ))
                        new_args = (args - set((s, t))).union(new_set)
                        break
                if new_args:
                    args = new_args
                    break
        if len(args) == 1:
            return args.pop()
        else:
            return Union(args, evaluate=False)
    def _complement(self, universe):
        # DeMorgan's Law
        return Intersection(s.complement(universe) for s in self.args)
    @property
    def _inf(self):
        # We use Min so that sup is meaningful in combination with symbolic
        # interval end points.
        from sympy.functions.elementary.miscellaneous import Min
        return Min(*[set.inf for set in self.args])
    @property
    def _sup(self):
        # We use Max so that sup is meaningful in combination with symbolic
        # end points.
        from sympy.functions.elementary.miscellaneous import Max
        return Max(*[set.sup for set in self.args])
    def _contains(self, other):
        # Membership in a union: member of any constituent.
        or_args = [the_set.contains(other) for the_set in self.args]
        return Or(*or_args)
    @property
    def _measure(self):
        # Measure of a union is the sum of the measures of the sets minus
        # the sum of their pairwise intersections plus the sum of their
        # triple-wise intersections minus ... etc... (inclusion-exclusion)
        # Sets is a collection of intersections and a set of elementary
        # sets which made up those intersections (called "sos" for set of sets)
        # An example element might of this list might be:
        #    ( {A,B,C}, A.intersect(B).intersect(C) )
        # Start with just elementary sets (  ({A}, A), ({B}, B), ... )
        # Then get and subtract (  ({A,B}, (A int B), ... ) while non-zero
        sets = [(FiniteSet(s), s) for s in self.args]
        measure = 0
        parity = 1
        while sets:
            # Add up the measure of these sets and add or subtract it to total
            measure += parity * sum(inter.measure for sos, inter in sets)
            # For each intersection in sets, compute the intersection with every
            # other set not already part of the intersection.
            sets = ((sos + FiniteSet(newset), newset.intersect(intersection))
                    for sos, intersection in sets for newset in self.args
                    if newset not in sos)
            # Clear out sets with no measure
            sets = [(sos, inter) for sos, inter in sets if inter.measure != 0]
            # Clear out duplicates
            sos_list = []
            sets_list = []
            for set in sets:
                if set[0] in sos_list:
                    continue
                else:
                    sos_list.append(set[0])
                    sets_list.append(set)
            sets = sets_list
            # Flip Parity - next time subtract/add if we added/subtracted here
            parity *= -1
        return measure
    @property
    def _boundary(self):
        def boundary_of_set(i):
            """ The boundary of set i minus interior of all other sets """
            b = self.args[i].boundary
            for j, a in enumerate(self.args):
                if j != i:
                    b = b - a.interior
            return b
        return Union(map(boundary_of_set, range(len(self.args))))
    def _eval_imageset(self, f):
        # Image of a union is the union of the images.
        return Union(imageset(f, arg) for arg in self.args)
    def as_relational(self, symbol):
        """Rewrite a Union in terms of equalities and logic operators. """
        return Or(*[set.as_relational(symbol) for set in self.args])
    @property
    def is_iterable(self):
        return all(arg.is_iterable for arg in self.args)
    def _eval_evalf(self, prec):
        try:
            return Union(set._eval_evalf(prec) for set in self.args)
        except Exception:
            raise TypeError("Not all sets are evalf-able")
    def __iter__(self):
        import itertools
        # roundrobin recipe adapted from the itertools documentation:
        # https://docs.python.org/2/library/itertools.html#recipes
        def roundrobin(*iterables):
            "roundrobin('ABC', 'D', 'EF') --> A D E B F C"
            # Recipe credited to George Sakkis.
            # BUGFIX: the original cycled over ``iter(it).next`` bound
            # methods, which exist only on Python 2; cycle the iterators
            # themselves and advance them with the builtin ``next`` so the
            # code works on both Python 2 and 3.
            pending = len(iterables)
            nexts = itertools.cycle(iter(it) for it in iterables)
            while pending:
                try:
                    for it in nexts:
                        yield next(it)
                except StopIteration:
                    # One iterator is exhausted; drop it from the cycle.
                    pending -= 1
                    nexts = itertools.cycle(itertools.islice(nexts, pending))
        if all(set.is_iterable for set in self.args):
            return roundrobin(*(iter(arg) for arg in self.args))
        else:
            raise TypeError("Not all constituent sets are iterable")
    @property
    @deprecated(useinstead="is_subset(S.Reals)", issue=6212, deprecated_since_version="0.7.6")
    def is_real(self):
        return all(set.is_real for set in self.args)
class Intersection(Set):
"""
Represents an intersection of sets as a :class:`Set`.
Examples
========
>>> from sympy import Intersection, Interval
>>> Intersection(Interval(1, 3), Interval(2, 4))
[2, 3]
We often use the .intersect method
>>> Interval(1,3).intersect(Interval(2,4))
[2, 3]
See Also
========
Union
References
==========
.. [1] http://en.wikipedia.org/wiki/Intersection_%28set_theory%29
"""
is_Intersection = True
def __new__(cls, *args, **kwargs):
evaluate = kwargs.get('evaluate', global_evaluate[0])
# flatten inputs to merge intersections and iterables
args = list(args)
def flatten(arg):
if isinstance(arg, Set):
if arg.is_Intersection:
return sum(map(flatten, arg.args), [])
else:
return [arg]
if iterable(arg): # and not isinstance(arg, Set) (implicit)
return sum(map(flatten, arg), [])
raise TypeError("Input must be Sets or iterables of Sets")
args = flatten(args)
if len(args) == 0:
raise TypeError("Intersection expected at least one argument")
# args can't be ordered for Partition see issue #9608
if 'Partition' not in [type(a).__name__ for a in args]:
args = list(ordered(args, Set._infimum_key))
# Reduce sets using known rules
if evaluate:
return Intersection.reduce(args)
return Basic.__new__(cls, *args)
@property
def is_iterable(self):
return any(arg.is_iterable for arg in self.args)
@property
def _inf(self):
raise NotImplementedError()
@property
def _sup(self):
raise NotImplementedError()
def _eval_imageset(self, f):
return Intersection(imageset(f, arg) for arg in self.args)
def _contains(self, other):
from sympy.logic.boolalg import And
return And(*[set.contains(other) for set in self.args])
def __iter__(self):
for s in self.args:
if s.is_iterable:
other_sets = set(self.args) - set((s,))
other = Intersection(other_sets, evaluate=False)
return (x for x in s if x in other)
raise ValueError("None of the constituent sets are iterable")
@staticmethod
def reduce(args):
"""
Simplify an intersection using known rules
We first start with global rules like
'if any empty sets return empty set' and 'distribute any unions'
Then we iterate through all pairs and ask the constituent sets if they
can simplify themselves with any other constituent
"""
# ===== Global Rules =====
# If any EmptySets return EmptySet
if any(s.is_EmptySet for s in args):
return S.EmptySet
# If any FiniteSets see which elements of that finite set occur within
# all other sets in the intersection
for s in args:
if s.is_FiniteSet:
other_args = [a for a in args if a != s]
res = FiniteSet(*[x for x in s
if all(other.contains(x) == True for other in other_args)])
unk = [x for x in s
if any(other.contains(x) not in (True, False) for other in other_args)]
if unk:
other_sets = Intersection(*other_args)
if other_sets.is_EmptySet:
return EmptySet()
res += Intersection(s.func(*unk), other_sets, evaluate=False)
return res
# If any of the sets are unions, return a Union of Intersections
for s in args:
if s.is_Union:
other_sets = set(args) - set((s,))
if len(other_sets) > 0:
other = Intersection(other_sets)
return Union(Intersection(arg, other) for arg in s.args)
else:
return Union(arg for arg in s.args)
for s in args:
if s.is_Complement:
other_sets = args + [s.args[0]]
other_sets.remove(s)
return Complement(Intersection(*other_sets), s.args[1])
# At this stage we are guaranteed not to have any
# EmptySets, FiniteSets, or Unions in the intersection
# ===== Pair-wise Rules =====
# Here we depend on rules built into the constituent sets
args = set(args)
new_args = True
while(new_args):
for s in args:
new_args = False
for t in args - set((s,)):
new_set = s._intersect(t)
# This returns None if s does not know how to intersect
# with t. Returns the newly intersected set otherwise
if new_set is not None:
new_args = (args - set((s, t))).union(set((new_set, )))
break
if new_args:
args = new_args
break
if len(args) == 1:
return args.pop()
else:
return Intersection(args, evaluate=False)
def as_relational(self, symbol):
    """Rewrite an Intersection in terms of equalities and logic operators.

    Returns the conjunction (``And``) of each constituent set's own
    relational form evaluated at ``symbol``.
    """
    # Loop variable renamed from ``set`` to avoid shadowing the builtin.
    return And(*[s.as_relational(symbol) for s in self.args])
class Complement(Set, EvalfMixin):
    """Represents the set difference or relative complement of a set with
    another set.

    `A - B = \{x \in A| x \\notin B\}`

    Examples
    ========

    >>> from sympy import Complement, FiniteSet
    >>> Complement(FiniteSet(0, 1, 2), FiniteSet(1))
    {0, 2}

    See Also
    =========

    Intersection, Union

    References
    ==========

    .. [1] http://mathworld.wolfram.com/ComplementSet.html
    """

    is_Complement = True

    def __new__(cls, a, b, evaluate=True):
        # evaluate=False builds the unevaluated node; reduce() relies on
        # this to avoid infinite recursion when nothing simplifies.
        if evaluate:
            return Complement.reduce(a, b)
        return Basic.__new__(cls, a, b)

    @staticmethod
    def reduce(A, B):
        """
        Simplify a :class:`Complement`.
        """
        if B == S.UniversalSet:
            return EmptySet()
        if isinstance(B, Union):
            # A - (X u Y) == (A - X) n (A - Y)
            return Intersection(s.complement(A) for s in B.args)
        result = B._complement(A)
        # Identity check instead of ``!= None``: with symbolic operands a
        # rich ``__ne__`` may be invoked needlessly (PEP 8 E711).
        if result is not None:
            return result
        else:
            return Complement(A, B, evaluate=False)

    def _contains(self, other):
        """Membership: other is in A - B iff it is in A and not in B."""
        A = self.args[0]
        B = self.args[1]
        return And(A.contains(other), Not(B.contains(other)))
class EmptySet(with_metaclass(Singleton, Set)):
    """
    Represents the empty set. The empty set is available as a singleton
    as S.EmptySet.

    Examples
    ========

    >>> from sympy import S, Interval
    >>> S.EmptySet
    EmptySet()

    >>> Interval(1, 2).intersect(S.EmptySet)
    EmptySet()

    See Also
    ========

    UniversalSet

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Empty_set
    """
    is_EmptySet = True
    is_FiniteSet = True

    def _intersect(self, other):
        # The empty set absorbs under intersection.
        return S.EmptySet

    @property
    def _measure(self):
        return 0

    def _contains(self, other):
        # sympy's boolean ``false``, not Python's ``False``.
        return false

    def as_relational(self, symbol):
        return False

    def __len__(self):
        return 0

    def _union(self, other):
        # The empty set is the identity element of union.
        return other

    def __iter__(self):
        return iter([])

    def _eval_imageset(self, f):
        return self

    def _eval_powerset(self):
        # The power set of {} is {{}}.
        return FiniteSet(self)

    @property
    def _boundary(self):
        return self

    def _complement(self, other):
        # other - {} == other
        return other

    def _symmetric_difference(self, other):
        # {} ^ other == other
        return other
class UniversalSet(with_metaclass(Singleton, Set)):
    """
    Represents the set of all things.
    The universal set is available as a singleton as S.UniversalSet

    Examples
    ========

    >>> from sympy import S, Interval
    >>> S.UniversalSet
    UniversalSet()

    >>> Interval(1, 2).intersect(S.UniversalSet)
    [1, 2]

    See Also
    ========

    EmptySet

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Universal_set
    """
    is_UniversalSet = True

    def _intersect(self, other):
        # The universal set is the identity element of intersection.
        return other

    def _complement(self, other):
        # other - UniversalSet == {}
        return S.EmptySet

    def _symmetric_difference(self, other):
        return other

    @property
    def _measure(self):
        return S.Infinity

    def _contains(self, other):
        # sympy's boolean ``true``, not Python's ``True``.
        return true

    def as_relational(self, symbol):
        return True

    def _union(self, other):
        # The universal set absorbs under union.
        return self

    @property
    def _boundary(self):
        return EmptySet()
class FiniteSet(Set, EvalfMixin):
    """
    Represents a finite set of discrete numbers

    Examples
    ========

    >>> from sympy import FiniteSet
    >>> FiniteSet(1, 2, 3, 4)
    {1, 2, 3, 4}
    >>> 3 in FiniteSet(1, 2, 3, 4)
    True

    >>> members = [1, 2, 3, 4]
    >>> FiniteSet(*members)
    {1, 2, 3, 4}

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Finite_set
    """
    is_FiniteSet = True
    is_iterable = True

    def __new__(cls, *args, **kwargs):
        evaluate = kwargs.get('evaluate', global_evaluate[0])
        if evaluate:
            args = list(map(sympify, args))
            if len(args) == 0:
                return EmptySet()
        else:
            args = list(map(sympify, args))
        # Deduplicate via frozenset, then impose a canonical ordering.
        args = list(ordered(frozenset(tuple(args)), Set._infimum_key))
        obj = Basic.__new__(cls, *args)
        # Cached frozenset of elements for O(1) membership in _contains.
        obj._elements = frozenset(args)
        return obj

    def _eval_Eq(self, other):
        # Structural equality test used by Eq(); returns None (undecided)
        # for compound set types that may still evaluate to a FiniteSet.
        if not other.is_FiniteSet:
            if (other.is_Union or other.is_Complement or
                other.is_Intersection or other.is_ProductSet):
                return
            return false
        if len(self) != len(other):
            return false
        # Both sets are canonically ordered, so compare pairwise.
        return And(*(Eq(x, y) for x, y in zip(self.args, other.args)))

    def __iter__(self):
        return iter(self.args)

    def _intersect(self, other):
        """
        This function should only be used internally

        See Set._intersect for docstring
        """
        if isinstance(other, self.__class__):
            return self.__class__(*(self._elements & other._elements))
        return self.__class__(el for el in self if el in other)

    def _complement(self, other):
        # Computes ``other - self``; returns None when undecidable here.
        if isinstance(other, Interval):
            nums = sorted(m for m in self.args if m.is_number)
            if other == S.Reals and nums != []:
                syms = [m for m in self.args if m.is_Symbol]
                # Reals cannot contain elements other than numbers and symbols.

                intervals = []  # Build up a list of intervals between the elements
                intervals += [Interval(S.NegativeInfinity, nums[0], True, True)]
                for a, b in zip(nums[:-1], nums[1:]):
                    intervals.append(Interval(a, b, True, True))  # both open
                intervals.append(Interval(nums[-1], S.Infinity, True, True))
                if syms != []:
                    # Symbolic members stay as an unevaluated Complement.
                    return Complement(Union(intervals, evaluate=False),
                                      FiniteSet(*syms), evaluate=False)
                else:
                    return Union(intervals, evaluate=False)
            elif nums == []:
                return None
        elif isinstance(other, FiniteSet):
            # Elements whose membership in ``other`` cannot be decided.
            elms_unknown = FiniteSet(*[el for el in self if other.contains(el) not in (True, False)])
            if elms_unknown == self:
                return
            return Complement(FiniteSet(*[el for el in other if self.contains(el) != True]), elms_unknown)
        return Set._complement(self, other)

    def _union(self, other):
        """
        This function should only be used internally

        See Set._union for docstring
        """
        if other.is_FiniteSet:
            return FiniteSet(*(self._elements | other._elements))
        # If other set contains one of my elements, remove it from myself
        if any(other.contains(x) is true for x in self):
            # Returning a Python set signals the caller to union the parts.
            return set((
                FiniteSet(*[x for x in self if other.contains(x) is not true]),
                other))
        return None

    def _contains(self, other):
        """
        Tests whether an element, other, is in the set.

        Relies on Python's set class. This tests for object equality
        All inputs are sympified

        Examples
        ========

        >>> from sympy import FiniteSet
        >>> 1 in FiniteSet(1, 2)
        True
        >>> 5 in FiniteSet(1, 2)
        False

        """
        r = false
        for e in self._elements:
            t = Eq(e, other, evaluate=True)
            if isinstance(t, Eq):
                # Eq did not evaluate to a boolean; try simplification.
                t = t.simplify()
            if t == true:
                return t
            elif t != false:
                # At least one comparison is undecided: answer is unknown.
                r = None
        return r

    def _eval_imageset(self, f):
        return FiniteSet(*map(f, self))

    @property
    def _boundary(self):
        return self

    @property
    def _inf(self):
        from sympy.functions.elementary.miscellaneous import Min
        return Min(*self)

    @property
    def _sup(self):
        from sympy.functions.elementary.miscellaneous import Max
        return Max(*self)

    @property
    def measure(self):
        # A finite set of points has Lebesgue measure zero.
        return 0

    def __len__(self):
        return len(self.args)

    def as_relational(self, symbol):
        """Rewrite a FiniteSet in terms of equalities and logic operators. """
        from sympy.core.relational import Eq
        return Or(*[Eq(symbol, elem) for elem in self])

    @property
    @deprecated(useinstead="is_subset(S.Reals)", issue=6212, deprecated_since_version="0.7.6")
    def is_real(self):
        return all(el.is_real for el in self)

    def compare(self, other):
        return (hash(self) - hash(other))

    def _eval_evalf(self, prec):
        return FiniteSet(*[elem._eval_evalf(prec) for elem in self])

    def _hashable_content(self):
        return (self._elements,)

    @property
    def _sorted_args(self):
        return tuple(ordered(self.args, Set._infimum_key))

    def _eval_powerset(self):
        return self.func(*[self.func(*s) for s in subsets(self.args)])

    def __ge__(self, other):
        return other.is_subset(self)

    def __gt__(self, other):
        return self.is_proper_superset(other)

    def __le__(self, other):
        return self.is_subset(other)

    def __lt__(self, other):
        return self.is_proper_subset(other)
class SymmetricDifference(Set):
    """The symmetric difference of two sets: the elements belonging to
    exactly one of the two sets (their union minus their intersection).

    Examples
    ========

    >>> from sympy import SymmetricDifference, FiniteSet
    >>> SymmetricDifference(FiniteSet(1, 2, 3), FiniteSet(3, 4, 5))
    {1, 2, 4, 5}

    See Also
    ========

    Complement, Union

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Symmetric_difference
    """

    is_SymmetricDifference = True

    def __new__(cls, a, b, evaluate=True):
        # evaluate=False builds the raw node, mirroring Complement.__new__;
        # reduce() uses it when no simplification is known.
        if not evaluate:
            return Basic.__new__(cls, a, b)
        return SymmetricDifference.reduce(a, b)

    @staticmethod
    def reduce(A, B):
        """Delegate simplification to the constituent sets, or stay
        unevaluated when they do not know how to simplify."""
        simplified = B._symmetric_difference(A)
        if simplified is None:
            return SymmetricDifference(A, B, evaluate=False)
        return simplified
def imageset(*args):
    r"""
    Image of set under transformation ``f``.

    If this function can't compute the image, it returns an
    unevaluated ImageSet object.

    .. math::
        { f(x) | x \in self }

    Examples
    ========

    >>> from sympy import Interval, Symbol, imageset, sin, Lambda
    >>> x = Symbol('x')

    >>> imageset(x, 2*x, Interval(0, 2))
    [0, 4]

    >>> imageset(lambda x: 2*x, Interval(0, 2))
    [0, 4]

    >>> imageset(Lambda(x, sin(x)), Interval(-2, 1))
    ImageSet(Lambda(x, sin(x)), [-2, 1])

    See Also
    ========

    sympy.sets.fancysets.ImageSet

    """
    from sympy.core import Dummy, Lambda
    from sympy.sets.fancysets import ImageSet
    # Accepts either (var, expr, set) or (callable/Lambda, set).
    if len(args) == 3:
        f = Lambda(*args[:2])
    else:
        # var and expr are being defined this way to
        # support Python lambda and not just sympy Lambda
        f = args[0]
        if not isinstance(f, Lambda):
            var = Dummy()
            expr = args[0](var)
            f = Lambda(var, expr)
    # NOTE: local name deliberately shadows the builtin ``set`` here.
    set = args[-1]

    r = set._eval_imageset(f)
    if isinstance(r, ImageSet):
        f, set = r.args

    # Identity map: the image equals the set itself.
    if f.variables[0] == f.expr:
        return set

    # Fuse nested ImageSets by composing the two lambdas.
    if isinstance(set, ImageSet):
        if len(set.lamda.variables) == 1 and len(f.variables) == 1:
            return imageset(Lambda(set.lamda.variables[0],
                                   f.expr.subs(f.variables[0], set.lamda.expr)),
                            set.base_set)

    if r is not None:
        return r

    return ImageSet(f, set)
| bsd-3-clause |
nichung/wwwflaskBlogrevA | venv/lib/python2.7/site-packages/pymongo/results.py | 71 | 7773 | # Copyright 2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Result class definitions."""
from pymongo.errors import InvalidOperation
class _WriteResult(object):
"""Base class for write result classes."""
def __init__(self, acknowledged):
self.__acknowledged = acknowledged
def _raise_if_unacknowledged(self, property_name):
"""Raise an exception on property access if unacknowledged."""
if not self.__acknowledged:
raise InvalidOperation("A value for %s is not available when "
"the write is unacknowledged. Check the "
"acknowledged attribute to avoid this "
"error." % (property_name,))
@property
def acknowledged(self):
"""Is this the result of an acknowledged write operation?
The :attr:`acknowledged` attribute will be ``False`` when using
``WriteConcern(w=0)``, otherwise ``True``.
.. note::
If the :attr:`acknowledged` attribute is ``False`` all other
attibutes of this class will raise
:class:`~pymongo.errors.InvalidOperation` when accessed. Values for
other attributes cannot be determined if the write operation was
unacknowledged.
.. seealso::
:class:`~pymongo.write_concern.WriteConcern`
"""
return self.__acknowledged
class InsertOneResult(_WriteResult):
    """The return type for :meth:`~pymongo.collection.Collection.insert_one`.
    """

    # Private names in __slots__ are name-mangled per class; the base class
    # (no __slots__) still supplies a __dict__ for its own attribute.
    __slots__ = ("__inserted_id", "__acknowledged")

    def __init__(self, inserted_id, acknowledged):
        self.__inserted_id = inserted_id
        super(InsertOneResult, self).__init__(acknowledged)

    @property
    def inserted_id(self):
        """The inserted document's _id."""
        return self.__inserted_id
class InsertManyResult(_WriteResult):
    """The return type for :meth:`~pymongo.collection.Collection.insert_many`.
    """

    __slots__ = ("__inserted_ids", "__acknowledged")

    def __init__(self, inserted_ids, acknowledged):
        self.__inserted_ids = inserted_ids
        super(InsertManyResult, self).__init__(acknowledged)

    @property
    def inserted_ids(self):
        """A list of _ids of the inserted documents, in the order provided.

        .. note:: If ``False`` is passed for the `ordered` parameter to
          :meth:`~pymongo.collection.Collection.insert_many` the server
          may have inserted the documents in a different order than what
          is presented here.
        """
        return self.__inserted_ids
class UpdateResult(_WriteResult):
    """The return type for :meth:`~pymongo.collection.Collection.update_one`,
    :meth:`~pymongo.collection.Collection.update_many`, and
    :meth:`~pymongo.collection.Collection.replace_one`.
    """

    __slots__ = ("__raw_result", "__acknowledged")

    def __init__(self, raw_result, acknowledged):
        self.__raw_result = raw_result
        super(UpdateResult, self).__init__(acknowledged)

    @property
    def raw_result(self):
        """The raw result document returned by the server."""
        return self.__raw_result

    @property
    def matched_count(self):
        """The number of documents matched for this update."""
        self._raise_if_unacknowledged("matched_count")
        # An upsert inserted rather than matched, so report zero matches.
        if self.upserted_id is not None:
            return 0
        return self.__raw_result.get("n", 0)

    @property
    def modified_count(self):
        """The number of documents modified.

        .. note:: modified_count is only reported by MongoDB 2.6 and later.
          When connected to an earlier server version, or in certain mixed
          version sharding configurations, this attribute will be set to
          ``None``.
        """
        self._raise_if_unacknowledged("modified_count")
        # .get without a default: absent key yields None (older servers).
        return self.__raw_result.get("nModified")

    @property
    def upserted_id(self):
        """The _id of the inserted document if an upsert took place. Otherwise
        ``None``.
        """
        self._raise_if_unacknowledged("upserted_id")
        return self.__raw_result.get("upserted")
class DeleteResult(_WriteResult):
    """The return type for :meth:`~pymongo.collection.Collection.delete_one`
    and :meth:`~pymongo.collection.Collection.delete_many`"""

    __slots__ = ("__raw_result", "__acknowledged")

    def __init__(self, raw_result, acknowledged):
        self.__raw_result = raw_result
        super(DeleteResult, self).__init__(acknowledged)

    @property
    def raw_result(self):
        """The raw result document returned by the server."""
        return self.__raw_result

    @property
    def deleted_count(self):
        """The number of documents deleted."""
        self._raise_if_unacknowledged("deleted_count")
        return self.__raw_result.get("n", 0)
class BulkWriteResult(_WriteResult):
    """An object wrapper for bulk API write results."""

    __slots__ = ("__bulk_api_result", "__acknowledged")

    def __init__(self, bulk_api_result, acknowledged):
        """Create a BulkWriteResult instance.

        :Parameters:
          - `bulk_api_result`: A result dict from the bulk API
          - `acknowledged`: Was this write result acknowledged? If ``False``
            then all properties of this object will raise
            :exc:`~pymongo.errors.InvalidOperation`.
        """
        self.__bulk_api_result = bulk_api_result
        super(BulkWriteResult, self).__init__(acknowledged)

    @property
    def bulk_api_result(self):
        """The raw bulk API result."""
        return self.__bulk_api_result

    @property
    def inserted_count(self):
        """The number of documents inserted."""
        self._raise_if_unacknowledged("inserted_count")
        return self.__bulk_api_result.get("nInserted")

    @property
    def matched_count(self):
        """The number of documents matched for an update."""
        self._raise_if_unacknowledged("matched_count")
        return self.__bulk_api_result.get("nMatched")

    @property
    def modified_count(self):
        """The number of documents modified.

        .. note:: modified_count is only reported by MongoDB 2.6 and later.
          When connected to an earlier server version, or in certain mixed
          version sharding configurations, this attribute will be set to
          ``None``.
        """
        self._raise_if_unacknowledged("modified_count")
        return self.__bulk_api_result.get("nModified")

    @property
    def deleted_count(self):
        """The number of documents deleted."""
        self._raise_if_unacknowledged("deleted_count")
        return self.__bulk_api_result.get("nRemoved")

    @property
    def upserted_count(self):
        """The number of documents upserted."""
        self._raise_if_unacknowledged("upserted_count")
        return self.__bulk_api_result.get("nUpserted")

    @property
    def upserted_ids(self):
        """A map of operation index to the _id of the upserted document."""
        self._raise_if_unacknowledged("upserted_ids")
        if self.__bulk_api_result:
            return dict((upsert["index"], upsert["_id"])
                        for upsert in self.bulk_api_result["upserted"])
| mit |
aerickson/ansible | lib/ansible/modules/system/facter.py | 69 | 1855 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: facter
short_description: Runs the discovery program I(facter) on the remote system
description:
- Runs the I(facter) discovery program
(U(https://github.com/puppetlabs/facter)) on the remote system, returning
JSON data that can be useful for inventory purposes.
version_added: "0.2"
options: {}
notes: []
requirements: [ "facter", "ruby-json" ]
author:
- "Ansible Core Team"
- "Michael DeHaan"
'''
EXAMPLES = '''
# Example command-line invocation
ansible www.example.net -m facter
'''
def main():
    # AnsibleModule is provided by the wildcard import of
    # ansible.module_utils.basic at the bottom of this file.
    module = AnsibleModule(
        argument_spec = dict()
    )
    facter_path = module.get_bin_path('facter', opt_dirs=['/opt/puppetlabs/bin'])
    cmd = [facter_path, "--puppet", "--json"]
    # check_rc=True makes run_command fail the module on a non-zero exit.
    rc, out, err = module.run_command(cmd, check_rc=True)
    # facter already emits JSON; re-parse it and return it as module facts.
    module.exit_json(**json.loads(out))
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
gdbdzgd/aptly | system/t09_repo/edit.py | 10 | 2271 | import os
import inspect
from lib import BaseTest
changesRemove = lambda _, s: s.replace(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "changes"), "")
class EditRepo1Test(BaseTest):
    """
    edit repo: change comment
    """
    # Create the repo first so there is something to edit.
    fixtureCmds = [
        "aptly repo create repo1",
    ]
    runCmd = "aptly repo edit -comment=Lala repo1"

    def check(self):
        self.check_output()
        # Verify the new comment is reflected in `repo show`.
        self.check_cmd_output("aptly repo show repo1", "repo-show")
class EditRepo2Test(BaseTest):
    """
    edit repo: change distribution & component
    """
    # Repo starts with a non-default component so the edit is observable.
    fixtureCmds = [
        "aptly repo create -comment=Lala -component=non-free repo2",
    ]
    runCmd = "aptly repo edit -distribution=wheezy -component=contrib repo2"

    def check(self):
        self.check_output()
        self.check_cmd_output("aptly repo show repo2", "repo-show")
class EditRepo3Test(BaseTest):
    """
    edit repo: no such repo
    """
    runCmd = "aptly repo edit repo3"
    # Editing a missing repo must fail with a non-zero exit code.
    expectedCode = 1
class EditRepo4Test(BaseTest):
    """
    edit repo: add uploaders.json
    """
    fixtureCmds = [
        "aptly repo create repo4",
    ]
    # ${changes} expands to the test fixtures' changes directory.
    runCmd = "aptly repo edit -uploaders-file=${changes}/uploaders2.json repo4"

    def check(self):
        self.check_output()
        self.check_cmd_output("aptly repo show repo4", "repo_show")
class EditRepo5Test(BaseTest):
    """
    edit repo: with broken uploaders.json
    """
    fixtureCmds = [
        "aptly repo create repo5",
    ]
    runCmd = "aptly repo edit -uploaders-file=${changes}/uploaders3.json repo5"
    # Malformed uploaders file must be rejected with a non-zero exit code.
    expectedCode = 1
class EditRepo6Test(BaseTest):
    """
    edit local repo: with missing uploaders.json
    """
    fixtureCmds = [
        "aptly repo create repo6",
    ]
    runCmd = "aptly repo edit -uploaders-file=${changes}/uploaders-not-found.json repo6"
    expectedCode = 1
    # Strip the machine-specific changes path from output before matching.
    outputMatchPrepare = changesRemove
class EditRepo7Test(BaseTest):
    """
    edit local repo: remove uploaders.json
    """
    fixtureCmds = [
        "aptly repo create -uploaders-file=${changes}/uploaders2.json repo7",
    ]
    # Passing an empty value clears the previously configured uploaders file.
    runCmd = "aptly repo edit -uploaders-file= repo7"

    def check(self):
        self.check_output()
        self.check_cmd_output("aptly repo show repo7", "repo_show")
| mit |
funbaker/astropy | astropy/utils/collections.py | 6 | 1521 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
A module containing specialized collection classes.
"""
class HomogeneousList(list):
    """
    A subclass of list that contains only elements of a given type or
    types.  If an item that is not of the specified type is added to
    the list, a `TypeError` is raised.
    """
    def __init__(self, types, values=()):
        """
        Parameters
        ----------
        types : type or tuple of types
            The types to accept (anything `isinstance` accepts).

        values : sequence, optional
            An initial set of values.
        """
        # Default changed from the mutable ``[]`` to ``()``: a shared
        # mutable default is a classic footgun, and the value is only
        # iterated, so the change is behavior-compatible.
        self._types = types
        super().__init__()
        self.extend(values)

    def _assert(self, x):
        """Raise `TypeError` unless *x* is an instance of the accepted types."""
        if not isinstance(x, self._types):
            raise TypeError(
                "homogeneous list must contain only objects of "
                "type '{}'".format(self._types))

    def __iadd__(self, other):
        self.extend(other)
        return self

    def __setitem__(self, idx, value):
        if isinstance(idx, slice):
            # Materialize so generators can be validated before assignment.
            value = list(value)
            for item in value:
                self._assert(item)
        else:
            self._assert(value)
        return super().__setitem__(idx, value)

    def append(self, x):
        self._assert(x)
        return super().append(x)

    def insert(self, i, x):
        self._assert(x)
        return super().insert(i, x)

    def extend(self, x):
        # Validate and append one item at a time so that a single pass
        # works for generators as well as sequences.
        for item in x:
            self._assert(item)
            super().append(item)
| bsd-3-clause |
reimandlab/ActiveDriverDB | website/hash_set_db.py | 2 | 7429 | from pathlib import Path
import gc
from collections import defaultdict
from contextlib import contextmanager
from typing import Iterable, Union
from database.lightning import LightningInterface
class SetWithCallback(set):
    """A set implementation that triggers callbacks on `add` or `update`.

    It has an important use in HashSet database implementation:
    it allows a user to modify sets like native Python's structures while all
    the changes are forwarded to the database, without additional user's action.
    """
    _modifying_methods = {'update', 'add'}

    def __init__(self, items, callback):
        super().__init__(items)
        self.callback = callback
        # Shadow each mutating method with an instance attribute that
        # notifies the callback after the mutation runs.
        for name in self._modifying_methods:
            setattr(self, name, self._with_callback(getattr(self, name)))

    def _with_callback(self, method):
        def notifying_method(*args, **kwargs):
            outcome = method(*args, **kwargs)
            self.callback(self)
            return outcome
        return notifying_method
class DatabaseNotOpened(Exception):
    """Raised when a HashSet operation is attempted before open()."""
    pass
def require_open(func):
    """Decorator ensuring the database is open before *func* runs.

    Raises
    ------
    DatabaseNotOpened
        If ``self.is_open`` is falsy when the wrapped method is called.
    """
    # Local import keeps this module's top-level imports untouched.
    from functools import wraps

    @wraps(func)  # preserve the wrapped method's name/docstring for debugging
    def wrapper(self, *args, **kwargs):
        if not self.is_open:
            raise DatabaseNotOpened
        return func(self, *args, **kwargs)
    return wrapper
class HashSet:
    """A hash-indexed database where values are equivalent to Python's sets."""

    def __init__(self, name=None, integer_values=False):
        # integer_values: decode stored items back to int on retrieval.
        self.is_open = False
        # Populated by open(); annotated here for type checkers.
        self.path: Path
        self.integer_values = integer_values
        if name:
            self.open(name)

    def _create_path(self, name) -> Path:
        """Returns path to a file containing the database.

        The file is not guaranteed to exist, although the 'databases' directory
        will be created (if it does not exist).
        """
        self.name = name
        db_dir = path_relative_to_app(name)
        # parents=True creates intermediate directories as needed.
        db_dir.mkdir(parents=True, exist_ok=True)
        return db_dir

    def open(self, name, readonly=False, size=1e5, write_map=True, **kwargs):
        """Open hash database in a given mode.

        By default it opens a database in read-write mode and in case
        if a database of given name does not exists it creates one.

        `size` is forwarded to LMDB as map_size (maximum database size).
        """
        path = self._create_path(name)
        self.path = path
        self.db = LightningInterface(path, map_size=size, readonly=readonly, writemap=write_map, **kwargs)
        self.is_open = True

    def close(self):
        # NOTE(review): is_open is not reset to False here, so @require_open
        # checks would still pass after close() — confirm if intentional.
        self.db.close()

    @require_open
    def __getitem__(self, key) -> set:
        """key: has to be str"""
        key = bytes(key, 'utf-8')
        try:
            # Values are stored as '|'-joined items; drop empty fragments.
            items = filter(
                bool,
                self.db.get(key).decode().split('|')
            )
            if self.integer_values:
                items = map(int, items)
        except (KeyError, AttributeError):
            # AttributeError: db.get() returned None for a missing key.
            items = []
        return SetWithCallback(
            items,
            # Any mutation of the returned set is written straight back.
            lambda new_set: self.__setitem__(key, new_set)
        )

    def items(self):
        """Yields (key, iterator over items from value set) tuples.

        All atomic elements are returned as plain strings.
        """
        # Bind methods to locals: cheap micro-optimisation for big scans.
        decode = bytes.decode
        split = str.split
        for key, value in self.db.items():
            try:
                yield key.decode(), filter(bool, split(decode(value), '|'))
            except (KeyError, AttributeError):
                pass

    def values(self):
        """Yields iterators over items from value set.

        All atomic elements are returned as plain strings.
        """
        decode = bytes.decode
        split = str.split
        for key, value in self.db.items():
            try:
                yield filter(bool, split(decode(value), '|'))
            except (KeyError, AttributeError):
                pass

    def update(self, key, value):
        """Merge the items of iterable `value` into the set stored at `key`."""
        key = bytes(key, 'utf-8')
        try:
            items = self._to_set(self.db.get(key))
        except (KeyError, AttributeError):
            items = set()
        # '|' is the on-disk separator and must not occur in items.
        assert '|' not in value
        items.update((bytes(v, 'utf-8') for v in value))
        self.db[key] = b'|'.join(items)

    def _get(self, key):
        # Fetch the stored set for a bytes key; missing keys yield empty set.
        try:
            items = self._to_set(self.db.get(key))
        except (KeyError, AttributeError):
            items = set()
        return items

    @staticmethod
    def _to_set(value: bytes):
        # Split the stored blob on the separator, dropping empty fragments.
        return set(
            filter(
                bool,
                value.split(b'|')
            )
        )

    def add(self, key, value):
        """Add a single string `value` to the set stored at `key`."""
        key = bytes(key, 'utf-8')
        items = self._get(key)
        assert '|' not in value
        items.add(bytes(value, 'utf-8'))
        self.db[key] = b'|'.join(items)

    @require_open
    def __setitem__(self, key: Union[str, bytes], items: Iterable[Union[str, int]]):
        # Serialize the whole set, replacing whatever was stored before.
        if self.integer_values:
            items = map(str, items)
        else:
            assert all('|' not in item for item in items)
        if not isinstance(key, bytes):
            key = bytes(key, 'utf-8')
        self.db[key] = bytes('|'.join(items), 'utf-8')

    @require_open
    def __len__(self):
        return len(self.db)

    @require_open
    def drop(self, not_exists_ok=True):
        """Delete all database files; recreate the (empty) directory."""
        try:
            for f in self.path.glob('*'):
                f.unlink()
        except FileNotFoundError:
            if not_exists_ok:
                pass
            else:
                raise
        finally:
            self.path.mkdir(exist_ok=True)

    @require_open
    def reset(self):
        """Reset database completely by its removal and recreation."""
        self.drop()
        self.open(self.name)

    @require_open
    def reload(self):
        # Close and reopen the environment (e.g. to pick up external writes).
        self.close()
        self.open(self.name)
class HashSetWithCache(HashSet):
    """HashSet variant that batches writes in memory during a cached session."""

    def __init__(self, name=None, integer_values=False):
        self.in_cached_session = False
        self.cache = {}
        # Flush counter; used to trigger periodic garbage collection.
        self.i = None
        super().__init__(name=name, integer_values=integer_values)

    def cached_add(self, key: str, value: str):
        # Accumulate in the in-memory cache instead of writing through.
        self.cache[bytes(key, 'utf-8')].add(bytes(value, 'utf-8'))

    def cached_add_integer(self, key: str, value: int):
        # Integer fast-path: format straight to bytes.
        self.cache[bytes(key, 'utf-8')].add(b'%d' % value)

    def flush_cache(self):
        """Merge cached items into the database in a single write transaction."""
        assert self.in_cached_session
        with self.db.env.begin(write=True) as transaction:
            put = transaction.put
            get = transaction.get
            to_set = self._to_set
            # First merge existing on-disk values into the cached sets...
            for key, items in self.cache.items():
                old_values = get(key)   # will return None if the key does not exist in the db
                if old_values:
                    items.update(to_set(old_values))
            # ...then write every merged set back.
            for key, items in self.cache.items():
                put(key, b'|'.join(items))
        self.cache = defaultdict(set)
        self.i += 1
        # Periodic gc keeps memory bounded over many flushes.
        if self.i % 100 == 99:
            gc.collect()

    @contextmanager
    def cached_session(self):
        """Context manager: buffer cached_add() calls, flushing on exit."""
        self.i = 0
        old_cache = self.cache
        self.in_cached_session = True
        self.cache = defaultdict(set)
        yield
        print('Flushing changes')
        self.flush_cache()
        self.cache = old_cache
        self.in_cached_session = False
def path_relative_to_app(path):
    """Resolve *path* against the directory containing this module."""
    app_dir = Path(__file__).parent.resolve()
    return app_dir / Path(path)
| lgpl-2.1 |
yasoob/timely | tests/test_urls.py | 14 | 1210 | #! ../env/bin/python
# -*- coding: utf-8 -*-
import pytest
create_user = True
@pytest.mark.usefixtures("testapp")
class TestURLs:
    # testapp is a Flask test client provided by the fixture of that name.

    def test_home(self, testapp):
        """ Tests if the home page loads """
        rv = testapp.get('/')
        assert rv.status_code == 200

    def test_login(self, testapp):
        """ Tests if the login page loads """
        rv = testapp.get('/login')
        assert rv.status_code == 200

    def test_logout(self, testapp):
        """ Tests if the logout page loads """
        # Logout redirects (302) rather than rendering a page.
        rv = testapp.get('/logout')
        assert rv.status_code == 302

    def test_restricted_logged_out(self, testapp):
        """ Tests if the restricted page returns a 302
        if the user is logged out
        """
        rv = testapp.get('/restricted')
        assert rv.status_code == 302

    def test_restricted_logged_in(self, testapp):
        """ Tests if the restricted page returns a 200
        if the user is logged in
        """
        # Log in first; the module-level create_user flag provisions this
        # account for the test app.
        testapp.post('/login', data=dict(
            username='admin',
            password="supersafepassword"
        ), follow_redirects=True)

        rv = testapp.get('/restricted')
        assert rv.status_code == 200
| mit |
vsajip/django | tests/regressiontests/localflavor/be/tests.py | 13 | 3006 | from __future__ import unicode_literals
from django.contrib.localflavor.be.forms import (BEPostalCodeField,
BEPhoneNumberField, BERegionSelect, BEProvinceSelect)
from django.test import SimpleTestCase
class BELocalFlavorTests(SimpleTestCase):
    """Tests for the Belgian (BE) localflavor form fields and widgets."""

    def test_BEPostalCodeField(self):
        error_format = ['Enter a valid postal code in the range and format 1XXX - 9XXX.']
        valid = {
            '1451': '1451',
            '2540': '2540',
        }
        # Codes outside 1000-9999 or with the wrong shape must be rejected.
        invalid = {
            '0287': error_format,
            '14309': error_format,
            '873': error_format,
            '35 74': error_format,
            '859A': error_format,
        }
        self.assertFieldOutput(BEPostalCodeField, valid, invalid)

    def test_BEPhoneNumberField(self):
        error_format = [
            ('Enter a valid phone number in one of the formats 0x xxx xx xx, '
             '0xx xx xx xx, 04xx xx xx xx, 0x/xxx.xx.xx, 0xx/xx.xx.xx, '
             '04xx/xx.xx.xx, 0x.xxx.xx.xx, 0xx.xx.xx.xx, 04xx.xx.xx.xx, '
             '0xxxxxxxx or 04xxxxxxxx.')
        ]
        # All accepted separator styles: spaces, slash+dots, dots, none.
        valid = {
            '01 234 56 78': '01 234 56 78',
            '01/234.56.78': '01/234.56.78',
            '01.234.56.78': '01.234.56.78',
            '012 34 56 78': '012 34 56 78',
            '012/34.56.78': '012/34.56.78',
            '012.34.56.78': '012.34.56.78',
            '0412 34 56 78': '0412 34 56 78',
            '0412/34.56.78': '0412/34.56.78',
            '0412.34.56.78': '0412.34.56.78',
            '012345678': '012345678',
            '0412345678': '0412345678',
        }
        invalid = {
            '01234567': error_format,
            '12/345.67.89': error_format,
            '012/345.678.90': error_format,
            '012/34.56.789': error_format,
            '0123/45.67.89': error_format,
            '012/345 678 90': error_format,
            '012/34 56 789': error_format,
            '012.34 56 789': error_format,
        }
        self.assertFieldOutput(BEPhoneNumberField, valid, invalid)

    def test_BERegionSelect(self):
        f = BERegionSelect()
        # 'VLG' passed as the selected value must be marked selected.
        out = '''<select name="regions">
<option value="BRU">Brussels Capital Region</option>
<option value="VLG" selected="selected">Flemish Region</option>
<option value="WAL">Wallonia</option>
</select>'''
        self.assertHTMLEqual(f.render('regions', 'VLG'), out)

    def test_BEProvinceSelect(self):
        f = BEProvinceSelect()
        out = '''<select name="provinces">
<option value="VAN">Antwerp</option>
<option value="BRU">Brussels</option>
<option value="VOV">East Flanders</option>
<option value="VBR">Flemish Brabant</option>
<option value="WHT">Hainaut</option>
<option value="WLG" selected="selected">Liege</option>
<option value="VLI">Limburg</option>
<option value="WLX">Luxembourg</option>
<option value="WNA">Namur</option>
<option value="WBR">Walloon Brabant</option>
<option value="VWV">West Flanders</option>
</select>'''
        self.assertHTMLEqual(f.render('provinces', 'WLG'), out)
| bsd-3-clause |
AmberWhiteSky/shadowsocks | shadowsocks/common.py | 945 | 8921 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import socket
import struct
import logging
def compat_ord(s):
    """ord() replacement that also accepts ints (py3 bytes indexing)."""
    # type(...) == int, not isinstance: preserves original bool handling.
    return s if type(s) == int else _ord(s)
def compat_chr(d):
    """chr() replacement returning a single byte on py3, a str on py2."""
    # On py2 bytes is str, so the original chr is fine; on py3 build bytes.
    return _chr(d) if bytes == str else bytes([d])
_ord = ord
_chr = chr
ord = compat_ord
chr = compat_chr
def to_bytes(s):
    """Encode str to UTF-8 bytes on Python 3; pass everything else through."""
    if bytes != str and type(s) == str:
        return s.encode('utf-8')
    return s
def to_str(s):
    """Decode UTF-8 bytes to str on Python 3; pass everything else through."""
    if bytes != str and type(s) == bytes:
        return s.decode('utf-8')
    return s
def inet_ntop(family, ipstr):
    """Convert a packed binary address to its text form, returned as bytes.

    Pure-Python fallback installed by patch_socket() on platforms whose
    socket module lacks inet_ntop.
    """
    if family == socket.AF_INET:
        return to_bytes(socket.inet_ntoa(ipstr))
    elif family == socket.AF_INET6:
        import re
        # Render each 16-bit group as hex with leading zeros stripped;
        # an all-zero group becomes an empty string.
        v6addr = ':'.join(('%02X%02X' % (ord(i), ord(j))).lstrip('0')
                          for i, j in zip(ipstr[::2], ipstr[1::2]))
        # Collapse the first run of empty groups into '::' (count=1).
        v6addr = re.sub('::+', '::', v6addr, count=1)
        return to_bytes(v6addr)
def inet_pton(family, addr):
    """Pure-Python fallback for ``socket.inet_pton`` (installed onto the
    socket module by ``patch_socket`` on platforms that lack it).

    Converts the textual IPv4/IPv6 address *addr* to its packed binary
    form.  Raises RuntimeError for any other address family.  The bytes
    result for IPv6 relies on the module-level ``chr`` rebinding
    (compat_chr) which produces single bytes on Python 3.
    """
    addr = to_str(addr)
    if family == socket.AF_INET:
        return socket.inet_aton(addr)
    elif family == socket.AF_INET6:
        if '.' in addr:  # an embedded v4 address, e.g. ::ffff:1.2.3.4
            v4addr = addr[addr.rindex(':') + 1:]
            v4addr = socket.inet_aton(v4addr)
            # list(...) is required: on Python 3 map() returns an iterator,
            # which has no insert() -- the original code crashed here.
            v4addr = list(map(lambda x: ('%02X' % ord(x)), v4addr))
            v4addr.insert(2, ':')
            newaddr = addr[:addr.rindex(':') + 1] + ''.join(v4addr)
            return inet_pton(family, newaddr)
        dbyts = [0] * 8  # 8 groups
        grps = addr.split(':')
        for i, v in enumerate(grps):
            if v:
                dbyts[i] = int(v, 16)
            else:
                # hit the '::' gap: fill the remaining groups from the
                # right-hand end, then stop
                for j, w in enumerate(grps[::-1]):
                    if w:
                        dbyts[7 - j] = int(w, 16)
                    else:
                        break
                break
        return b''.join((chr(i // 256) + chr(i % 256)) for i in dbyts)
    else:
        raise RuntimeError("What family?")
def is_ip(address):
    """Classify *address*: return ``socket.AF_INET`` or ``socket.AF_INET6``
    when it parses as an IP literal, or False otherwise (e.g. hostnames,
    undecodable bytes)."""
    for candidate in (socket.AF_INET, socket.AF_INET6):
        try:
            # decode inside the try: a bad byte sequence must also count
            # as "not an IP" rather than raising
            text = address if type(address) == str else address.decode('utf8')
            inet_pton(candidate, text)
        except (TypeError, ValueError, OSError, IOError):
            continue
        return candidate
    return False
def patch_socket():
    """Install the pure-Python inet_pton/inet_ntop fallbacks onto the
    socket module on platforms that lack them (notably old Windows)."""
    for name in ('inet_pton', 'inet_ntop'):
        if not hasattr(socket, name):
            setattr(socket, name, globals()[name])


# Apply the patch as soon as this module is imported.
patch_socket()
# SOCKS5 / shadowsocks address-type tags used in the binary header.
ADDRTYPE_IPV4 = 1
ADDRTYPE_IPV6 = 4
ADDRTYPE_HOST = 3
def pack_addr(address):
    """Pack *address* into shadowsocks wire format: a one-byte type tag
    followed by the packed IP, or (for hostnames) a length byte plus the
    raw name.  ``chr`` here is the module-level compat_chr, so the
    concatenation yields bytes on Python 3 as well."""
    address_str = to_str(address)
    for family in (socket.AF_INET, socket.AF_INET6):
        try:
            r = socket.inet_pton(family, address_str)
            if family == socket.AF_INET6:
                return b'\x04' + r
            else:
                return b'\x01' + r
        except (TypeError, ValueError, OSError, IOError):
            pass
    # not an IP literal: treat as a hostname (length field is one byte)
    if len(address) > 255:
        address = address[:255]  # TODO
    return b'\x03' + chr(len(address)) + address
def parse_header(data):
    """Parse a SOCKS5-style destination header from the start of *data*.

    Returns ``(addrtype, dest_addr, dest_port, header_length)`` on
    success, or None when the header is truncated or the address type is
    unknown (which usually indicates a wrong password / cipher mismatch
    upstream).  ``ord`` is the module-level compat version, so indexing
    works on both Python 2 and 3 byte strings.
    """
    addrtype = ord(data[0])
    dest_addr = None
    dest_port = None
    header_length = 0
    if addrtype == ADDRTYPE_IPV4:
        # 1 tag byte + 4 address bytes + 2 port bytes
        if len(data) >= 7:
            dest_addr = socket.inet_ntoa(data[1:5])
            dest_port = struct.unpack('>H', data[5:7])[0]
            header_length = 7
        else:
            logging.warn('header is too short')
    elif addrtype == ADDRTYPE_HOST:
        # 1 tag byte + 1 length byte + hostname + 2 port bytes
        if len(data) > 2:
            addrlen = ord(data[1])
            if len(data) >= 2 + addrlen:
                dest_addr = data[2:2 + addrlen]
                dest_port = struct.unpack('>H', data[2 + addrlen:4 +
                                          addrlen])[0]
                header_length = 4 + addrlen
            else:
                logging.warn('header is too short')
        else:
            logging.warn('header is too short')
    elif addrtype == ADDRTYPE_IPV6:
        # 1 tag byte + 16 address bytes + 2 port bytes
        if len(data) >= 19:
            dest_addr = socket.inet_ntop(socket.AF_INET6, data[1:17])
            dest_port = struct.unpack('>H', data[17:19])[0]
            header_length = 19
        else:
            logging.warn('header is too short')
    else:
        logging.warn('unsupported addrtype %d, maybe wrong password or '
                     'encryption method' % addrtype)
    if dest_addr is None:
        return None
    return addrtype, to_bytes(dest_addr), dest_port, header_length
class IPNetwork(object):
    """A collection of IPv4/IPv6 networks supporting ``addr in network``
    containment tests.

    *addrs* is a comma-separated string (or iterable) of IP literals or
    CIDR blocks, e.g. ``'127.0.0.0/24,::1'``.  A bare address without a
    prefix length is stored by stripping its trailing zero bits, so
    ``192.0.2.0`` behaves like ``192.0.2.0/23``.

    Fix: literal comparisons previously used ``is`` / ``is not``
    (``addr is ""``, ``len(block) is 1``, ``ip is not 0``), which only
    work by accident of CPython small-int/string interning and emit a
    SyntaxWarning on Python 3.8+; they are now proper ``==`` / ``!=``.
    """
    ADDRLENGTH = {socket.AF_INET: 32, socket.AF_INET6: 128, False: 0}

    def __init__(self, addrs):
        self._network_list_v4 = []
        self._network_list_v6 = []
        if type(addrs) == str:
            addrs = addrs.split(',')
        list(map(self.add_network, addrs))

    def add_network(self, addr):
        """Parse one literal/CIDR entry and store it as a
        (network-prefix-value, host-bit-count) tuple."""
        if addr == "":
            return
        block = addr.split('/')
        addr_family = is_ip(block[0])
        addr_len = IPNetwork.ADDRLENGTH[addr_family]
        if addr_family == socket.AF_INET:
            ip, = struct.unpack("!I", socket.inet_aton(block[0]))
        elif addr_family == socket.AF_INET6:
            hi, lo = struct.unpack("!QQ", inet_pton(addr_family, block[0]))
            ip = (hi << 64) | lo
        else:
            raise Exception("Not a valid CIDR notation: %s" % addr)
        if len(block) == 1:
            # no explicit prefix: infer it from the trailing zero bits
            prefix_size = 0
            while (ip & 1) == 0 and ip != 0:
                ip >>= 1
                prefix_size += 1
            logging.warn("You did't specify CIDR routing prefix size for %s, "
                         "implicit treated as %s/%d" % (addr, addr, addr_len))
        elif block[1].isdigit() and int(block[1]) <= addr_len:
            prefix_size = addr_len - int(block[1])
            ip >>= prefix_size
        else:
            raise Exception("Not a valid CIDR notation: %s" % addr)
        if addr_family == socket.AF_INET:
            self._network_list_v4.append((ip, prefix_size))
        else:
            self._network_list_v6.append((ip, prefix_size))

    def __contains__(self, addr):
        # shift the candidate right by each network's host-bit count and
        # compare against the stored prefix value
        addr_family = is_ip(addr)
        if addr_family == socket.AF_INET:
            ip, = struct.unpack("!I", socket.inet_aton(addr))
            return any(map(lambda n_ps: n_ps[0] == ip >> n_ps[1],
                           self._network_list_v4))
        elif addr_family == socket.AF_INET6:
            hi, lo = struct.unpack("!QQ", inet_pton(addr_family, addr))
            ip = (hi << 64) | lo
            return any(map(lambda n_ps: n_ps[0] == ip >> n_ps[1],
                           self._network_list_v6))
        else:
            return False
def test_inet_conv():
    """Round-trip text <-> packed through the pure-Python inet_pton/inet_ntop."""
    ipv4 = b'8.8.4.4'
    b = inet_pton(socket.AF_INET, ipv4)
    assert inet_ntop(socket.AF_INET, b) == ipv4
    ipv6 = b'2404:6800:4005:805::1011'
    b = inet_pton(socket.AF_INET6, ipv6)
    assert inet_ntop(socket.AF_INET6, b) == ipv6
def test_parse_header():
    """parse_header on hostname, IPv4 and IPv6 wire headers."""
    assert parse_header(b'\x03\x0ewww.google.com\x00\x50') == \
        (3, b'www.google.com', 80, 18)
    assert parse_header(b'\x01\x08\x08\x08\x08\x00\x35') == \
        (1, b'8.8.8.8', 53, 7)
    assert parse_header((b'\x04$\x04h\x00@\x05\x08\x05\x00\x00\x00\x00\x00'
                         b'\x00\x10\x11\x00\x50')) == \
        (4, b'2404:6800:4005:805::1011', 80, 19)
def test_pack_header():
    """pack_addr for IPv4, IPv6 and hostname inputs."""
    assert pack_addr(b'8.8.8.8') == b'\x01\x08\x08\x08\x08'
    assert pack_addr(b'2404:6800:4005:805::1011') == \
        b'\x04$\x04h\x00@\x05\x08\x05\x00\x00\x00\x00\x00\x00\x10\x11'
    assert pack_addr(b'www.google.com') == b'\x03\x0ewww.google.com'
def test_ip_network():
    """IPNetwork membership for explicit CIDR blocks and bare addresses
    (bare addresses get an implicit prefix from their trailing zero bits)."""
    ip_network = IPNetwork('127.0.0.0/24,::ff:1/112,::1,192.168.1.1,192.0.2.0')
    assert '127.0.0.1' in ip_network
    assert '127.0.1.1' not in ip_network
    assert ':ff:ffff' in ip_network
    assert '::ffff:1' not in ip_network
    assert '::1' in ip_network
    assert '::2' not in ip_network
    assert '192.168.1.1' in ip_network
    assert '192.168.1.2' not in ip_network
    assert '192.0.2.1' in ip_network
    assert '192.0.3.1' in ip_network  # 192.0.2.0 is treated as 192.0.2.0/23
    assert 'www.google.com' not in ip_network
# Run the module self-tests when executed directly.
if __name__ == '__main__':
    test_inet_conv()
    test_parse_header()
    test_pack_header()
    test_ip_network()
| apache-2.0 |
jmhsi/justin_tinker | data_science/courses/temp/sgdr.py | 1 | 6126 | from .imports import *
from .layer_optimizer import *
import copy
class Callback:
    """No-op training-loop callback; subclasses override only the hooks
    they need."""

    def on_train_begin(self):
        """Called once before training starts."""

    def on_batch_begin(self):
        """Called before each mini-batch."""

    def on_epoch_end(self, metrics):
        """Called after each epoch with that epoch's metrics."""

    def on_batch_end(self, metrics):
        """Called after each mini-batch with the batch loss/metrics."""

    def on_train_end(self):
        """Called once after training finishes."""
class LossRecorder(Callback):
    """Callback that records the learning rate and loss at every
    iteration, with quick matplotlib plots of both histories."""

    def __init__(self, layer_opt):
        super().__init__()
        self.layer_opt = layer_opt
        self.init_lrs = np.array(layer_opt.lrs)

    def on_train_begin(self):
        """Reset all recorded history at the start of training."""
        self.losses, self.lrs, self.iterations = [], [], []
        self.iteration = 0
        self.epoch = 0

    def on_epoch_end(self, metrics):
        self.epoch += 1

    def on_batch_end(self, loss):
        """Log the current learning rate and batch loss."""
        self.iteration += 1
        self.lrs.append(self.layer_opt.lr)
        self.iterations.append(self.iteration)
        self.losses.append(loss)

    def plot_loss(self):
        """Loss vs. iteration, skipping the first 10 warm-up iterations."""
        plt.plot(self.iterations[10:], self.losses[10:])

    def plot_lr(self):
        """Learning rate vs. iteration."""
        plt.xlabel("iterations")
        plt.ylabel("learning rate")
        plt.plot(self.iterations, self.lrs)
class LR_Updater(LossRecorder):
    """LossRecorder that additionally recomputes the learning rates after
    every batch according to a schedule defined by ``calc_lr``."""

    def on_train_begin(self):
        super().on_train_begin()
        self.update_lr()

    def on_batch_end(self, loss):
        res = super().on_batch_end(loss)
        self.update_lr()
        return res

    def update_lr(self):
        """Push the scheduled learning rates into the layer optimizer."""
        self.layer_opt.set_lrs(self.calc_lr(self.init_lrs))

    @abstractmethod
    def calc_lr(self, init_lrs):
        """Map the initial per-layer lrs to the current iteration's lrs."""
        raise NotImplementedError
class LR_Finder(LR_Updater):
    """Learning-rate range test: grow the lr geometrically from its
    initial value to *end_lr* over *nb* iterations while recording the
    loss, aborting once the loss diverges."""

    def __init__(self, layer_opt, nb, end_lr=10):
        self.lr_mult = (end_lr / layer_opt.lr) ** (1 / nb)
        super().__init__(layer_opt)

    def on_train_begin(self):
        super().on_train_begin()
        self.best = 1e9

    def calc_lr(self, init_lrs):
        """Geometric schedule: lr_i = lr_0 * mult**i."""
        return init_lrs * (self.lr_mult ** self.iteration)

    def on_batch_end(self, loss):
        # Abort the run (return True) once the loss turns NaN or blows up
        # past 4x the best loss seen so far.
        if math.isnan(loss) or loss > self.best * 4:
            return True
        if loss < self.best and self.iteration > 10:
            self.best = loss
        return super().on_batch_end(loss)

    def plot(self, n_skip=10):
        """Loss vs. learning rate on a log-x axis, trimming both ends."""
        plt.ylabel("loss")
        plt.xlabel("learning rate (log scale)")
        plt.plot(self.lrs[n_skip:-5], self.losses[n_skip:-5])
        plt.xscale('log')
class CosAnneal(LR_Updater):
    """Cosine-annealing learning-rate schedule with warm restarts (SGDR,
    https://arxiv.org/abs/1608.03983).

    *nb* is the number of iterations in the first cycle; each restart
    multiplies the cycle length by *cycle_mult*.  ``on_cycle_end`` (if
    given) is invoked at every restart, e.g. to snapshot weights.
    """
    def __init__(self, layer_opt, nb, on_cycle_end=None, cycle_mult=1):
        self.nb,self.on_cycle_end,self.cycle_mult = nb,on_cycle_end,cycle_mult
        super().__init__(layer_opt)
    def on_train_begin(self):
        self.cycle_iter,self.cycle_count=0,0
        super().on_train_begin()
    def calc_lr(self, init_lrs):
        # warm-up: the first ~5% of the initial cycle runs at lr/100
        if self.iteration<self.nb/20:
            self.cycle_iter += 1
            return init_lrs/100.
        # half-cosine factor going from 2 down to 0 across the cycle
        cos_out = np.cos(np.pi*(self.cycle_iter)/self.nb) + 1
        self.cycle_iter += 1
        if self.cycle_iter==self.nb:
            # restart: reset position, stretch the next cycle, fire callback
            self.cycle_iter = 0
            self.nb *= self.cycle_mult
            if self.on_cycle_end:
                self.on_cycle_end(self, self.cycle_count)
            self.cycle_count += 1
        return init_lrs / 2 * cos_out
class WeightDecaySchedule(Callback):
    def __init__(self, layer_opt, batch_per_epoch, cycle_len, cycle_mult, n_cycles, norm_wds=False, wds_sched_mult=None):
        """
        Implements the weight decay schedule as mentioned in https://arxiv.org/abs/1711.05101
        (decoupled weight decay, AdamW-style: decay is applied outside the
        optimizer step, see on_batch_begin/on_batch_end).

        :param layer_opt: The LayerOptimizer
        :param batch_per_epoch: Num batches in 1 epoch
        :param cycle_len: Num epochs in initial cycle. Subsequent cycle_len = previous cycle_len * cycle_mult
        :param cycle_mult: Cycle multiplier
        :param n_cycles: Number of cycles to be executed
        """
        super().__init__()
        self.layer_opt = layer_opt
        self.batch_per_epoch = batch_per_epoch
        self.init_wds = np.array(layer_opt.wds)  # Weights as set by user
        self.init_lrs = np.array(layer_opt.lrs)  # Learning rates as set by user
        self.new_wds = None  # Holds the new weight decay factors, calculated in on_batch_begin()
        self.param_groups_old = None  # Caches the old parameter values in on_batch_begin()
        self.iteration = 0
        self.epoch = 0
        self.wds_sched_mult = wds_sched_mult
        self.norm_wds = norm_wds
        self.wds_history = list()
        # Pre calculating the number of epochs in the cycle of current running epoch
        self.epoch_to_num_cycles, i = dict(), 0
        for cycle in range(n_cycles):
            for _ in range(cycle_len):
                self.epoch_to_num_cycles[i] = cycle_len
                i += 1
            cycle_len *= cycle_mult
    def on_train_begin(self):
        """Reset iteration/epoch counters at the start of training."""
        self.iteration = 0
        self.epoch = 0
    def on_batch_begin(self):
        """Compute this batch's decay factors and snapshot the weights."""
        # Prepare for decay of weights
        # Default weight decay (as provided by user)
        wdn = self.init_wds
        # Weight decay multiplier (The 'eta' in the paper). Optional.
        wdm = 1.0
        if self.wds_sched_mult is not None:
            wdm = self.wds_sched_mult(self)
        # Weight decay normalized. Optional.
        if self.norm_wds:
            wdn = wdn / np.sqrt(self.batch_per_epoch * self.epoch_to_num_cycles[self.epoch])
        # Final wds
        self.new_wds = wdm * wdn
        # Record the wds
        self.wds_history.append(self.new_wds)
        # Set weight_decay with zeros so that it is not applied in Adam, we will apply it outside in on_batch_end()
        self.layer_opt.set_wds(torch.zeros(self.new_wds.size))
        # We have to save the existing weights before the optimizer changes the values
        self.param_groups_old = copy.deepcopy(self.layer_opt.opt.param_groups)
        self.iteration += 1
    def on_batch_end(self, loss):
        """Apply the decoupled decay using the pre-step parameter snapshot."""
        # Decay the weights
        for group, group_old, wds in zip(self.layer_opt.opt.param_groups, self.param_groups_old, self.new_wds):
            for p, p_old in zip(group['params'], group_old['params']):
                if p.grad is None:
                    continue
                p.data = p.data.add(-wds, p_old.data)
    def on_epoch_end(self, metrics):
        self.epoch += 1
| apache-2.0 |
nwokeo/supysonic | venv/lib/python2.7/site-packages/PIL/ImageDraw.py | 8 | 12705 | #
# The Python Imaging Library
# $Id$
#
# drawing interface operations
#
# History:
# 1996-04-13 fl Created (experimental)
# 1996-08-07 fl Filled polygons, ellipses.
# 1996-08-13 fl Added text support
# 1998-06-28 fl Handle I and F images
# 1998-12-29 fl Added arc; use arc primitive to draw ellipses
# 1999-01-10 fl Added shape stuff (experimental)
# 1999-02-06 fl Added bitmap support
# 1999-02-11 fl Changed all primitives to take options
# 1999-02-20 fl Fixed backwards compatibility
# 2000-10-12 fl Copy on write, when necessary
# 2001-02-18 fl Use default ink for bitmap/text also in fill mode
# 2002-10-24 fl Added support for CSS-style color strings
# 2002-12-10 fl Added experimental support for RGBA-on-RGB drawing
# 2002-12-11 fl Refactored low-level drawing API (work in progress)
# 2004-08-26 fl Made Draw() a factory function, added getdraw() support
# 2004-09-04 fl Added width support to line primitive
# 2004-09-10 fl Added font mode handling
# 2006-06-19 fl Added font bearing support (getmask2)
#
# Copyright (c) 1997-2006 by Secret Labs AB
# Copyright (c) 1996-2006 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
import numbers
from . import Image, ImageColor
from ._util import isStringType
"""
A simple 2D drawing interface for PIL images.
<p>
Application code should use the <b>Draw</b> factory, instead of
directly.
"""
class ImageDraw(object):
    """Drawing context bound to a single image; thin wrappers around the
    C-level ``Image.core.draw`` primitives."""

    def __init__(self, im, mode=None):
        """
        Create a drawing instance.

        :param im: The image to draw in.
        :param mode: Optional mode to use for color values.  For RGB
            images, this argument can be RGB or RGBA (to blend the
            drawing into the image).  For all other modes, this argument
            must be the same as the image mode.  If omitted, the mode
            defaults to the mode of the image.
        """
        im.load()
        if im.readonly:
            im._copy()  # make it writeable
        blend = 0
        if mode is None:
            mode = im.mode
        if mode != im.mode:
            if mode == "RGBA" and im.mode == "RGB":
                blend = 1
            else:
                raise ValueError("mode mismatch")
        if mode == "P":
            # keep the palette so named/tuple colors can be mapped to indexes
            self.palette = im.palette
        else:
            self.palette = None
        self.im = im.im
        self.draw = Image.core.draw(self.im, blend)
        self.mode = mode
        if mode in ("I", "F"):
            self.ink = self.draw.draw_ink(1, mode)
        else:
            self.ink = self.draw.draw_ink(-1, mode)
        if mode in ("1", "P", "I", "F"):
            # FIXME: fix Fill2 to properly support matte for I+F images
            self.fontmode = "1"
        else:
            self.fontmode = "L"  # aliasing is okay for other modes
        self.fill = 0
        self.font = None

    def getfont(self):
        """Get the current default font (lazily loads the built-in one)."""
        if not self.font:
            # FIXME: should add a font repository
            from . import ImageFont
            self.font = ImageFont.load_default()
        return self.font

    def _getink(self, ink, fill=None):
        """Resolve user-supplied *ink*/*fill* color specs (names, tuples,
        palette colors) into low-level ink objects; when neither is given,
        fall back to the default ink in outline or fill position depending
        on ``self.fill``."""
        if ink is None and fill is None:
            if self.fill:
                fill = self.ink
            else:
                ink = self.ink
        else:
            if ink is not None:
                if isStringType(ink):
                    ink = ImageColor.getcolor(ink, self.mode)
                if self.palette and not isinstance(ink, numbers.Number):
                    ink = self.palette.getcolor(ink)
                ink = self.draw.draw_ink(ink, self.mode)
            if fill is not None:
                if isStringType(fill):
                    fill = ImageColor.getcolor(fill, self.mode)
                if self.palette and not isinstance(fill, numbers.Number):
                    fill = self.palette.getcolor(fill)
                fill = self.draw.draw_ink(fill, self.mode)
        return ink, fill

    def arc(self, xy, start, end, fill=None):
        """Draw an arc."""
        ink, fill = self._getink(fill)
        if ink is not None:
            self.draw.draw_arc(xy, start, end, ink)

    def bitmap(self, xy, bitmap, fill=None):
        """Draw a bitmap."""
        bitmap.load()
        ink, fill = self._getink(fill)
        if ink is None:
            ink = fill
        if ink is not None:
            self.draw.draw_bitmap(xy, bitmap.im, ink)

    def chord(self, xy, start, end, fill=None, outline=None):
        """Draw a chord."""
        ink, fill = self._getink(outline, fill)
        if fill is not None:
            self.draw.draw_chord(xy, start, end, fill, 1)
        if ink is not None:
            self.draw.draw_chord(xy, start, end, ink, 0)

    def ellipse(self, xy, fill=None, outline=None):
        """Draw an ellipse."""
        ink, fill = self._getink(outline, fill)
        if fill is not None:
            self.draw.draw_ellipse(xy, fill, 1)
        if ink is not None:
            self.draw.draw_ellipse(xy, ink, 0)

    def line(self, xy, fill=None, width=0):
        """Draw a line, or a connected sequence of line segments."""
        ink, fill = self._getink(fill)
        if ink is not None:
            self.draw.draw_lines(xy, ink, width)

    def shape(self, shape, fill=None, outline=None):
        """(Experimental) Draw a shape."""
        shape.close()
        ink, fill = self._getink(outline, fill)
        if fill is not None:
            self.draw.draw_outline(shape, fill, 1)
        if ink is not None:
            self.draw.draw_outline(shape, ink, 0)

    def pieslice(self, xy, start, end, fill=None, outline=None):
        """Draw a pieslice."""
        ink, fill = self._getink(outline, fill)
        if fill is not None:
            self.draw.draw_pieslice(xy, start, end, fill, 1)
        if ink is not None:
            self.draw.draw_pieslice(xy, start, end, ink, 0)

    def point(self, xy, fill=None):
        """Draw one or more individual pixels."""
        ink, fill = self._getink(fill)
        if ink is not None:
            self.draw.draw_points(xy, ink)

    def polygon(self, xy, fill=None, outline=None):
        """Draw a polygon."""
        ink, fill = self._getink(outline, fill)
        if fill is not None:
            self.draw.draw_polygon(xy, fill, 1)
        if ink is not None:
            self.draw.draw_polygon(xy, ink, 0)

    def rectangle(self, xy, fill=None, outline=None):
        """Draw a rectangle."""
        ink, fill = self._getink(outline, fill)
        if fill is not None:
            self.draw.draw_rectangle(xy, fill, 1)
        if ink is not None:
            self.draw.draw_rectangle(xy, ink, 0)

    def _multiline_check(self, text):
        """Return True if *text* spans multiple lines (str or bytes)."""
        split_character = "\n" if isinstance(text, str) else b"\n"
        return split_character in text

    def _multiline_split(self, text):
        """Split *text* into its lines (str or bytes)."""
        split_character = "\n" if isinstance(text, str) else b"\n"
        return text.split(split_character)

    def text(self, xy, text, fill=None, font=None, anchor=None,
             *args, **kwargs):
        """Draw *text* at position *xy*; multi-line text is delegated to
        :meth:`multiline_text`."""
        if self._multiline_check(text):
            return self.multiline_text(xy, text, fill, font, anchor,
                                       *args, **kwargs)
        ink, fill = self._getink(fill)
        if font is None:
            font = self.getfont()
        if ink is None:
            ink = fill
        if ink is not None:
            try:
                # getmask2 also reports the bearing offset for fonts that
                # support it; fall back to plain getmask otherwise
                mask, offset = font.getmask2(text, self.fontmode)
                xy = xy[0] + offset[0], xy[1] + offset[1]
            except AttributeError:
                try:
                    mask = font.getmask(text, self.fontmode)
                except TypeError:
                    mask = font.getmask(text)
            self.draw.draw_bitmap(xy, mask, ink)

    def multiline_text(self, xy, text, fill=None, font=None, anchor=None,
                       spacing=4, align="left"):
        """Draw multi-line *text*, one line at a time, honoring *spacing*
        (pixels between lines) and horizontal *align*."""
        widths = []
        max_width = 0
        lines = self._multiline_split(text)
        line_spacing = self.textsize('A', font=font)[1] + spacing
        for line in lines:
            line_width, line_height = self.textsize(line, font)
            widths.append(line_width)
            max_width = max(max_width, line_width)
        left, top = xy
        for idx, line in enumerate(lines):
            if align == "left":
                pass  # left = x
            elif align == "center":
                left += (max_width - widths[idx]) / 2.0
            elif align == "right":
                left += (max_width - widths[idx])
            else:
                assert False, 'align must be "left", "center" or "right"'
            self.text((left, top), line, fill, font, anchor)
            top += line_spacing
            left = xy[0]

    def textsize(self, text, font=None, *args, **kwargs):
        """Get the size of a given string, in pixels."""
        if self._multiline_check(text):
            return self.multiline_textsize(text, font, *args, **kwargs)
        if font is None:
            font = self.getfont()
        return font.getsize(text)

    def multiline_textsize(self, text, font=None, spacing=4):
        """Size of multi-line *text*: widest line by total line height."""
        max_width = 0
        lines = self._multiline_split(text)
        line_spacing = self.textsize('A', font=font)[1] + spacing
        for line in lines:
            line_width, line_height = self.textsize(line, font)
            max_width = max(max_width, line_width)
        return max_width, len(lines)*line_spacing
def Draw(im, mode=None):
    """
    A simple 2D drawing interface for PIL images.

    :param im: The image to draw in.
    :param mode: Optional mode to use for color values.  For RGB
        images, this argument can be RGB or RGBA (to blend the
        drawing into the image).  For all other modes, this argument
        must be the same as the image mode.  If omitted, the mode
        defaults to the mode of the image.
    """
    try:
        # let the image supply its own drawing factory, if it has one
        return im.getdraw(mode)
    except AttributeError:
        return ImageDraw(im, mode)
# Experimental access to the outline API (absent from some builds of the
# core extension module).
Outline = getattr(Image.core, "outline", None)
def getdraw(im=None, hints=None):
    """
    (Experimental) A more advanced 2D drawing interface for PIL images,
    based on the WCK interface.

    :param im: The image to draw in.
    :param hints: An optional list of hints.  Only "nicest" is honoured:
        it tries the agg-based backend before the stock ImageDraw2 one.
    :returns: A (drawing context, drawing resource factory) tuple.
    """
    # FIXME: this needs more work!
    # FIXME: come up with a better 'hints' scheme.
    handler = None
    if not hints or "nicest" in hints:
        try:
            # optional high-quality anti-aliased backend
            from . import _imagingagg as handler
        except ImportError:
            pass
    if handler is None:
        from . import ImageDraw2 as handler
    if im:
        im = handler.Draw(im)
    return im, handler
def floodfill(image, xy, value, border=None):
    """
    (experimental) Fills a bounded region with a given color.

    :param image: Target image.
    :param xy: Seed position (a 2-item coordinate tuple).
    :param value: Fill color.
    :param border: Optional border value. If given, the region consists of
        pixels with a color different from the border color. If not given,
        the region consists of pixels having the same color as the seed
        pixel.
    """
    # based on an implementation by Eric S. Raymond
    pixel = image.load()
    x, y = xy
    try:
        background = pixel[x, y]
        if background == value:
            return  # seed point already has fill color
        pixel[x, y] = value
    except (ValueError, IndexError):
        return  # seed point outside image
    # The two fill modes differ only in the predicate that decides whether
    # a neighbour belongs to the region, so they can share one traversal
    # (the original duplicated the entire BFS loop for each mode).
    if border is None:
        def _inside(p):
            return p == background
    else:
        def _inside(p):
            return p != value and p != border
    # Breadth-first traversal over the 4-connected neighbourhood; filled
    # pixels take *value*, which also marks them as visited.
    edge = [(x, y)]
    while edge:
        newedge = []
        for (x, y) in edge:
            for (s, t) in ((x+1, y), (x-1, y), (x, y+1), (x, y-1)):
                try:
                    p = pixel[s, t]
                except IndexError:
                    pass
                else:
                    if _inside(p):
                        pixel[s, t] = value
                        newedge.append((s, t))
        edge = newedge
| agpl-3.0 |
MSeifert04/astropy | astropy/timeseries/core.py | 12 | 3280 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from types import FunctionType
from contextlib import contextmanager
from functools import wraps
from astropy.table import QTable
__all__ = ['BaseTimeSeries', 'autocheck_required_columns']
# Column-mutating QTable methods that must trigger a consistency check of
# the required columns after they run.
COLUMN_RELATED_METHODS = ['add_column',
                          'add_columns',
                          'keep_columns',
                          'remove_column',
                          'remove_columns',
                          'rename_column']


def autocheck_required_columns(cls):
    """
    This is a decorator that ensures that the table contains specific
    methods indicated by the _required_columns attribute. The aim is to
    decorate all methods that might affect the columns in the table and check
    for consistency after the methods have been run.
    """

    def _checked(method):
        @wraps(method)
        def wrapper(self, *args, **kwargs):
            out = method(self, *args, **kwargs)
            self._check_required_columns()
            return out
        return wrapper

    for name in COLUMN_RELATED_METHODS:
        attr = getattr(cls, name, None)
        if not isinstance(attr, FunctionType):
            raise ValueError(f"{name} is not a valid method")
        setattr(cls, name, _checked(attr))

    return cls
class BaseTimeSeries(QTable):
    """QTable subclass that enforces the presence and order of a set of
    required leading columns (declared via ``_required_columns``)."""

    _required_columns = None
    _required_columns_enabled = True

    # If _required_column_relax is True, we don't require the columns to be
    # present but we do require them to be the correct ones IF present. Note
    # that this is a temporary state - as soon as the required columns
    # are all present, we toggle this to False
    _required_columns_relax = False

    def _check_required_columns(self):
        """Raise ValueError unless the table's leading columns match
        ``_required_columns`` (or a prefix of them while relaxed)."""
        if not self._required_columns_enabled:
            return

        if self._required_columns is not None:

            if self._required_columns_relax:
                required_columns = self._required_columns[:len(self.colnames)]
            else:
                required_columns = self._required_columns

            plural = 's' if len(required_columns) > 1 else ''

            if not self._required_columns_relax and len(self.colnames) == 0:

                raise ValueError("{} object is invalid - expected '{}' "
                                 "as the first column{} but time series has no columns"
                                 .format(self.__class__.__name__, required_columns[0], plural))

            elif self.colnames[:len(required_columns)] != required_columns:

                raise ValueError("{} object is invalid - expected '{}' "
                                 "as the first column{} but found '{}'"
                                 .format(self.__class__.__name__, required_columns[0], plural, self.colnames[0]))

            # once all required columns are present, stop relaxing
            if (self._required_columns_relax
                    and self._required_columns == self.colnames[:len(self._required_columns)]):
                self._required_columns_relax = False

    @contextmanager
    def _delay_required_column_checks(self):
        """Temporarily disable required-column checking, re-enabling it and
        re-checking on exit.  The try/finally guarantees the check is
        re-enabled even if the with-block raises (the original left
        checking permanently disabled in that case)."""
        self._required_columns_enabled = False
        try:
            yield
        finally:
            self._required_columns_enabled = True
            self._check_required_columns()
| bsd-3-clause |
dlmeduLi/fraglen | fraglen.py | 1 | 5259 | #!/usr/bin/env python
from __future__ import print_function
import re
import os
import os.path
import optparse
import sys
import pysam
import subprocess
# global regular expressions
# readKeyRe captures a read name with its trailing "/1" / " 1" pair-number
# stripped (groups = text before and after the number);
# readNumRe captures the pair number itself ('1' or '2').
readKeyRe = re.compile('(.*)[\s\/][1-2](.*)')
readNumRe = re.compile('.*[\s\/]([1-2]).*')
# sort bam file with pysam.sort function
# the bam file should be sorted by qname
def SortSam(inBam, outBam):
    """Sort *inBam* by query name (samtools sort -n) into *outBam*."""
    pysam.sort("-n", inBam, outBam)
def TrimReadSeq(seq, cigar):
    """Project the raw read sequence *seq* onto the reference using *cigar*.

    *cigar* is a pysam-style list of (operation, length) pairs.  M (0)
    copies bases from the read; I (1) and S (4) consume read bases without
    emitting anything; D (2) and N (3) emit 'N' placeholders for reference
    positions absent from the read.  Other operations are ignored.

    Pieces are collected in a list and joined once; the original
    concatenated onto a str inside the loop, which is quadratic.
    """
    pieces = []
    pos = 0
    for op, length in cigar:
        if op == 0:    # M: aligned match/mismatch -> copy from the read
            pieces.append(seq[pos:pos + length])
            pos += length
        elif op == 1:  # I: insertion to the reference -> skip read bases
            pos += length
        elif op == 2:  # D: deletion from the read -> pad with N
            pieces.append('N' * length)
        elif op == 3:  # N: skipped reference region (intron) -> pad with N
            pieces.append('N' * length)
        elif op == 4:  # S: soft clip -> skip read bases
            pos += length
    return ''.join(pieces)
def FormatPosList(posList):
    """Render a list of positions as 'p1,p2,...', or 'NA' when empty."""
    if not posList:
        return 'NA'
    return ','.join(str(p) for p in posList)
##### write fragment length record #####
def WriteFragLen(fragType, chrname, pos, fragLen, outFile):
    """Append one tab-separated record (type, chromosome, right-aligned
    position and length) to *outFile*."""
    outFile.write('%s\t%s\t%15ld\t%6d\n' % (fragType, chrname, pos, fragLen))
##### get fragment length from SE reads #####
def FragOfSingleReads(dictSingle, bamFile, outFile):
    """Emit one 'S' record per unpaired read; the fragment length is the
    reference-projected read length."""
    # .values() works on both Python 2 and 3; the original .itervalues()
    # is Python-2 only and raises AttributeError under Python 3.
    for read in dictSingle.values():
        fragLen = len(TrimReadSeq(read.seq, read.cigar))
        chrname = bamFile.getrname(read.rname)
        WriteFragLen('S', chrname, read.pos, fragLen, outFile)
##### get fragment length from PE reads #####
def FragOfPairedReads(dictPaired, bamFile, outFile):
    """Emit one 'P' record per complete read pair: the fragment spans from
    the leftmost mate's start to the rightmost mate's end."""
    # .values() for Python 2/3 compatibility (was the py2-only .itervalues()).
    for pair in dictPaired.values():
        # Skip pairs where only one mate was seen.  The original code
        # `return`ed here, silently dropping all remaining pairs.
        if not (pair[0] and pair[1]):
            continue
        chrname = bamFile.getrname(pair[0].rname)
        readSeqLen2 = len(TrimReadSeq(pair[1].seq, pair[1].cigar))
        fragPos = pair[0].pos
        fragLen = pair[1].pos + readSeqLen2 - fragPos
        WriteFragLen('P', chrname, fragPos, fragLen, outFile)
def main():
    """Entry point: read a (qname-sorted) BAM file, group alignments by
    read name, compute fragment lengths for paired and single reads, write
    them as TSV, and hand the result to an R plotting script."""
    # parse the command line options
    usage = 'usage: %prog [options] input.bam -o output.csv'
    parser = optparse.OptionParser(usage=usage, version='%prog 0.1.0')
    parser.add_option('-o', '--output-file', dest='outputfile',
                      help='write the result to output file')
    parser.add_option('-s', '--sort',
                      action="store_true", dest="sort", default=False,
                      help='sort the input BAM file before data processing')
    (options, args) = parser.parse_args()
    if(len(args) != 1):
        parser.print_help()
        sys.exit(0)
    inputBamFileName = args[0]
    bamFileName = inputBamFileName
    baseFileName = os.path.splitext(os.path.basename(inputBamFileName))[0]
    outputFileName = baseFileName + '.fraglen.csv'
    logFileName = baseFileName + '.fraglen.log'
    # sort the input bam file if -s option is set
    rmTemp = False
    if(options.sort):
        print('[*] Sorting by QNAME...')
        bamFileName = 'sorted.' + baseFileName
        SortSam(inputBamFileName, bamFileName)
        bamFileName += '.bam'
        rmTemp = True
    # load input files
    print('[*] Initializing...')
    if(not os.path.exists(bamFileName)):
        print('error: Failed to open file "', bamFileName, '"')
        sys.exit(-1)
    bamFile = pysam.AlignmentFile(bamFileName, "rb")
    # prepare output files
    if(options.outputfile):
        outputFileName = options.outputfile
    try:
        outFile = open(outputFileName, 'w')
    except IOError:
        print('error: write to output file failed!')
        sys.exit(-1)
    outFile.write('type\tchr\tpos\tlen\n')
    # analyse alignments: reads are grouped by qname (hence the required
    # name sort); whenever the group key changes, the accumulated singles
    # and pairs are flushed to the output file
    print('[*] Analyzing...')
    dictSingle = {}
    dictPaired = {}
    currentGroupKey = ''
    readCount = 0
    for read in bamFile.fetch(until_eof = True):
        try:
            groupKey = ''.join(readKeyRe.findall(read.qname)[0])
        except:
            groupKey = read.qname
        if(groupKey != currentGroupKey):
            currentGroupKey = groupKey
            # handle and write results
            FragOfSingleReads(dictSingle, bamFile, outFile)
            FragOfPairedReads(dictPaired, bamFile, outFile)
            dictPaired.clear()
            dictSingle.clear()
        # check if it is properly paired
        if(read.is_proper_pair):
            # paired reads: key pairs by group plus both mates' positions
            chrpos = bamFile.getrname(read.rname).strip() + str(read.pos)
            chrposNext = bamFile.getrname(read.rnext).strip() + str(read.pnext)
            if(chrpos < chrposNext):
                readKey = groupKey + ':' + chrpos + ':' + chrposNext
            else:
                readKey = groupKey + ':' + chrposNext + ':' + chrpos
            if(read.is_reverse):
                readType = 1
            else:
                readType = 0
            if(readType == 0 or readType == 1):
                if(readKey in dictPaired):
                    dictPaired[readKey][readType] = read
                else:
                    dictPaired[readKey] = [None, None]
                    dictPaired[readKey][readType] = read
        else:
            # single read: suffix the key with the read number (1/2).
            # Fix: findall() returns a list, so the original concatenation
            # always raised TypeError (swallowed by the bare except, making
            # every key just groupKey); the missing [0] is added.
            try:
                readKey = groupKey + ':' + readNumRe.findall(read.qname)[0]
            except:
                readKey = groupKey
            dictSingle[readKey] = read
        # progress
        readCount += 1
        sys.stdout.write('\r read: #%ld' % (readCount))
        sys.stdout.flush()
    # flush the final group
    FragOfSingleReads(dictSingle, bamFile, outFile)
    FragOfPairedReads(dictPaired, bamFile, outFile)
    sys.stdout.write('\n')
    # release resources
    bamFile.close()
    outFile.close()
    if(rmTemp):
        os.unlink(bamFileName)
    # ggplot2
    subprocess.call(["Rscript", "plot_fraglen.R", outputFileName, baseFileName])
    print('[*] Complete')


if __name__ == '__main__':
    main()
| gpl-2.0 |
tashaxe/Red-DiscordBot | lib/future/backports/email/mime/audio.py | 82 | 2815 | # Copyright (C) 2001-2007 Python Software Foundation
# Author: Anthony Baxter
# Contact: email-sig@python.org
"""Class representing audio/* type MIME documents."""
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
__all__ = ['MIMEAudio']
import sndhdr
from io import BytesIO
from future.backports.email import encoders
from future.backports.email.mime.nonmultipart import MIMENonMultipart
# Map sndhdr's detected file types to the corresponding audio/* MIME subtype.
_sndhdr_MIMEmap = {'au'  : 'basic',
                   'wav' :'x-wav',
                   'aiff':'x-aiff',
                   'aifc':'x-aiff',
                   }

# There are others in sndhdr that don't have MIME types. :(
# Additional ones to be added to sndhdr? midi, mp3, realaudio, wma??
def _whatsnd(data):
    """Try to identify a sound file type.

    sndhdr.what() has a pretty cruddy interface, unfortunately.  This is why
    we re-do it here.  It would be easier to reverse engineer the Unix 'file'
    command and use the standard 'magic' file, as shipped with a modern Unix.

    Returns the MIME subtype string or None if the type is not recognised.
    NOTE(review): the ``sndhdr`` module was removed in Python 3.13; this
    helper presumably predates that — confirm the target runtime.
    """
    # Only the first 512 bytes are needed by the sniffing functions.
    hdr = data[:512]
    fakefile = BytesIO(hdr)
    for testfn in sndhdr.tests:
        res = testfn(hdr, fakefile)
        if res is not None:
            return _sndhdr_MIMEmap.get(res[0])
    return None
class MIMEAudio(MIMENonMultipart):
    """Class for generating audio/* MIME documents."""

    def __init__(self, _audiodata, _subtype=None,
                 _encoder=encoders.encode_base64, **_params):
        """Create an audio/* type MIME document.

        _audiodata is a string containing the raw audio data.  If this data
        can be decoded by the standard Python `sndhdr' module, then the
        subtype will be automatically included in the Content-Type header.
        Otherwise, you can specify the specific audio subtype via the
        _subtype parameter.  If _subtype is not given, and no subtype can be
        guessed, a TypeError is raised.

        _encoder is a function which will perform the actual encoding for
        transport of the image data.  It takes one argument, which is this
        Image instance.  It should use get_payload() and set_payload() to
        change the payload to the encoded form.  It should also add any
        Content-Transfer-Encoding or other headers to the message as
        necessary.  The default encoding is Base64.

        Any additional keyword arguments are passed to the base class
        constructor, which turns them into parameters on the Content-Type
        header.
        """
        if _subtype is None:
            # fall back to content sniffing when no subtype was given
            _subtype = _whatsnd(_audiodata)
        if _subtype is None:
            raise TypeError('Could not find audio MIME subtype')
        MIMENonMultipart.__init__(self, 'audio', _subtype, **_params)
        self.set_payload(_audiodata)
        # encode last, after the payload is in place
        _encoder(self)
| gpl-3.0 |
great-expectations/great_expectations | tests/integration/test_script_runner.py | 1 | 16781 | import enum
import os
import shutil
import subprocess
import sys
import pytest
from assets.scripts.build_gallery import execute_shell_command
from great_expectations.data_context.util import file_relative_path
class BackendDependencies(enum.Enum):
    """Backends that a test configuration may additionally depend on.

    Referenced by the ``extra_backend_dependencies`` key of the test
    matrices below and mapped onto pytest command-line flags in
    _check_for_skipped_tests().
    """

    BIGQUERY = "BIGQUERY"
    MYSQL = "MYSQL"
    MSSQL = "MSSQL"
    PANDAS = "PANDAS"
    POSTGRESQL = "POSTGRESQL"
    REDSHIFT = "REDSHIFT"
    SPARK = "SPARK"
    SQLALCHEMY = "SQLALCHEMY"
    SNOWFLAKE = "SNOWFLAKE"
# Test matrix for the documentation (docusaurus) example scripts.
# Each entry requires `user_flow_script`; optional keys are `name`,
# `data_context_dir`, `data_dir`, `util_script`, `base_dir`,
# `ge_requirement`, `expected_stderrs`, `expected_stdouts`,
# `expected_failure` and `extra_backend_dependencies`
# (consumed by _execute_integration_test / _check_for_skipped_tests).
docs_test_matrix = [
    # {
    #     "user_flow_script": "tests/integration/docusaurus/connecting_to_your_data/database/bigquery_yaml_example.py",
    #     "data_context_dir": "tests/integration/fixtures/no_datasources/great_expectations",
    #     "data_dir": "tests/test_sets/taxi_yellow_trip_data_samples",
    #     "util_script": "tests/integration/docusaurus/connecting_to_your_data/database/util.py",
    #     "extra_backend_dependencies": BackendDependencies.BIGQUERY,
    # },
    # {
    #     "user_flow_script": "tests/integration/docusaurus/connecting_to_your_data/database/bigquery_python_example.py",
    #     "data_context_dir": "tests/integration/fixtures/no_datasources/great_expectations",
    #     "data_dir": "tests/test_sets/taxi_yellow_trip_data_samples",
    #     "util_script": "tests/integration/docusaurus/connecting_to_your_data/database/util.py",
    #     "extra_backend_dependencies": BackendDependencies.BIGQUERY,
    # },
    # {
    #     "user_flow_script": "tests/integration/docusaurus/connecting_to_your_data/cloud/pandas_s3_yaml_example.py",
    #     "data_context_dir": "tests/integration/fixtures/no_datasources/great_expectations",
    # },
    # {
    #     "user_flow_script": "tests/integration/docusaurus/connecting_to_your_data/cloud/pandas_s3_python_example.py",
    #     "data_context_dir": "tests/integration/fixtures/no_datasources/great_expectations",
    # },
    # {
    #     "user_flow_script": "tests/integration/docusaurus/connecting_to_your_data/cloud/spark_s3_yaml_example.py",
    #     "extra_backend_dependencies": BackendDependencies.SPARK,
    # },
    # {
    #     "user_flow_script": "tests/integration/docusaurus/connecting_to_your_data/cloud/spark_s3_python_example.py",
    #     "extra_backend_dependencies": BackendDependencies.SPARK,
    # },
    # {
    #     "user_flow_script": "tests/integration/docusaurus/connecting_to_your_data/database/redshift_python_example.py",
    #     "data_context_dir": "tests/integration/fixtures/no_datasources/great_expectations",
    #     "data_dir": "tests/test_sets/taxi_yellow_trip_data_samples",
    #     "util_script": "tests/integration/docusaurus/connecting_to_your_data/database/util.py",
    #     "extra_backend_dependencies": BackendDependencies.REDSHIFT,
    # },
    # {
    #     "user_flow_script": "tests/integration/docusaurus/connecting_to_your_data/database/redshift_yaml_example.py",
    #     "data_context_dir": "tests/integration/fixtures/no_datasources/great_expectations",
    #     "data_dir": "tests/test_sets/taxi_yellow_trip_data_samples",
    #     "util_script": "tests/integration/docusaurus/connecting_to_your_data/database/util.py",
    #     "extra_backend_dependencies": BackendDependencies.REDSHIFT,
    # },
    {
        "name": "getting_started",
        "data_context_dir": "tests/integration/fixtures/yellow_trip_data_pandas_fixture/great_expectations",
        "data_dir": "tests/test_sets/taxi_yellow_trip_data_samples",
        "user_flow_script": "tests/integration/docusaurus/tutorials/getting-started/getting_started.py",
    },
    {
        "user_flow_script": "tests/integration/docusaurus/connecting_to_your_data/filesystem/pandas_yaml_example.py",
        "data_context_dir": "tests/integration/fixtures/no_datasources/great_expectations",
        "data_dir": "tests/test_sets/taxi_yellow_trip_data_samples",
    },
    {
        "user_flow_script": "tests/integration/docusaurus/connecting_to_your_data/filesystem/pandas_python_example.py",
        "data_context_dir": "tests/integration/fixtures/no_datasources/great_expectations",
        "data_dir": "tests/test_sets/taxi_yellow_trip_data_samples",
    },
    {
        "user_flow_script": "tests/integration/docusaurus/connecting_to_your_data/database/postgres_yaml_example.py",
        "data_context_dir": "tests/integration/fixtures/no_datasources/great_expectations",
        "data_dir": "tests/test_sets/taxi_yellow_trip_data_samples",
        "util_script": "tests/integration/docusaurus/connecting_to_your_data/database/util.py",
        "extra_backend_dependencies": BackendDependencies.POSTGRESQL,
    },
    {
        "user_flow_script": "tests/integration/docusaurus/connecting_to_your_data/database/postgres_python_example.py",
        "data_context_dir": "tests/integration/fixtures/no_datasources/great_expectations",
        "data_dir": "tests/test_sets/taxi_yellow_trip_data_samples",
        "util_script": "tests/integration/docusaurus/connecting_to_your_data/database/util.py",
        "extra_backend_dependencies": BackendDependencies.POSTGRESQL,
    },
    {
        "user_flow_script": "tests/integration/docusaurus/connecting_to_your_data/database/snowflake_python_example.py",
        "data_context_dir": "tests/integration/fixtures/no_datasources/great_expectations",
        "data_dir": "tests/test_sets/taxi_yellow_trip_data_samples",
        "util_script": "tests/integration/docusaurus/connecting_to_your_data/database/util.py",
        "extra_backend_dependencies": BackendDependencies.SNOWFLAKE,
    },
    {
        "user_flow_script": "tests/integration/docusaurus/connecting_to_your_data/database/snowflake_yaml_example.py",
        "data_context_dir": "tests/integration/fixtures/no_datasources/great_expectations",
        "data_dir": "tests/test_sets/taxi_yellow_trip_data_samples",
        "util_script": "tests/integration/docusaurus/connecting_to_your_data/database/util.py",
        "extra_backend_dependencies": BackendDependencies.SNOWFLAKE,
    },
    {
        "user_flow_script": "tests/integration/docusaurus/connecting_to_your_data/database/sqlite_yaml_example.py",
        "data_context_dir": "tests/integration/fixtures/no_datasources/great_expectations",
        "data_dir": "tests/test_sets/taxi_yellow_trip_data_samples/sqlite/",
        "extra_backend_dependencies": BackendDependencies.SQLALCHEMY,
    },
    {
        "user_flow_script": "tests/integration/docusaurus/connecting_to_your_data/database/sqlite_python_example.py",
        "data_context_dir": "tests/integration/fixtures/no_datasources/great_expectations",
        "data_dir": "tests/test_sets/taxi_yellow_trip_data_samples/sqlite/",
        "extra_backend_dependencies": BackendDependencies.SQLALCHEMY,
    },
    {
        "user_flow_script": "tests/integration/docusaurus/connecting_to_your_data/in_memory/pandas_yaml_example.py",
        "data_context_dir": "tests/integration/fixtures/no_datasources/great_expectations",
    },
    {
        "user_flow_script": "tests/integration/docusaurus/connecting_to_your_data/in_memory/pandas_python_example.py",
        "data_context_dir": "tests/integration/fixtures/no_datasources/great_expectations",
    },
    {
        "user_flow_script": "tests/integration/docusaurus/template/script_example.py",
        "data_context_dir": "tests/integration/fixtures/no_datasources/great_expectations",
    },
    {
        "user_flow_script": "tests/integration/docusaurus/connecting_to_your_data/in_memory/spark_yaml_example.py",
        "extra_backend_dependencies": BackendDependencies.SPARK,
    },
    {
        "user_flow_script": "tests/integration/docusaurus/connecting_to_your_data/in_memory/spark_python_example.py",
        "extra_backend_dependencies": BackendDependencies.SPARK,
    },
    {
        "user_flow_script": "tests/integration/docusaurus/connecting_to_your_data/filesystem/spark_yaml_example.py",
        "data_dir": "tests/test_sets/taxi_yellow_trip_data_samples",
        "extra_backend_dependencies": BackendDependencies.SPARK,
    },
    {
        "user_flow_script": "tests/integration/docusaurus/connecting_to_your_data/filesystem/spark_python_example.py",
        "data_dir": "tests/test_sets/taxi_yellow_trip_data_samples",
        "extra_backend_dependencies": BackendDependencies.SPARK,
    },
    {
        "user_flow_script": "tests/integration/docusaurus/connecting_to_your_data/database/mysql_yaml_example.py",
        "data_context_dir": "tests/integration/fixtures/no_datasources/great_expectations",
        "data_dir": "tests/test_sets/taxi_yellow_trip_data_samples",
        "util_script": "tests/integration/docusaurus/connecting_to_your_data/database/util.py",
        "extra_backend_dependencies": BackendDependencies.MYSQL,
    },
    {
        "user_flow_script": "tests/integration/docusaurus/connecting_to_your_data/database/mysql_python_example.py",
        "data_context_dir": "tests/integration/fixtures/no_datasources/great_expectations",
        "data_dir": "tests/test_sets/taxi_yellow_trip_data_samples",
        "util_script": "tests/integration/docusaurus/connecting_to_your_data/database/util.py",
        "extra_backend_dependencies": BackendDependencies.MYSQL,
    },
    {
        "name": "rule_base_profiler_multi_batch_example",
        "data_context_dir": "tests/integration/fixtures/yellow_trip_data_pandas_fixture/great_expectations",
        "data_dir": "tests/test_sets/taxi_yellow_trip_data_samples",
        "user_flow_script": "tests/integration/docusaurus/expectations/advanced/multi_batch_rule_based_profiler_example.py",
    },
]
"""
TODO(cdkini): Kept running into a sqlalchemy.exc.OperationalError when running Snowflake tests.
Per discussion with Will, issue appears to be on the Snowflake side as opposed to something in our code.
Commenting out until we figure out the issue.
"""
# Test matrix for the non-documentation integration fixtures.
# Same schema as docs_test_matrix above; consumed by
# test_integration_tests() via _execute_integration_test().
integration_test_matrix = [
    {
        "name": "pandas_one_multi_batch_request_one_validator",
        "data_context_dir": "tests/integration/fixtures/yellow_trip_data_pandas_fixture/great_expectations",
        "data_dir": "tests/test_sets/taxi_yellow_trip_data_samples",
        "user_flow_script": "tests/integration/fixtures/yellow_trip_data_pandas_fixture/one_multi_batch_request_one_validator.py",
    },
    {
        "name": "pandas_two_batch_requests_two_validators",
        "data_context_dir": "tests/integration/fixtures/yellow_trip_data_pandas_fixture/great_expectations",
        "data_dir": "tests/test_sets/taxi_yellow_trip_data_samples",
        "user_flow_script": "tests/integration/fixtures/yellow_trip_data_pandas_fixture/two_batch_requests_two_validators.py",
        "expected_stderrs": "",
        "expected_stdouts": "",
    },
    {
        "name": "pandas_multiple_batch_requests_one_validator_multiple_steps",
        "data_context_dir": "tests/integration/fixtures/yellow_trip_data_pandas_fixture/great_expectations",
        "data_dir": "tests/test_sets/taxi_yellow_trip_data_samples",
        "user_flow_script": "tests/integration/fixtures/yellow_trip_data_pandas_fixture/multiple_batch_requests_one_validator_multiple_steps.py",
    },
    {
        "name": "pandas_multiple_batch_requests_one_validator_one_step",
        "data_context_dir": "tests/integration/fixtures/yellow_trip_data_pandas_fixture/great_expectations",
        "data_dir": "tests/test_sets/taxi_yellow_trip_data_samples",
        "user_flow_script": "tests/integration/fixtures/yellow_trip_data_pandas_fixture/multiple_batch_requests_one_validator_one_step.py",
    },
]
def idfn(test_configuration):
    """Build a pytest parametrize id for a matrix entry.

    Returns the entry's ``user_flow_script`` path, or None when the key
    is absent.
    """
    script = test_configuration.get("user_flow_script")
    return script
@pytest.fixture
def pytest_parsed_arguments(request):
    """Return the argparse namespace of parsed pytest command-line options."""
    return request.config.option
@pytest.mark.docs
@pytest.mark.integration
@pytest.mark.parametrize("test_configuration", docs_test_matrix, ids=idfn)
@pytest.mark.skipif(sys.version_info < (3, 7), reason="requires Python3.7")
def test_docs(test_configuration, tmp_path, pytest_parsed_arguments):
    """Run every documentation example script end-to-end in a temp dir."""
    # Honor any --no-<backend> flags before doing the expensive setup.
    _check_for_skipped_tests(pytest_parsed_arguments, test_configuration)
    _execute_integration_test(test_configuration, tmp_path)
@pytest.mark.integration
@pytest.mark.parametrize("test_configuration", integration_test_matrix, ids=idfn)
@pytest.mark.skipif(sys.version_info < (3, 7), reason="requires Python3.7")
def test_integration_tests(test_configuration, tmp_path, pytest_parsed_arguments):
    """Run every integration fixture script end-to-end in a temp dir."""
    # Honor any --no-<backend> flags before doing the expensive setup.
    _check_for_skipped_tests(pytest_parsed_arguments, test_configuration)
    _execute_integration_test(test_configuration, tmp_path)
def _execute_integration_test(test_configuration, tmp_path):
    """
    Prepare an environment and run integration tests from a list of tests.

    Copies the configured fixtures (DataContext, test data, user flow
    script and optional util script) into ``tmp_path``, executes the
    script in a subprocess, and verifies its stdout/stderr/return code
    against the expectations declared in ``test_configuration``.

    Note that the only required parameter for a test in the matrix is
    `user_flow_script` and that all other parameters are optional.
    """
    assert (
        "user_flow_script" in test_configuration
    ), "a `user_flow_script` is required"
    workdir = os.getcwd()
    try:
        base_dir = test_configuration.get(
            "base_dir", file_relative_path(__file__, "../../")
        )
        os.chdir(tmp_path)
        # Ensure GE is installed in our environment
        ge_requirement = test_configuration.get("ge_requirement", "great_expectations")
        execute_shell_command(f"pip install {ge_requirement}")

        #
        # Build test state
        #

        # DataContext
        if test_configuration.get("data_context_dir"):
            context_source_dir = os.path.join(
                base_dir, test_configuration.get("data_context_dir")
            )
            test_context_dir = os.path.join(tmp_path, "great_expectations")
            shutil.copytree(
                context_source_dir,
                test_context_dir,
            )

        # Test Data
        if test_configuration.get("data_dir") is not None:
            source_data_dir = os.path.join(base_dir, test_configuration.get("data_dir"))
            test_data_dir = os.path.join(tmp_path, "data")
            shutil.copytree(
                source_data_dir,
                test_data_dir,
            )

        # UAT Script
        script_source = os.path.join(
            base_dir,
            test_configuration.get("user_flow_script"),
        )
        script_path = os.path.join(tmp_path, "test_script.py")
        shutil.copyfile(script_source, script_path)

        # Util Script
        if test_configuration.get("util_script") is not None:
            script_source = os.path.join(
                base_dir,
                test_configuration.get("util_script"),
            )
            util_script_path = os.path.join(tmp_path, "util.py")
            shutil.copyfile(script_source, util_script_path)

        # Execute the copied script with the interpreter running pytest; a
        # bare "python" could resolve to a different installation on PATH.
        res = subprocess.run([sys.executable, script_path], capture_output=True)

        # Check final state against declared expectations (all optional).
        expected_stderrs = test_configuration.get("expected_stderrs")
        expected_stdouts = test_configuration.get("expected_stdouts")
        expected_failure = test_configuration.get("expected_failure")
        outs = res.stdout.decode("utf-8")
        errs = res.stderr.decode("utf-8")
        print(outs)
        print(errs)
        if expected_stderrs:
            assert expected_stderrs == errs
        if expected_stdouts:
            assert expected_stdouts == outs
        if expected_failure:
            assert res.returncode != 0
        else:
            assert res.returncode == 0
    finally:
        # Always restore the working directory, even when the test fails.
        # (The original `except: raise` wrapper was a no-op and was removed.)
        os.chdir(workdir)
def _check_for_skipped_tests(pytest_args, test_configuration) -> None:
    """Enable scripts to be skipped based on pytest invocation flags.

    Maps the configuration's ``extra_backend_dependencies`` onto the
    relevant pytest command-line options and calls ``pytest.skip`` when
    that backend has been disabled for this run.
    """
    dependencies = test_configuration.get("extra_backend_dependencies", None)
    if not dependencies:
        return
    elif dependencies == BackendDependencies.POSTGRESQL and (
        pytest_args.no_postgresql or pytest_args.no_sqlalchemy
    ):
        pytest.skip("Skipping postgres tests")
    elif dependencies == BackendDependencies.MYSQL and (
        # NOTE(review): mysql is opt-in (--mysql) while the other backends
        # are opt-out (--no-<backend>); confirm this asymmetry is intended.
        not pytest_args.mysql or pytest_args.no_sqlalchemy
    ):
        pytest.skip("Skipping mysql tests")
    elif dependencies == BackendDependencies.MSSQL and (
        pytest_args.no_mssql or pytest_args.no_sqlalchemy
    ):
        pytest.skip("Skipping mssql tests")
    elif dependencies == BackendDependencies.BIGQUERY and pytest_args.no_sqlalchemy:
        pytest.skip("Skipping bigquery tests")
    elif dependencies == BackendDependencies.REDSHIFT and pytest_args.no_sqlalchemy:
        pytest.skip("Skipping redshift tests")
    elif dependencies == BackendDependencies.SPARK and pytest_args.no_spark:
        pytest.skip("Skipping spark tests")
    elif dependencies == BackendDependencies.SQLALCHEMY and pytest_args.no_sqlalchemy:
        # Bug fix: configurations tagged with the generic SQLALCHEMY
        # dependency (e.g. the sqlite examples in docs_test_matrix) were
        # previously executed even when --no-sqlalchemy was passed.
        pytest.skip("Skipping sqlalchemy tests")
    elif dependencies == BackendDependencies.SNOWFLAKE and pytest_args.no_sqlalchemy:
        pytest.skip("Skipping snowflake tests")
| apache-2.0 |
cognitiveclass/edx-platform | lms/djangoapps/instructor/services.py | 39 | 2955 | """
Implementation of "Instructor" service
"""
import logging
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey, UsageKey
from courseware.models import StudentModule
from instructor.views.tools import get_student_from_identifier
from django.core.exceptions import ObjectDoesNotExist
import instructor.enrollment as enrollment
from student.roles import CourseStaffRole
from student import auth
log = logging.getLogger(__name__)
class InstructorService(object):
    """
    Instructor service for deleting the students attempt(s) of an exam. This service has been created
    for the edx_proctoring's dependency injection to cater for a requirement where edx_proctoring
    needs to call into edx-platform's functions to delete the students' existing answers, grades
    and attempt counts if there had been an earlier attempt.
    """

    def delete_student_attempt(self, student_identifier, course_id, content_id):
        """
        Deletes student state for a problem.

        Takes some of the following query parameters
            - student_identifier is an email or username
            - content_id is a url-name of a problem
            - course_id is the id for the course

        Failures (unknown user, malformed content_id, missing module state)
        are logged and swallowed rather than raised, since this is invoked
        by edx_proctoring as a best-effort cleanup.
        """
        course_id = CourseKey.from_string(course_id)
        try:
            student = get_student_from_identifier(student_identifier)
        except ObjectDoesNotExist:
            err_msg = (
                'Error occurred while attempting to reset student attempts for user '
                '{student_identifier} for content_id {content_id}. '
                'User does not exist!'.format(
                    student_identifier=student_identifier,
                    content_id=content_id
                )
            )
            log.error(err_msg)
            return
        try:
            module_state_key = UsageKey.from_string(content_id)
        except InvalidKeyError:
            err_msg = (
                'Invalid content_id {content_id}!'.format(content_id=content_id)
            )
            log.error(err_msg)
            return
        if student:
            try:
                # delete_module=True removes the student's state entirely
                # (answers, grade, attempt count) instead of only resetting
                # the attempt counter.
                enrollment.reset_student_attempts(course_id, student, module_state_key, delete_module=True)
            except (StudentModule.DoesNotExist, enrollment.sub_api.SubmissionError):
                err_msg = (
                    'Error occurred while attempting to reset student attempts for user '
                    '{student_identifier} for content_id {content_id}.'.format(
                        student_identifier=student_identifier,
                        content_id=content_id
                    )
                )
                log.error(err_msg)

    def is_course_staff(self, user, course_id):
        """
        Returns True if the user is the course staff
        else Returns False
        """
        return auth.user_has_role(user, CourseStaffRole(CourseKey.from_string(course_id)))
| agpl-3.0 |
Avira/pootle | pootle/apps/accounts/migrations/0001_initial.py | 12 | 2906 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
import re
import django.core.validators
class Migration(migrations.Migration):
    """Initial migration: creates the custom accounts ``User`` model."""

    # No prior migrations: this is the app's first migration.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last login')),
                ('username', models.CharField(help_text='Required. 30 characters or fewer. Letters, numbers and @/./+/-/_ characters', unique=True, max_length=30, verbose_name='Username', validators=[django.core.validators.RegexValidator(re.compile(b'^[\\w.@+-]+$'), 'Enter a valid username.', b'invalid')])),
                ('email', models.EmailField(max_length=255, verbose_name='Email Address')),
                ('full_name', models.CharField(max_length=255, verbose_name='Full Name', blank=True)),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='Active')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='Superuser Status')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('unit_rows', models.SmallIntegerField(default=9, verbose_name='Number of Rows')),
                ('rate', models.FloatField(default=0, verbose_name='Rate')),
                ('review_rate', models.FloatField(default=0, verbose_name='Review Rate')),
                ('hourly_rate', models.FloatField(default=0, verbose_name='Hourly Rate')),
                ('score', models.FloatField(default=0, verbose_name='Score')),
                ('currency', models.CharField(blank=True, max_length=3, null=True, verbose_name='Currency', choices=[(b'USD', b'USD'), (b'EUR', b'EUR'), (b'CNY', b'CNY'), (b'JPY', b'JPY')])),
                ('is_employee', models.BooleanField(default=False, verbose_name='Is employee?')),
                ('twitter', models.CharField(max_length=15, null=True, verbose_name='Twitter', blank=True)),
                ('website', models.URLField(null=True, verbose_name='Website', blank=True)),
                ('linkedin', models.URLField(null=True, verbose_name='LinkedIn', blank=True)),
                ('bio', models.TextField(null=True, verbose_name='Short Bio', blank=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
| gpl-3.0 |
ActionAdam/osmc | package/mediacenter-skin-osmc/files/usr/share/kodi/addons/script.module.unidecode/lib/unidecode/x002.py | 216 | 3889 | data = (
# ASCII transliterations for the Unicode block U+0200..U+02FF
# (Latin Extended-B tail, IPA Extensions, Spacing Modifier Letters).
# Index into this tuple with the code point's low byte; '[?]' marks
# code points with no transliteration.
'A', # 0x00
'a', # 0x01
'A', # 0x02
'a', # 0x03
'E', # 0x04
'e', # 0x05
'E', # 0x06
'e', # 0x07
'I', # 0x08
'i', # 0x09
'I', # 0x0a
'i', # 0x0b
'O', # 0x0c
'o', # 0x0d
'O', # 0x0e
'o', # 0x0f
'R', # 0x10
'r', # 0x11
'R', # 0x12
'r', # 0x13
'U', # 0x14
'u', # 0x15
'U', # 0x16
'u', # 0x17
'S', # 0x18
's', # 0x19
'T', # 0x1a
't', # 0x1b
'Y', # 0x1c
'y', # 0x1d
'H', # 0x1e
'h', # 0x1f
'N', # 0x20
'd', # 0x21
'OU', # 0x22
'ou', # 0x23
'Z', # 0x24
'z', # 0x25
'A', # 0x26
'a', # 0x27
'E', # 0x28
'e', # 0x29
'O', # 0x2a
'o', # 0x2b
'O', # 0x2c
'o', # 0x2d
'O', # 0x2e
'o', # 0x2f
'O', # 0x30
'o', # 0x31
'Y', # 0x32
'y', # 0x33
'l', # 0x34
'n', # 0x35
't', # 0x36
'j', # 0x37
'db', # 0x38
'qp', # 0x39
'A', # 0x3a
'C', # 0x3b
'c', # 0x3c
'L', # 0x3d
'T', # 0x3e
's', # 0x3f
'z', # 0x40
'[?]', # 0x41
'[?]', # 0x42
'B', # 0x43
'U', # 0x44
'^', # 0x45
'E', # 0x46
'e', # 0x47
'J', # 0x48
'j', # 0x49
'q', # 0x4a
'q', # 0x4b
'R', # 0x4c
'r', # 0x4d
'Y', # 0x4e
'y', # 0x4f
'a', # 0x50
'a', # 0x51
'a', # 0x52
'b', # 0x53
'o', # 0x54
'c', # 0x55
'd', # 0x56
'd', # 0x57
'e', # 0x58
'@', # 0x59
'@', # 0x5a
'e', # 0x5b
'e', # 0x5c
'e', # 0x5d
'e', # 0x5e
'j', # 0x5f
'g', # 0x60
'g', # 0x61
'g', # 0x62
'g', # 0x63
'u', # 0x64
'Y', # 0x65
'h', # 0x66
'h', # 0x67
'i', # 0x68
'i', # 0x69
'I', # 0x6a
'l', # 0x6b
'l', # 0x6c
'l', # 0x6d
'lZ', # 0x6e
'W', # 0x6f
'W', # 0x70
'm', # 0x71
'n', # 0x72
'n', # 0x73
'n', # 0x74
'o', # 0x75
'OE', # 0x76
'O', # 0x77
'F', # 0x78
'r', # 0x79
'r', # 0x7a
'r', # 0x7b
'r', # 0x7c
'r', # 0x7d
'r', # 0x7e
'r', # 0x7f
'R', # 0x80
'R', # 0x81
's', # 0x82
'S', # 0x83
'j', # 0x84
'S', # 0x85
'S', # 0x86
't', # 0x87
't', # 0x88
'u', # 0x89
'U', # 0x8a
'v', # 0x8b
'^', # 0x8c
'w', # 0x8d
'y', # 0x8e
'Y', # 0x8f
'z', # 0x90
'z', # 0x91
'Z', # 0x92
'Z', # 0x93
'?', # 0x94
'?', # 0x95
'?', # 0x96
'C', # 0x97
'@', # 0x98
'B', # 0x99
'E', # 0x9a
'G', # 0x9b
'H', # 0x9c
'j', # 0x9d
'k', # 0x9e
'L', # 0x9f
'q', # 0xa0
'?', # 0xa1
'?', # 0xa2
'dz', # 0xa3
'dZ', # 0xa4
'dz', # 0xa5
'ts', # 0xa6
'tS', # 0xa7
'tC', # 0xa8
'fN', # 0xa9
'ls', # 0xaa
'lz', # 0xab
'WW', # 0xac
']]', # 0xad
'h', # 0xae
'h', # 0xaf
'k', # 0xb0
'h', # 0xb1
'j', # 0xb2
'r', # 0xb3
'r', # 0xb4
'r', # 0xb5
'r', # 0xb6
'w', # 0xb7
'y', # 0xb8
'\'', # 0xb9
'"', # 0xba
'`', # 0xbb
'\'', # 0xbc
'`', # 0xbd
'`', # 0xbe
'\'', # 0xbf
'?', # 0xc0
'?', # 0xc1
'<', # 0xc2
'>', # 0xc3
'^', # 0xc4
'V', # 0xc5
'^', # 0xc6
'V', # 0xc7
'\'', # 0xc8
'-', # 0xc9
'/', # 0xca
'\\', # 0xcb
',', # 0xcc
'_', # 0xcd
'\\', # 0xce
'/', # 0xcf
':', # 0xd0
'.', # 0xd1
'`', # 0xd2
'\'', # 0xd3
'^', # 0xd4
'V', # 0xd5
'+', # 0xd6
'-', # 0xd7
'V', # 0xd8
'.', # 0xd9
'@', # 0xda
',', # 0xdb
'~', # 0xdc
'"', # 0xdd
'R', # 0xde
'X', # 0xdf
'G', # 0xe0
'l', # 0xe1
's', # 0xe2
'x', # 0xe3
'?', # 0xe4
'', # 0xe5
'', # 0xe6
'', # 0xe7
'', # 0xe8
'', # 0xe9
'', # 0xea
'', # 0xeb
'V', # 0xec
'=', # 0xed
'"', # 0xee
'[?]', # 0xef
'[?]', # 0xf0
'[?]', # 0xf1
'[?]', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
| gpl-2.0 |
rembo10/headphones | lib/pytz/exceptions.py | 657 | 1333 | '''
Custom exceptions raised by pytz.
'''
__all__ = [
'UnknownTimeZoneError', 'InvalidTimeError', 'AmbiguousTimeError',
'NonExistentTimeError',
]
class UnknownTimeZoneError(KeyError):
    '''Exception raised when pytz is passed an unknown timezone.

    >>> isinstance(UnknownTimeZoneError(), LookupError)
    True

    This class is actually a subclass of KeyError to provide backwards
    compatibility with code relying on the undocumented behavior of earlier
    pytz releases.

    >>> isinstance(UnknownTimeZoneError(), KeyError)
    True
    '''
    # Body intentionally empty: the class exists only as a distinct type.
    # NOTE: the docstring examples above are executable doctests.
    pass
class InvalidTimeError(Exception):
    '''Base class for invalid time exceptions.'''
    # Parent of AmbiguousTimeError and NonExistentTimeError below.
class AmbiguousTimeError(InvalidTimeError):
    '''Exception raised when attempting to create an ambiguous wallclock time.

    At the end of a DST transition period, a particular wallclock time will
    occur twice (once before the clocks are set back, once after). Both
    possibilities may be correct, unless further information is supplied.

    See DstTzInfo.normalize() for more info
    '''
class NonExistentTimeError(InvalidTimeError):
    '''Exception raised when attempting to create a wallclock time that
    cannot exist.

    At the start of a DST transition period, the wallclock time jumps forward.
    The instants jumped over never occur.
    '''
| gpl-3.0 |
BryceLohr/authentic | authentic2/idp/attributes.py | 1 | 23270 | '''
VERIDIC - Towards a centralized access control system
Copyright (C) 2011 Mikael Ates
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import logging
import lasso
from authentic2.attribute_aggregator.models import AttributeSource
from authentic2.attribute_aggregator.mapping_loader import ATTRIBUTE_MAPPING
from authentic2.attribute_aggregator.core import get_def_name_from_oid, \
get_def_name_from_name_and_ns_of_attribute, \
load_or_create_user_profile, get_oid_from_def_name, \
get_attribute_name_in_namespace, get_definition_from_alias, \
get_attribute_friendly_name_in_namespace
from authentic2.attribute_aggregator.utils import oid_to_urn, urn_to_oid
from authentic2.saml.models import LibertyProvider
from authentic2.idp.models import get_attribute_policy
logger = logging.getLogger('authentic2.idp.attributes')
def add_data_to_dic(attributes, name, values,
        format=lasso.SAML2_ATTRIBUTE_NAME_FORMAT_URI,
        namespace_out='Default', required=False):
    # NOTE: the ``format`` parameter shadows the ``format`` builtin; it names
    # the SAML2 attribute name-format of the entry to create.
    """Merge attribute ``values`` into the ``attributes`` dictionary.

    ``attributes`` maps a key tuple to a list of values.  Depending on
    ``format`` and ``namespace_out``, the key is one of:
      - (urn-of-OID, URI format, definition name) for URI/Default,
      - (namespace name, URI format[, friendly name]) for URI/other ns,
      - (name, BASIC format) for Basic/Default,
      - (namespace name, BASIC format) for Basic/other ns.
    Existing entries are extended (duplicate values skipped); new keys are
    created otherwise.  When the attribute cannot be mapped into
    ``namespace_out`` and ``required`` is true, an Exception is raised;
    otherwise the attribute is silently skipped (logged at info level).
    Mutates ``attributes`` in place; returns None.
    """
    logger.debug('add_data_to_dic: dic is at beginning %s' % attributes)
    logger.debug('add_data_to_dic: ask to add %s with %s and values %s' \
        % (name, format, str(values)))
    if format == lasso.SAML2_ATTRIBUTE_NAME_FORMAT_URI:
        # URI name format: key on the OID URN of the attribute definition.
        if namespace_out == 'Default':
            logger.debug('add_data_to_dic: out in default')
            # NOTE(review): if ``name`` has no OID mapping,
            # get_oid_from_def_name() presumably returns None and
            # oid_to_urn() would receive it — confirm upstream guarantees.
            if (oid_to_urn(get_oid_from_def_name(name)),
                    lasso.SAML2_ATTRIBUTE_NAME_FORMAT_URI, name) in attributes:
                # Merge with the existing entry, skipping duplicates.
                old_values = attributes[(oid_to_urn(get_oid_from_def_name(name)),
                    lasso.SAML2_ATTRIBUTE_NAME_FORMAT_URI, name)]
                for value in values:
                    if not value in old_values:
                        old_values.append(value)
                attributes[(oid_to_urn(get_oid_from_def_name(name)),
                    lasso.SAML2_ATTRIBUTE_NAME_FORMAT_URI, name)] = \
                    old_values
                logger.debug('add_data_to_dic: updated %s %s %s %s' \
                    % (oid_to_urn(get_oid_from_def_name(name)),
                    lasso.SAML2_ATTRIBUTE_NAME_FORMAT_URI, name,
                    str(old_values)))
            else:
                attributes[(oid_to_urn(get_oid_from_def_name(name)),
                    lasso.SAML2_ATTRIBUTE_NAME_FORMAT_URI, name)] = values
                logger.debug('add_data_to_dic: added %s %s %s %s' \
                    % (oid_to_urn(get_oid_from_def_name(name)),
                    lasso.SAML2_ATTRIBUTE_NAME_FORMAT_URI, name, str(values)))
        else:
            # URI name format in an explicit namespace: translate the
            # definition name, and use the friendly name when one exists.
            logger.debug('add_data_to_dic: out in %s' \
                % namespace_out)
            name_in_ns = get_attribute_name_in_namespace(name, namespace_out)
            if name_in_ns:
                fn = \
                    get_attribute_friendly_name_in_namespace(name, namespace_out)
                if fn:
                    if (name_in_ns, lasso.SAML2_ATTRIBUTE_NAME_FORMAT_URI,
                            fn) in attributes:
                        old_values = attributes[(name_in_ns,
                            lasso.SAML2_ATTRIBUTE_NAME_FORMAT_URI, fn)]
                        for value in values:
                            if not value in old_values:
                                old_values.append(value)
                        attributes[(name_in_ns,
                            lasso.SAML2_ATTRIBUTE_NAME_FORMAT_URI, fn)] = \
                            old_values
                        logger.debug('add_data_to_dic: updated %s %s %s %s' \
                            % (name_in_ns,
                            lasso.SAML2_ATTRIBUTE_NAME_FORMAT_URI, fn,
                            str(old_values)))
                    else:
                        attributes[(name_in_ns,
                            lasso.SAML2_ATTRIBUTE_NAME_FORMAT_URI, fn)] \
                            = values
                        logger.debug('add_data_to_dic: added %s %s %s %s' \
                            % (name_in_ns,
                            lasso.SAML2_ATTRIBUTE_NAME_FORMAT_URI,
                            fn, str(values)))
                else:
                    # No friendly name: two-element key.
                    if (name_in_ns, lasso.SAML2_ATTRIBUTE_NAME_FORMAT_URI) \
                            in attributes:
                        old_values = attributes[(name_in_ns,
                            lasso.SAML2_ATTRIBUTE_NAME_FORMAT_URI)]
                        for value in values:
                            if not value in old_values:
                                old_values.append(value)
                        attributes[(name_in_ns,
                            lasso.SAML2_ATTRIBUTE_NAME_FORMAT_URI)] = \
                            old_values
                        logger.debug('add_data_to_dic: updated %s %s %s' \
                            % (name_in_ns,
                            lasso.SAML2_ATTRIBUTE_NAME_FORMAT_URI,
                            str(old_values)))
                    else:
                        attributes[(name_in_ns,
                            lasso.SAML2_ATTRIBUTE_NAME_FORMAT_URI)] = values
                        logger.debug('add_data_to_dic: added %s %s %s' \
                            % (name_in_ns,
                            lasso.SAML2_ATTRIBUTE_NAME_FORMAT_URI,
                            str(values)))
            elif required:
                raise Exception('Missing a required attribute')
            else:
                logger.info('add_data_to_dic: The attribute %s \
is not found in %s' % (name, namespace_out))
    else:
        # Basic name format.
        if namespace_out == 'Default':
            logger.debug('add_data_to_dic: out in default')
            if (name, lasso.SAML2_ATTRIBUTE_NAME_FORMAT_BASIC) \
                    in attributes:
                old_values = attributes[(name,
                    lasso.SAML2_ATTRIBUTE_NAME_FORMAT_BASIC)]
                for value in values:
                    if not value in old_values:
                        old_values.append(value)
                attributes[(name,
                    lasso.SAML2_ATTRIBUTE_NAME_FORMAT_BASIC)] = \
                    old_values
                logger.debug('add_data_to_dic: updated %s %s %s' \
                    % (name,
                    lasso.SAML2_ATTRIBUTE_NAME_FORMAT_BASIC,
                    str(old_values)))
            else:
                attributes[(name,
                    lasso.SAML2_ATTRIBUTE_NAME_FORMAT_BASIC)] = values
                logger.debug('add_data_to_dic: added %s %s %s' % (name,
                    lasso.SAML2_ATTRIBUTE_NAME_FORMAT_BASIC, str(values)))
        else:
            logger.debug('add_data_to_dic: out in %s' \
                % namespace_out)
            name_in_ns = get_attribute_name_in_namespace(name, namespace_out)
            if name_in_ns:
                if (name_in_ns, lasso.SAML2_ATTRIBUTE_NAME_FORMAT_BASIC) \
                        in attributes:
                    old_values = attributes[(name_in_ns,
                        lasso.SAML2_ATTRIBUTE_NAME_FORMAT_BASIC)]
                    for value in values:
                        if not value in old_values:
                            old_values.append(value)
                    attributes[(name_in_ns,
                        lasso.SAML2_ATTRIBUTE_NAME_FORMAT_BASIC)] = \
                        old_values
                    logger.debug('add_data_to_dic: updated %s %s %s' \
                        % (name_in_ns,
                        lasso.SAML2_ATTRIBUTE_NAME_FORMAT_BASIC,
                        str(old_values)))
                else:
                    attributes[(name_in_ns,
                        lasso.SAML2_ATTRIBUTE_NAME_FORMAT_BASIC)] = values
                    logger.debug('add_data_to_dic: added %s %s %s' \
                        % (name_in_ns,
                        lasso.SAML2_ATTRIBUTE_NAME_FORMAT_BASIC, str(values)))
            elif required:
                raise Exception('Missing a required attribute')
            else:
                logger.info('add_data_to_dic: The attribute %s \
is not found in %s' % (name, namespace_out))
    logger.debug('add_data_to_dic: dic is now %s' % attributes)
def provide_attributes_at_sso(request, user, audience, **kwargs):
    '''Collect the attributes to embed in an SSO response.

    Called by a service provider asynchronous binding at SSO login via the
    add_attributes_to_response signal.  *audience* is the service provider
    entity id and *user* is the authenticated user.

    Returns a dict with an 'attributes' key mapping attribute keys to value
    lists, or None when no policy/profile can be found for the request.
    '''
    if not user or not audience:
        return None
    logger.debug('provide_attributes_at_sso: search attribute for %s' \
        % user)
    logger.debug('provide_attributes_at_sso: attributes for %s' \
        % audience)
    # Resolve the provider from the audience; a missing provider is not
    # fatal, get_attribute_policy() is still consulted with None.
    provider = None
    try:
        provider = LibertyProvider.objects.get(entity_id=audience)
    except:
        logger.debug('provide_attributes_at_sso: Provider with name %s not \
            found' % audience)
    attribute_policy = get_attribute_policy(provider)
    if not attribute_policy:
        logger.debug('provide_attributes_at_sso: no attribute policy found \
            for %s' % audience)
        return None
    p = load_or_create_user_profile(user=user)
    if not p:
        logger.error('provide_attributes_at_sso: unable to load or create a \
            profile for %s' % user)
        return None
    logger.debug('provide_attributes_at_sso: profile loaded %s' % p)
    '''Returned dictionnary'''
    dic = dict()
    attributes = dict()
    # If profile already filled in that session, we should offer to skip
    # filling.
    # --- phase 1: pull sources -- load the policy's listed attributes into
    # the user profile, process it, then copy the freshest data into
    # `attributes` via add_data_to_dic().
    list_pull = attribute_policy.attribute_list_for_sso_from_pull_sources
    if not list_pull:
        logger.debug('provide_attributes_at_sso: no attribute list found \
            from pull source')
    else:
        logger.debug('provide_attributes_at_sso: found attribute list named \
            %s' % list_pull.name)
        l = list_pull.attributes.all()
        if not l:
            logger.debug('provide_attributes_at_sso: The list is empty')
        else:
            logger.debug('provide_attributes_at_sso: the list contains %s' \
                % [a for a in l])
            logger.debug('provide_attributes_at_sso: load in profile %s' \
                % [a.attribute_name for a in l])
            # Attributes without an explicit source first ...
            p.load_listed_attributes([a.attribute_name \
                for a in l if not a.source])
            # ... then group the sourced ones by their source.
            l_with_source = dict()
            for a in l:
                if a.source:
                    if a.source in l_with_source:
                        l_with_source[a.source].append(a.attribute_name)
                    else:
                        l_with_source[a.source] = [a.attribute_name]
            for source, defs in l_with_source.items():
                # 'PROCESSING' entries are skipped here; 'AUTH_BACKEND'
                # sources are flagged so the profile loader treats them
                # specially.
                if source.name == 'PROCESSING':
                    continue
                auth_source = False
                if source.name == 'AUTH_BACKEND':
                    auth_source = True
                p.load_listed_attributes_with_source(defs, source,
                    auth_source=auth_source)
            logger.debug('provide_attributes_at_sso: process profile')
            context = ('sso', {'audience' : audience})
            p.process(context)
            for a in l:
                data = None
                if a.source:
                    logger.debug('provide_attributes_at_sso: %s must be \
                        provided by %s' % (a.attribute_name, a.source))
                    data = \
                        p.get_data_of_definition_and_source(\
                        a.attribute_name, a.source)
                else:
                    data = p.get_data_of_definition(a.attribute_name)
                '''The freshest'''
                if not data:
                    logger.debug('provide_attributes_at_sso: %s not found' \
                        % a.attribute_name)
                    if attribute_policy.\
                        send_error_and_no_attrs_if_missing_required_attrs \
                        and a.required:
                        raise Exception('Missing a required attribute')
                else:
                    logger.debug('provide_attributes_at_sso: found %s' \
                        % [x.__unicode__() for x in data])
                    # d = data.sort(key=lambda x: x.expiration_date, reverse=True)[0]
                    d = data[0]
                    try:
                        add_data_to_dic(attributes, a.attribute_name,
                            d.get_values(),
                            a.output_name_format,
                            a.output_namespace,
                            # Send error if required and attribute required
                            (attribute_policy.\
                            send_error_and_no_attrs_if_missing_required_attrs \
                            and a.required))
                    except:
                        # Missing required attribute
                        pass
    logger.debug('provide_attributes_at_sso: attributes returned \
        from pull source %s' % str(attributes))
    # --- phase 2: push sources -- attributes previously pushed by other
    # providers and stashed in the session under 'multisource_attributes'.
    if attribute_policy.forward_attributes_from_push_sources \
        and request and request.session \
        and 'multisource_attributes' in request.session:
        '''
        Treat attributes in session
        '''
        logger.debug('provide_attributes_at_sso: attributes is session are \
            %s' % str(request.session['multisource_attributes']))
        attrs = {}
        sources = \
            attribute_policy.source_filter_for_sso_from_push_sources.all()
        if sources:
            # Keep only tokens coming from the whitelisted source names.
            s_names = [s.name for s in sources]
            logger.debug('provide_attributes_at_sso: filter attributes from \
                push source, sources accepted are %s' % str(s_names))
            for entity_id, l \
                in request.session['multisource_attributes'].items():
                if entity_id in s_names:
                    for token in l:
                        if 'attributes' in token:
                            logger.debug('provide_attributes_at_sso: \
                                keep in dic %s' \
                                % str({entity_id: token['attributes']}))
                            attrs.update({entity_id: token['attributes']})
        else:
            # No source filter: accept every pushed token.
            for entity_id, l \
                in request.session['multisource_attributes'].items():
                for token in l:
                    if 'attributes' in token:
                        logger.debug('provide_attributes_at_sso: \
                            keep in dic %s' \
                            % str({entity_id: token['attributes']}))
                        attrs.update({entity_id: token['attributes']})
        logger.debug('provide_attributes_at_sso: attributes are %s' \
            % str(attrs))
        if not attribute_policy.map_attributes_from_push_sources \
            and not \
            attribute_policy.attribute_filter_for_sso_from_push_sources:
            #TODO: Load in profile if possible
            # Neither mapping nor filtering requested: forward as-is.
            for vals in attrs.values():
                attributes.update(vals)
        else:
            # Mapping and/or filtering requested: resolve each pushed
            # attribute key to a known definition, load everything into the
            # profile, then re-emit through the configured output mapping.
            dic_to_load_in_profile = dict()
            definitions = list()
            for entity_id, attrs_list in attrs.items():
                source = None
                if attribute_policy.map_attributes_from_push_sources:
                    # Try the source by name, then by the provider's name.
                    try:
                        source = AttributeSource.objects.get(name=entity_id)
                    except:
                        try:
                            lp = \
                                LibertyProvider.objects.get(entity_id=entity_id)
                            source = AttributeSource.objects.get(name=lp.name)
                        except:
                            pass
                namespace_in = 'Default'
                if not source:
                    logger.debug('provide_attributes_at_sso: Not \
                        attribute source found for %s' \
                        % str(attributes))
                else:
                    logger.debug('provide_attributes_at_sso: Source \
                        found %s' % source.name)
                    namespace_in = source.namespace
                    logger.debug('provide_attributes_at_sso: \
                        input namespace is %s' % namespace_in)
                dic_to_load_in_profile[entity_id] = list()
                for key, values in attrs_list.items():
                    logger.debug('provide_attributes_at_sso: treat \
                        %s' % str(attrs))
                    # Keys may be (name, format, friendly-name), (name,
                    # format) or a bare name -- unpack whichever fits.
                    found = False
                    try:
                        name, format, fname = key
                        found = True
                    except:
                        try:
                            name, format = key
                            found = True
                        except:
                            try:
                                name = key
                                found = True
                            except:
                                pass
                    if found:
                        logger.debug('provide_attributes_at_sso: \
                            attribute with name %s' % str(name))
                        if namespace_in == 'Default':
                            # Resolve via the global mapping, then by OID,
                            # then by alias.
                            definition = None
                            if name in ATTRIBUTE_MAPPING:
                                definition = name
                            else:
                                definition = \
                                    get_def_name_from_oid(urn_to_oid(name))
                                if not definition:
                                    definition = \
                                        get_definition_from_alias(name)
                            if definition:
                                logger.debug('provide_attributes_at_sso: \
                                    found definition %s' % definition)
                                definitions.append(definition)
                                dic_to_load_in_profile[entity_id].\
                                    append({'definition': definition,
                                    'values': values})
                        else:
                            definition = \
                                get_def_name_from_name_and_ns_of_attribute(name,
                                namespace_in)
                            if definition:
                                logger.debug('provide_attributes_at_sso: \
                                    found definition %s' % definition)
                                definitions.append(definition)
                                dic_to_load_in_profile[entity_id].\
                                    append({'name': name,
                                    'namespace': namespace_in,
                                    'values': values})
                    else:
                        logger.debug('provide_attributes_at_sso: \
                            unknown format')
            '''
            Load in profile to deal with input mapping
            '''
            logger.debug('provide_attributes_at_sso: \
                load in profile %s' % str(dic_to_load_in_profile))
            p.load_by_dic(dic_to_load_in_profile)
            if attribute_policy.attribute_filter_for_sso_from_push_sources:
                # Only the attributes named by the filter list are emitted.
                att_l = attribute_policy.\
                    attribute_filter_for_sso_from_push_sources.\
                    attributes.all()
                if att_l:
                    for att in att_l:
                        d = None
                        if attribute_policy.\
                            filter_source_of_filtered_attributes \
                            and att.source:
                            d = p.get_data_of_definition_and_source(\
                                att.attribute_name, att.source)
                            if d:
                                d = d[0]
                        else:
                            d = p.get_freshest_data_of_definition(\
                                att.attribute_name)
                        if d:
                            # Output name format/namespace default to SAML2
                            # URI/'Default' unless mapping is configured.
                            namespace_out = 'Default'
                            name_format_out = \
                                lasso.SAML2_ATTRIBUTE_NAME_FORMAT_URI
                            if attribute_policy.\
                                map_attributes_of_filtered_attributes:
                                namespace_out = att.output_namespace
                                name_format_out = att.output_name_format
                            elif attribute_policy.\
                                map_attributes_from_push_sources:
                                namespace_out = \
                                    attribute_policy.output_namespace
                                name_format_out = \
                                    attribute_policy.attribute_name_format
                            logger.debug('provide_attributes_at_sso: \
                                output namespace %s' % namespace_out)
                            logger.debug('provide_attributes_at_sso: \
                                output format %s' % name_format_out)
                            add_data_to_dic(attributes,
                                d.definition,
                                d.get_values(),
                                name_format_out,
                                namespace_out,
                                (attribute_policy.\
                                send_error_and_no_attrs_if_missing_required_attrs \
                                and att.required))
            else:
                # No filter list: emit everything that was resolved above.
                namespace_out = 'Default'
                name_format_out = lasso.SAML2_ATTRIBUTE_NAME_FORMAT_URI
                if attribute_policy.map_attributes_from_push_sources:
                    namespace_out = attribute_policy.output_namespace
                    name_format_out = attribute_policy.output_name_format
                for definition in definitions:
                    data = p.get_freshest_data_of_definition(definition)
                    if data:
                        add_data_to_dic(attributes,
                            data.definition,
                            data.get_values(),
                            name_format_out,
                            namespace_out)
    logger.debug('provide_attributes_at_sso: attributes returned are \
        %s' % str(attributes))
    dic['attributes'] = attributes
    return dic
| agpl-3.0 |
razvanc-r/godot-python | tests/bindings/lib/py/_io/terminalwriter.py | 175 | 12542 | """
Helper functions for writing to terminals and files.
"""
import sys, os
import py
py3k = sys.version_info[0] >= 3
from py.builtin import text, bytes
win32_and_ctypes = False
colorama = None
if sys.platform == "win32":
try:
import colorama
except ImportError:
try:
import ctypes
win32_and_ctypes = True
except ImportError:
pass
def _getdimensions():
    """Ask the tty attached to fd 1 for its size via the TIOCGWINSZ ioctl.

    Returns a ``(rows, cols)`` pair; raises on non-tty/unsupported platforms
    (callers are expected to catch and fall back).
    """
    import termios, fcntl, struct
    raw = fcntl.ioctl(1, termios.TIOCGWINSZ, "\000" * 8)
    rows, cols = struct.unpack("hhhh", raw)[:2]
    return rows, cols
def get_terminal_width():
    """Best-effort width of the attached terminal.

    Falls back to the COLUMNS environment variable (default 80) when the
    ioctl probe fails or reports zero, and never returns less than 40.
    """
    width = 0
    try:
        width = _getdimensions()[1]
    except py.builtin._sysex:
        raise
    except:
        # any other failure: fall through to the environment fallback
        width = 0
    if not width:
        # FALLBACK: either an exception happened or this is an emacs-style
        # terminal that reports (0, 0).
        width = int(os.environ.get('COLUMNS', 80))
    # the windows getdimensions may be bogus; sanify suspiciously small widths
    return width if width >= 40 else 80
# Width measured once at import time; TerminalWriter instances re-query it
# themselves, this module-level value is kept for importers of the module.
terminal_width = get_terminal_width()
# XXX unify with _escaped func below
def ansi_print(text, esc, file=None, newline=True, flush=False):
    """Print *text* with the given escape code(s) applied.

    On non-Windows ttys the ANSI codes in *esc* are emitted directly; with
    ctypes on win32 the codes are translated to console text attributes via
    SetConsoleTextAttribute.  Defaults to sys.stderr.
    """
    if file is None:
        file = sys.stderr
    text = text.rstrip()
    if esc and not isinstance(esc, tuple):
        esc = (esc,)
    if esc and sys.platform != "win32" and file.isatty():
        text = (''.join(['\x1b[%sm' % cod for cod in esc]) +
                text +
                '\x1b[0m')  # ANSI color code "reset"
    if newline:
        text += '\n'
    if esc and win32_and_ctypes and file.isatty():
        # code 1 (bold) maps to FOREGROUND_INTENSITY; strip it from the
        # tuple before the color-table lookup below
        if 1 in esc:
            bold = True
            esc = tuple([x for x in esc if x != 1])
        else:
            bold = False
        esctable = {() : FOREGROUND_WHITE,                 # normal
                    (31,): FOREGROUND_RED,                 # red
                    (32,): FOREGROUND_GREEN,               # green
                    (33,): FOREGROUND_GREEN|FOREGROUND_RED, # yellow
                    (34,): FOREGROUND_BLUE,                # blue
                    (35,): FOREGROUND_BLUE|FOREGROUND_RED, # purple
                    (36,): FOREGROUND_BLUE|FOREGROUND_GREEN, # cyan
                    (37,): FOREGROUND_WHITE,               # white
                    (39,): FOREGROUND_WHITE,               # reset
                    }
        attr = esctable.get(esc, FOREGROUND_WHITE)
        if bold:
            attr |= FOREGROUND_INTENSITY
        STD_OUTPUT_HANDLE = -11
        STD_ERROR_HANDLE = -12
        if file is sys.stderr:
            handle = GetStdHandle(STD_ERROR_HANDLE)
        else:
            handle = GetStdHandle(STD_OUTPUT_HANDLE)
        oldcolors = GetConsoleInfo(handle).wAttributes
        # keep the console's current background bits
        attr |= (oldcolors & 0x0f0)
        SetConsoleTextAttribute(handle, attr)
        # write in chunks: very large single writes can fail on the
        # win32 console
        while len(text) > 32768:
            file.write(text[:32768])
            text = text[32768:]
        if text:
            file.write(text)
        SetConsoleTextAttribute(handle, oldcolors)
    else:
        file.write(text)
    if flush:
        file.flush()
def should_do_markup(file):
    """Decide whether ANSI markup should be emitted when writing to *file*.

    The PY_COLORS environment variable ('1'/'0') overrides everything;
    otherwise markup is used only for real ttys that are not 'dumb'
    terminals (and not Jython on Windows).
    """
    forced = os.environ.get('PY_COLORS')
    if forced == '1':
        return True
    if forced == '0':
        return False
    if not (hasattr(file, 'isatty') and file.isatty()):
        return False
    if os.environ.get('TERM') == 'dumb':
        return False
    return not (sys.platform.startswith('java') and os._name == 'nt')
class TerminalWriter(object):
    """Write text, optionally with ANSI color markup, to a terminal or file.

    Markup is only applied when the target looks like a real terminal
    (see should_do_markup()).
    """
    # ANSI SGR codes: lowercase names are foreground colors, capitalized
    # names are background colors, plus a few text attributes.
    _esctable = dict(black=30, red=31, green=32, yellow=33,
                     blue=34, purple=35, cyan=36, white=37,
                     Black=40, Red=41, Green=42, Yellow=43,
                     Blue=44, Purple=45, Cyan=46, White=47,
                     bold=1, light=2, blink=5, invert=7)

    # XXX deprecate stringio argument
    def __init__(self, file=None, stringio=False, encoding=None):
        # *file* may be None (-> stdout, or an internal TextIO when
        # stringio=True), a bare write callable (wrapped in WriteFile),
        # or a file-like object.
        if file is None:
            if stringio:
                self.stringio = file = py.io.TextIO()
            else:
                file = py.std.sys.stdout
        elif py.builtin.callable(file) and not (
             hasattr(file, "write") and hasattr(file, "flush")):
            file = WriteFile(file, encoding=encoding)
        if hasattr(file, "isatty") and file.isatty() and colorama:
            # let colorama translate ANSI escapes into win32 console calls
            file = colorama.AnsiToWin32(file).stream
        self.encoding = encoding or getattr(file, 'encoding', "utf-8")
        self._file = file
        self.fullwidth = get_terminal_width()
        self.hasmarkup = should_do_markup(file)
        self._lastlen = 0  # length of the last line written via reline()

    def _escaped(self, text, esc):
        """Wrap *text* in the given ANSI code tuple if markup is enabled."""
        if esc and self.hasmarkup:
            text = (''.join(['\x1b[%sm' % cod for cod in esc]) +
                    text +'\x1b[0m')
        return text

    def markup(self, text, **kw):
        """Return *text* wrapped in the markup named by truthy keywords
        (e.g. ``markup("hi", red=True, bold=True)``)."""
        esc = []
        for name in kw:
            if name not in self._esctable:
                raise ValueError("unknown markup: %r" %(name,))
            if kw[name]:
                esc.append(self._esctable[name])
        return self._escaped(text, tuple(esc))

    def sep(self, sepchar, title=None, fullwidth=None, **kw):
        """Write a separator line of *sepchar*, optionally centered around
        *title*, at most *fullwidth* (default: terminal width) wide."""
        if fullwidth is None:
            fullwidth = self.fullwidth
        # the goal is to have the line be as long as possible
        # under the condition that len(line) <= fullwidth
        if sys.platform == "win32":
            # if we print in the last column on windows we are on a
            # new line but there is no way to verify/neutralize this
            # (we may not know the exact line width)
            # so let's be defensive to avoid empty lines in the output
            fullwidth -= 1
        if title is not None:
            # we want 2 + 2*len(fill) + len(title) <= fullwidth
            # i.e. 2 + 2*len(sepchar)*N + len(title) <= fullwidth
            #         2*len(sepchar)*N <= fullwidth - len(title) - 2
            #         N <= (fullwidth - len(title) - 2) // (2*len(sepchar))
            N = (fullwidth - len(title) - 2) // (2*len(sepchar))
            fill = sepchar * N
            line = "%s %s %s" % (fill, title, fill)
        else:
            # we want len(sepchar)*N <= fullwidth
            # i.e. N <= fullwidth // len(sepchar)
            line = sepchar * (fullwidth // len(sepchar))
        # in some situations there is room for an extra sepchar at the right,
        # in particular if we consider that with a sepchar like "_ " the
        # trailing space is not important at the end of the line
        if len(line) + len(sepchar.rstrip()) <= fullwidth:
            line += sepchar.rstrip()
        self.line(line, **kw)

    def write(self, msg, **kw):
        """Write *msg* (coerced to text), applying markup keywords if any."""
        if msg:
            if not isinstance(msg, (bytes, text)):
                msg = text(msg)
            if self.hasmarkup and kw:
                markupmsg = self.markup(msg, **kw)
            else:
                markupmsg = msg
            write_out(self._file, markupmsg)

    def line(self, s='', **kw):
        """Write *s* plus a newline, padding over any reline() leftovers."""
        self.write(s, **kw)
        self._checkfill(s)
        self.write('\n')

    def reline(self, line, **kw):
        """Overwrite the current terminal line with *line* (no newline).
        Requires markup support (a real terminal)."""
        if not self.hasmarkup:
            raise ValueError("cannot use rewrite-line without terminal")
        self.write(line, **kw)
        self._checkfill(line)
        self.write('\r')
        self._lastlen = len(line)

    def _checkfill(self, line):
        # pad with spaces so residue of a longer previous line is erased
        diff2last = self._lastlen - len(line)
        if diff2last > 0:
            self.write(" " * diff2last)
class Win32ConsoleWriter(TerminalWriter):
    """TerminalWriter variant that applies colors through the win32 console
    API (SetConsoleTextAttribute) instead of emitting ANSI escapes."""

    def write(self, msg, **kw):
        if msg:
            if not isinstance(msg, (bytes, text)):
                msg = text(msg)
            oldcolors = None
            if self.hasmarkup and kw:
                handle = GetStdHandle(STD_OUTPUT_HANDLE)
                oldcolors = GetConsoleInfo(handle).wAttributes
                # preserve the console's current background bits
                default_bg = oldcolors & 0x00F0
                attr = default_bg
                if kw.pop('bold', False):
                    attr |= FOREGROUND_INTENSITY
                if kw.pop('red', False):
                    attr |= FOREGROUND_RED
                elif kw.pop('blue', False):
                    attr |= FOREGROUND_BLUE
                elif kw.pop('green', False):
                    attr |= FOREGROUND_GREEN
                elif kw.pop('yellow', False):
                    attr |= FOREGROUND_GREEN|FOREGROUND_RED
                else:
                    # no color keyword: keep the current foreground bits
                    attr |= oldcolors & 0x0007
                SetConsoleTextAttribute(handle, attr)
            write_out(self._file, msg)
            if oldcolors:
                # restore the attributes saved before the colored write
                SetConsoleTextAttribute(handle, oldcolors)
class WriteFile(object):
    """Minimal file-like adapter that funnels ``write`` calls into a callable.

    If *encoding* is set, text is encoded (errors replaced) before being
    handed to the callable; ``flush`` is a no-op, present only to satisfy
    the file protocol.
    """

    def __init__(self, writemethod, encoding=None):
        self.encoding = encoding
        self._sink = writemethod

    def write(self, data):
        enc = self.encoding
        if enc:
            data = data.encode(enc, "replace")
        self._sink(data)

    def flush(self):
        pass
# When running on win32 with ctypes (and without colorama), swap in the
# console-API writer and define the kernel32 bindings it and ansi_print use.
if win32_and_ctypes:
    TerminalWriter = Win32ConsoleWriter
    import ctypes
    from ctypes import wintypes

    # ctypes access to the Windows console
    STD_OUTPUT_HANDLE = -11
    STD_ERROR_HANDLE = -12
    FOREGROUND_BLACK = 0x0000 # black text
    FOREGROUND_BLUE = 0x0001 # text color contains blue.
    FOREGROUND_GREEN = 0x0002 # text color contains green.
    FOREGROUND_RED = 0x0004 # text color contains red.
    FOREGROUND_WHITE = 0x0007
    FOREGROUND_INTENSITY = 0x0008 # text color is intensified.
    BACKGROUND_BLACK = 0x0000 # background color black
    BACKGROUND_BLUE = 0x0010 # background color contains blue.
    BACKGROUND_GREEN = 0x0020 # background color contains green.
    BACKGROUND_RED = 0x0040 # background color contains red.
    BACKGROUND_WHITE = 0x0070
    BACKGROUND_INTENSITY = 0x0080 # background color is intensified.

    SHORT = ctypes.c_short

    # mirrors of the win32 CONSOLE_SCREEN_BUFFER_INFO structure family
    class COORD(ctypes.Structure):
        _fields_ = [('X', SHORT),
                    ('Y', SHORT)]

    class SMALL_RECT(ctypes.Structure):
        _fields_ = [('Left', SHORT),
                    ('Top', SHORT),
                    ('Right', SHORT),
                    ('Bottom', SHORT)]

    class CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure):
        _fields_ = [('dwSize', COORD),
                    ('dwCursorPosition', COORD),
                    ('wAttributes', wintypes.WORD),
                    ('srWindow', SMALL_RECT),
                    ('dwMaximumWindowSize', COORD)]

    _GetStdHandle = ctypes.windll.kernel32.GetStdHandle
    _GetStdHandle.argtypes = [wintypes.DWORD]
    _GetStdHandle.restype = wintypes.HANDLE
    def GetStdHandle(kind):
        return _GetStdHandle(kind)

    SetConsoleTextAttribute = ctypes.windll.kernel32.SetConsoleTextAttribute
    SetConsoleTextAttribute.argtypes = [wintypes.HANDLE, wintypes.WORD]
    SetConsoleTextAttribute.restype = wintypes.BOOL

    _GetConsoleScreenBufferInfo = \
        ctypes.windll.kernel32.GetConsoleScreenBufferInfo
    _GetConsoleScreenBufferInfo.argtypes = [wintypes.HANDLE,
                                ctypes.POINTER(CONSOLE_SCREEN_BUFFER_INFO)]
    _GetConsoleScreenBufferInfo.restype = wintypes.BOOL
    def GetConsoleInfo(handle):
        info = CONSOLE_SCREEN_BUFFER_INFO()
        _GetConsoleScreenBufferInfo(handle, ctypes.byref(info))
        return info

    # win32 replacement for the unix ioctl-based probe defined above
    def _getdimensions():
        handle = GetStdHandle(STD_OUTPUT_HANDLE)
        info = GetConsoleInfo(handle)
        # Substract one from the width, otherwise the cursor wraps
        # and the ending \n causes an empty line to display.
        return info.dwSize.Y, info.dwSize.X - 1
def write_out(fil, msg):
    """Write *msg* to *fil* and flush, degrading gracefully on encode errors.

    *msg* may be text or bytes.  If the stream rejects the text with
    UnicodeEncodeError, a pre-encoded write using the stream's own encoding
    is attempted; as a last resort every non-ASCII character is escaped.
    """
    def _try_stream_encoding():
        # best effort: hand the stream bytes in its own declared encoding
        if fil.encoding:
            try:
                fil.write(msg.encode(fil.encoding))
            except UnicodeEncodeError:
                pass  # encoding cannot represent msg; caller will escape
            else:
                return True
        return False

    try:
        # on py27 and above writing text out usually just works when the
        # stream's encoding can represent it
        fil.write(msg)
    except UnicodeEncodeError:
        if not _try_stream_encoding():
            # fallback: escape all unicode characters
            fil.write(msg.encode("unicode-escape").decode("ascii"))
    fil.flush()
| mit |
saradbowman/osf.io | addons/wiki/migrations/0002_auto_20170323_1534.py | 28 | 1222 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-03-23 20:34
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Second step of the wiki add-on schema: after both apps' initial
    # migrations have created the tables, wire up the cross-app foreign keys
    # (osf.AbstractNode and the swappable auth user model).

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('osf', '0001_initial'),
        ('addons_wiki', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='nodewikipage',
            name='node',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='osf.AbstractNode'),
        ),
        migrations.AddField(
            model_name='nodewikipage',
            name='user',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='nodesettings',
            name='owner',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='addons_wiki_node_settings', to='osf.AbstractNode'),
        ),
    ]
| apache-2.0 |
campbe13/openhatch | vendor/packages/Django/django/db/backends/oracle/creation.py | 64 | 12122 | import sys
import time
from django.db.backends.creation import BaseDatabaseCreation
from django.utils.six.moves import input
# Prefix prepended to the production NAME/USER/tablespace to derive the
# test-database equivalents.
TEST_DATABASE_PREFIX = 'test_'
# Fixed password assigned to the auto-created Oracle test user.
PASSWORD = 'Im_a_lumberjack'
class DatabaseCreation(BaseDatabaseCreation):
    """Oracle-specific test-database creation.

    Unlike backends that create a new database, the Oracle flow creates a
    pair of tablespaces plus a dedicated test user inside the existing
    instance, then swaps the connection's credentials over to that user.
    """
    # This dictionary maps Field objects to their associated Oracle column
    # types, as strings. Column-type strings can contain format strings; they'll
    # be interpolated against the values of Field.__dict__ before being output.
    # If a column type is set to None, it won't be included in the output.
    #
    # Any format strings starting with "qn_" are quoted before being used in the
    # output (the "qn_" prefix is stripped before the lookup is performed.
    data_types = {
        'AutoField': 'NUMBER(11)',
        'BooleanField': 'NUMBER(1) CHECK (%(qn_column)s IN (0,1))',
        'CharField': 'NVARCHAR2(%(max_length)s)',
        'CommaSeparatedIntegerField': 'VARCHAR2(%(max_length)s)',
        'DateField': 'DATE',
        'DateTimeField': 'TIMESTAMP',
        'DecimalField': 'NUMBER(%(max_digits)s, %(decimal_places)s)',
        'FileField': 'NVARCHAR2(%(max_length)s)',
        'FilePathField': 'NVARCHAR2(%(max_length)s)',
        'FloatField': 'DOUBLE PRECISION',
        'IntegerField': 'NUMBER(11)',
        'BigIntegerField': 'NUMBER(19)',
        'IPAddressField': 'VARCHAR2(15)',
        'GenericIPAddressField': 'VARCHAR2(39)',
        'NullBooleanField': 'NUMBER(1) CHECK ((%(qn_column)s IN (0,1)) OR (%(qn_column)s IS NULL))',
        'OneToOneField': 'NUMBER(11)',
        'PositiveIntegerField': 'NUMBER(11) CHECK (%(qn_column)s >= 0)',
        'PositiveSmallIntegerField': 'NUMBER(11) CHECK (%(qn_column)s >= 0)',
        'SlugField': 'NVARCHAR2(%(max_length)s)',
        'SmallIntegerField': 'NUMBER(11)',
        'TextField': 'NCLOB',
        'TimeField': 'TIMESTAMP',
        'URLField': 'VARCHAR2(%(max_length)s)',
    }

    def __init__(self, connection):
        # no Oracle-specific state; kept for explicitness
        super(DatabaseCreation, self).__init__(connection)

    def _create_test_db(self, verbosity=1, autoclobber=False):
        """Create the test tablespaces and test user (prompting, or
        clobbering when *autoclobber* is set, if they already exist), then
        point the connection settings at the test credentials.  Returns the
        database NAME (which is unchanged on Oracle)."""
        TEST_NAME = self._test_database_name()
        TEST_USER = self._test_database_user()
        TEST_PASSWD = self._test_database_passwd()
        TEST_TBLSPACE = self._test_database_tblspace()
        TEST_TBLSPACE_TMP = self._test_database_tblspace_tmp()

        parameters = {
            'dbname': TEST_NAME,
            'user': TEST_USER,
            'password': TEST_PASSWD,
            'tblspace': TEST_TBLSPACE,
            'tblspace_temp': TEST_TBLSPACE_TMP,
        }

        cursor = self.connection.cursor()
        if self._test_database_create():
            try:
                self._execute_test_db_creation(cursor, parameters, verbosity)
            except Exception as e:
                sys.stderr.write("Got an error creating the test database: %s\n" % e)
                if not autoclobber:
                    confirm = input("It appears the test database, %s, already exists. Type 'yes' to delete it, or 'no' to cancel: " % TEST_NAME)
                if autoclobber or confirm == 'yes':
                    try:
                        if verbosity >= 1:
                            print("Destroying old test database '%s'..." % self.connection.alias)
                        self._execute_test_db_destruction(cursor, parameters, verbosity)
                        self._execute_test_db_creation(cursor, parameters, verbosity)
                    except Exception as e:
                        sys.stderr.write("Got an error recreating the test database: %s\n" % e)
                        sys.exit(2)
                else:
                    print("Tests cancelled.")
                    sys.exit(1)

        if self._test_user_create():
            if verbosity >= 1:
                print("Creating test user...")
            try:
                self._create_test_user(cursor, parameters, verbosity)
            except Exception as e:
                sys.stderr.write("Got an error creating the test user: %s\n" % e)
                if not autoclobber:
                    confirm = input("It appears the test user, %s, already exists. Type 'yes' to delete it, or 'no' to cancel: " % TEST_USER)
                if autoclobber or confirm == 'yes':
                    try:
                        if verbosity >= 1:
                            print("Destroying old test user...")
                        self._destroy_test_user(cursor, parameters, verbosity)
                        if verbosity >= 1:
                            print("Creating test user...")
                        self._create_test_user(cursor, parameters, verbosity)
                    except Exception as e:
                        sys.stderr.write("Got an error recreating the test user: %s\n" % e)
                        sys.exit(2)
                else:
                    print("Tests cancelled.")
                    sys.exit(1)

        # remember the real credentials so _destroy_test_db can restore them
        self.connection.settings_dict['SAVED_USER'] = self.connection.settings_dict['USER']
        self.connection.settings_dict['SAVED_PASSWORD'] = self.connection.settings_dict['PASSWORD']
        self.connection.settings_dict['TEST_USER'] = self.connection.settings_dict['USER'] = TEST_USER
        self.connection.settings_dict['PASSWORD'] = TEST_PASSWD

        return self.connection.settings_dict['NAME']

    def _destroy_test_db(self, test_database_name, verbosity=1):
        """
        Destroy a test database, prompting the user for confirmation if the
        database already exists. Returns the name of the test database created.
        """
        TEST_NAME = self._test_database_name()
        TEST_USER = self._test_database_user()
        TEST_PASSWD = self._test_database_passwd()
        TEST_TBLSPACE = self._test_database_tblspace()
        TEST_TBLSPACE_TMP = self._test_database_tblspace_tmp()

        # restore the production credentials saved in _create_test_db
        self.connection.settings_dict['USER'] = self.connection.settings_dict['SAVED_USER']
        self.connection.settings_dict['PASSWORD'] = self.connection.settings_dict['SAVED_PASSWORD']

        parameters = {
            'dbname': TEST_NAME,
            'user': TEST_USER,
            'password': TEST_PASSWD,
            'tblspace': TEST_TBLSPACE,
            'tblspace_temp': TEST_TBLSPACE_TMP,
        }

        cursor = self.connection.cursor()
        time.sleep(1) # To avoid "database is being accessed by other users" errors.
        if self._test_user_create():
            if verbosity >= 1:
                print('Destroying test user...')
            self._destroy_test_user(cursor, parameters, verbosity)
        if self._test_database_create():
            if verbosity >= 1:
                print('Destroying test database tables...')
            self._execute_test_db_destruction(cursor, parameters, verbosity)
        self.connection.close()

    def _execute_test_db_creation(self, cursor, parameters, verbosity):
        """Create the main and temporary tablespaces for the test run."""
        if verbosity >= 2:
            print("_create_test_db(): dbname = %s" % parameters['dbname'])
        statements = [
            """CREATE TABLESPACE %(tblspace)s
            DATAFILE '%(tblspace)s.dbf' SIZE 20M
            REUSE AUTOEXTEND ON NEXT 10M MAXSIZE 200M
            """,
            """CREATE TEMPORARY TABLESPACE %(tblspace_temp)s
            TEMPFILE '%(tblspace_temp)s.dbf' SIZE 20M
            REUSE AUTOEXTEND ON NEXT 10M MAXSIZE 100M
            """,
        ]
        self._execute_statements(cursor, statements, parameters, verbosity)

    def _create_test_user(self, cursor, parameters, verbosity):
        """Create the test user bound to the test tablespaces."""
        if verbosity >= 2:
            print("_create_test_user(): username = %s" % parameters['user'])
        statements = [
            """CREATE USER %(user)s
            IDENTIFIED BY %(password)s
            DEFAULT TABLESPACE %(tblspace)s
            TEMPORARY TABLESPACE %(tblspace_temp)s
            QUOTA UNLIMITED ON %(tblspace)s
            """,
            """GRANT CONNECT, RESOURCE TO %(user)s""",
        ]
        self._execute_statements(cursor, statements, parameters, verbosity)

    def _execute_test_db_destruction(self, cursor, parameters, verbosity):
        """Drop both test tablespaces (contents and datafiles included)."""
        if verbosity >= 2:
            print("_execute_test_db_destruction(): dbname=%s" % parameters['dbname'])
        statements = [
            'DROP TABLESPACE %(tblspace)s INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS',
            'DROP TABLESPACE %(tblspace_temp)s INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS',
        ]
        self._execute_statements(cursor, statements, parameters, verbosity)

    def _destroy_test_user(self, cursor, parameters, verbosity):
        """Drop the test user and everything it owns (CASCADE)."""
        if verbosity >= 2:
            print("_destroy_test_user(): user=%s" % parameters['user'])
            print("Be patient. This can take some time...")
        statements = [
            'DROP USER %(user)s CASCADE',
        ]
        self._execute_statements(cursor, statements, parameters, verbosity)

    def _execute_statements(self, cursor, statements, parameters, verbosity):
        # run each template after interpolating parameters; re-raise on error
        for template in statements:
            stmt = template % parameters
            if verbosity >= 2:
                print(stmt)
            try:
                cursor.execute(stmt)
            except Exception as err:
                sys.stderr.write("Failed (%s)\n" % (err))
                raise

    def _test_database_name(self):
        name = TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME']
        try:
            if self.connection.settings_dict['TEST_NAME']:
                name = self.connection.settings_dict['TEST_NAME']
        # NOTE(review): settings_dict is a plain dict, so a missing
        # 'TEST_NAME' key raises KeyError, not AttributeError -- the sibling
        # helpers below catch KeyError.  Confirm whether this clause can
        # actually fire before relying on it.
        except AttributeError:
            pass
        return name

    def _test_database_create(self):
        # TEST_CREATE=False lets tests reuse pre-provisioned tablespaces
        return self.connection.settings_dict.get('TEST_CREATE', True)

    def _test_user_create(self):
        # TEST_USER_CREATE=False lets tests reuse a pre-provisioned user
        return self.connection.settings_dict.get('TEST_USER_CREATE', True)

    def _test_database_user(self):
        name = TEST_DATABASE_PREFIX + self.connection.settings_dict['USER']
        try:
            if self.connection.settings_dict['TEST_USER']:
                name = self.connection.settings_dict['TEST_USER']
        except KeyError:
            pass
        return name

    def _test_database_passwd(self):
        name = PASSWORD
        try:
            if self.connection.settings_dict['TEST_PASSWD']:
                name = self.connection.settings_dict['TEST_PASSWD']
        except KeyError:
            pass
        return name

    def _test_database_tblspace(self):
        name = TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME']
        try:
            if self.connection.settings_dict['TEST_TBLSPACE']:
                name = self.connection.settings_dict['TEST_TBLSPACE']
        except KeyError:
            pass
        return name

    def _test_database_tblspace_tmp(self):
        name = TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME'] + '_temp'
        try:
            if self.connection.settings_dict['TEST_TBLSPACE_TMP']:
                name = self.connection.settings_dict['TEST_TBLSPACE_TMP']
        except KeyError:
            pass
        return name

    def _get_test_db_name(self):
        """
        We need to return the 'production' DB name to get the test DB creation
        machinery to work. This isn't a great deal in this case because DB
        names as handled by Django haven't real counterparts in Oracle.
        """
        return self.connection.settings_dict['NAME']

    def test_db_signature(self):
        # identifies a test DB so parallel aliases can share/skip creation
        settings_dict = self.connection.settings_dict
        return (
            settings_dict['HOST'],
            settings_dict['PORT'],
            settings_dict['ENGINE'],
            settings_dict['NAME'],
            self._test_database_user(),
        )

    def set_autocommit(self):
        self.connection.connection.autocommit = True
| agpl-3.0 |
baderj/domain_generation_algorithms | locky/dgav2.py | 1 | 4140 | import argparse
from datetime import datetime
# Per-variant DGA parameters, keyed by config number.  Each entry lists the
# hashes/sandbox links of a sample observed using it, plus:
#   seed  - per-campaign constant mixed into the date hash (0 = unused)
#   shift - rotate amount fed to ror32() throughout dga()
#   mod   - domain_nr is reduced modulo this, bounding distinct domains
#   tlds  - candidate top-level domains, indexed by the final hash value
config = {
    1: {
        # md5: b28da1f2a5d889594a0e77e0853bcf29
        # sha256: 34bd23cbb9cf301ba444e1696694527ffc59edaba2bbbe25c4ac28a90df6f52a
        # sample: https://malwr.com/analysis/OTE2NzIwODkxMWUzNDBlNzhmNGZlMmFjY2ExOWEyZjQ/
        'seed': 62,
        'shift': 7,
        'mod': 8,
        'tlds': ['ru', 'pw', 'eu', 'in', 'yt', 'pm', 'us', 'fr', 'de',
                 'it', 'be', 'uk', 'nl', 'tf']
    },
    2: {
        # md5: e35bf0901f1a7db2b3752f85cd1bd044
        # sha256: 199a28336f5d3d8051754495741de1ad1bfa89919e6c44192ba34a3441d14c3d
        # sample: https://malwr.com/analysis/YjEyNWRhNTg0MzFiNDUwNzlkMmEyNjRmMzliNGMxODI/
        'seed': 75,
        'shift': 7,
        'mod': 8,
        'tlds': ['ru', 'pw', 'eu', 'in', 'yt', 'pm', 'us', 'fr', 'de',
                 'it', 'be', 'uk', 'nl', 'tf']
    },
    3: {
        # md5: 872018251970f0a739aa71d3cd313efa
        # sha256: 1a45085e959a449637a89174b1737f4d03d7e73dd7acfa3cfb96042a735cf400
        # sample:
        'seed': 9,
        'shift': 7,
        'mod': 8,
        'tlds': ['ru', 'pw', 'eu', 'in', 'yt', 'pm', 'us', 'fr', 'de',
                 'it', 'be', 'uk', 'nl', 'tf']
    },
    4: {
        # md5: 3f118d0b888430ab9f58fc2589207988
        # sha256: f927efd7cd2da3a052d857632f78ccf04b673e2774f6ce9a075e654dfd77d940
        # sample: https://malwr.com/analysis/N2M2YmFkMzY1MTY3NGIzZmJiNWMzZDU5ZDVhZjBlNjk/
        'seed': 7,
        'shift': 7,
        'mod': 8,
        'tlds': ['ru', 'pw', 'eu', 'in', 'yt', 'pm', 'us', 'fr', 'de',
                 'it', 'be', 'uk', 'nl', 'tf']
    },
    5: {
        # md5: 32e2c73ed8da34d87c64267936e632cb
        # sha256: d4dc820457bbc557b14ec0e58358646afbba70f4d5cab2276cdac8ce631a3854
        # sample: https://malwr.com/analysis/NjVkMTBlOTdhMjAwNDUxZTk2ZGFkMjc0OTQxNDBlMTk/
        'seed': 0,
        'shift': 5,
        'mod': 6,
        'tlds': ['ru', 'pw', 'eu', 'in', 'yt', 'pm', 'us', 'fr', 'de',
                 'it', 'be', 'uk', 'nl', 'tf']
    },
    6: {
        # md5: d2ea1565ae004368655edb5169b56a0f
        # sha256: 9b4b37cbb9845b093867675fb898330a8bd7ed087d587cba8cd21064c9a6e526
        # sample: https://sandbox.deepviz.com/report/hash/d2ea1565ae004368655edb5169b56a0f/
        'seed': 660,
        'shift': 7,
        'mod': 8,
        'tlds': ['ru', 'pw', 'eu', 'in', 'yt', 'pm', 'us', 'fr', 'de',
                 'it', 'be', 'uk', 'nl', 'tf']
    },
    7: {
        # md5: 69b933a694710f8ceb314dc897a94cbe
        # sha256: 07bed9baa42996bded75dacf5c2611ba5d3a3f19b8588ea734530f74c2586087
        # sample: https://malwr.com/analysis/MTM0ODY5N2VlOWQ1NGQ4YWFiNTI5ZGYxOWNlMTEwMWU/
        'seed': 555,
        'shift': 7,
        'mod': 8,
        'tlds': ['ru', 'pw', 'eu', 'in', 'yt', 'pm', 'us', 'fr', 'de',
                 'it', 'be', 'uk', 'nl', 'tf']
    }
}
def ror32(v, s):
    """Rotate the 32-bit integer *v* right by *s* bits (0 <= s < 32).

    *v* is masked to 32 bits first, so inputs with stray high bits still
    rotate correctly.
    """
    v &= 0xFFFFFFFF
    low = v >> s
    high = (v << (32 - s)) & 0xFFFFFFFF
    return low | high
def rol32(v, s):
    """Rotate the 32-bit integer *v* left by *s* bits (0 <= s < 32).

    *v* is masked to 32 bits first, matching ``ror32``; previously values
    with bits above bit 31 could leak into the result via the right shift.
    Identical to the old behavior for all in-range (32-bit) inputs.
    """
    v &= 0xFFFFFFFF
    return ((v << s) | (v >> (32 - s))) & 0xFFFFFFFF
def dga(date, config_nr, domain_nr):
    """Generate the *domain_nr*-th domain for *date* under config *config_nr*.

    Mixes year, day//2 and month (so the seed changes every two days) with
    the per-config seed via multiply-and-rotate rounds, then derives a
    5-15 character label and a TLD from the resulting 32-bit state.
    """
    c = config[config_nr]
    # date-dependent seed: one ror32 round per component
    t = ror32( 0xB11924E1*(date.year + 0x1BF5), c['shift'])
    if c['seed']:
        t = ror32( 0xB11924E1*(t + c['seed'] + 0x27100001), c['shift'])
    t = ror32( 0xB11924E1*(t + (date.day//2) + 0x27100001), c['shift'])
    t = ror32( 0xB11924E1*(t + date.month + 0x2709A354), c['shift'])
    # fold in the domain index (mod c['mod'] distinct values per period)
    nr = rol32(domain_nr % c['mod'], 21)
    s = rol32(c['seed'], 17)
    r = (ror32(0xB11924E1*(nr + t + s + 0x27100001), c['shift']) + 0x27100001) & 0xFFFFFFFF
    # label length in 5..15, then one hash round per character (a-y)
    length = (r % 11) + 5
    domain = ""
    for i in range(length):
        r = (ror32(0xB11924E1*rol32(r, i), c['shift']) + 0x27100001) & 0xFFFFFFFF
        domain += chr(r % 25 + ord('a'))
    domain += '.'
    # one more round selects the TLD
    r = ror32(r*0xB11924E1, c['shift'])
    tlds = c['tlds']
    tld_i = ((r + 0x27100001) & 0xffffffff) % len(tlds)
    domain += tlds[tld_i]
    return domain
if __name__ == "__main__":
    # CLI: print the domains for one config variant on a given (or today's) date.
    parser = argparse.ArgumentParser()
    parser.add_argument("-d", "--date",
            help="date for which to generate domains")
    parser.add_argument("-c", "--config", choices=list(range(1,8)),
            help="config nr", type=int, default=1)
    args = parser.parse_args()
    if args.date:
        # expects ISO-style YYYY-MM-DD
        d = datetime.strptime(args.date, "%Y-%m-%d")
    else:
        d = datetime.now()
    # 8 iterations; dga() reduces the index mod the config's 'mod' (6 or 8)
    for i in range(8):
        print( dga(d, args.config, i) )
| gpl-2.0 |
ngageoint/scale | scale/storage/test/messages/test_delete_files.py | 1 | 15154 | from __future__ import unicode_literals
import os
import django
from django.test import TestCase
from job.test import utils as job_test_utils
from product.models import FileAncestryLink
from product.test import utils as product_test_utils
from storage.messages.delete_files import create_delete_files_messages, DeleteFiles
from storage.models import PurgeResults, ScaleFile
from storage.test import utils as storage_test_utils
import trigger.test.utils as trigger_test_utils
class TestDeleteFiles(TestCase):
    """Tests for the DeleteFiles command message."""

    # Relative file paths used for the product files created by most tests.
    _PRODUCT_PATHS = [os.path.join('my_dir', 'my_file.txt'),
                      os.path.join('my_dir', 'my_file1.json'),
                      os.path.join('my_dir', 'my_file2.json'),
                      os.path.join('my_dir', 'my_file3.json')]

    def setUp(self):
        django.setup()

        self.source_file = storage_test_utils.create_file(file_type='SOURCE')
        trigger = trigger_test_utils.create_trigger_event()
        PurgeResults.objects.create(source_file_id=self.source_file.id, trigger_event=trigger)

    def _create_files(self, job_exe=None, file_paths=None):
        """Creates one ScaleFile model per path and returns the list of models.

        :param job_exe: Optional job execution to associate with the files
        :param file_paths: Optional list of paths; defaults to _PRODUCT_PATHS
        """
        paths = file_paths if file_paths is not None else self._PRODUCT_PATHS
        if job_exe is None:
            return [storage_test_utils.create_file(file_path=path) for path in paths]
        return [storage_test_utils.create_file(file_path=path, job_exe=job_exe) for path in paths]

    def _create_message(self, files, job_id, trigger_id, source_file_id, purge):
        """Builds a DeleteFiles message containing as many of the files as fit."""
        message = DeleteFiles()
        message.purge = purge
        message.job_id = job_id
        message.trigger_id = trigger_id
        message.source_file_id = source_file_id
        for scale_file in files:
            if message.can_fit_more():
                message.add_file(scale_file.id)
        return message

    def test_json(self):
        """Tests converting a DeleteFiles message to and from JSON"""

        job = job_test_utils.create_job()
        job_exe = job_test_utils.create_job_exe(job=job)
        trigger = trigger_test_utils.create_trigger_event()
        PurgeResults.objects.create(source_file_id=self.source_file.id, trigger_event=trigger)

        files = self._create_files(job_exe=job_exe,
                                   file_paths=[os.path.join('my_dir', 'my_file.txt'),
                                               os.path.join('my_dir', 'my_file.json')])
        message = self._create_message(files, job.id, trigger.id, self.source_file.id, purge=True)

        # Convert message to JSON and back, and then execute
        message_json_dict = message.to_json()
        new_message = DeleteFiles.from_json(message_json_dict)
        result = new_message.execute()

        self.assertTrue(result)
        # Both files should have been deleted
        self.assertEqual(ScaleFile.objects.filter(id=files[0].id).count(), 0)
        self.assertEqual(ScaleFile.objects.filter(id=files[1].id).count(), 0)
        # One new job for create_purge_jobs_messages
        self.assertEqual(len(new_message.new_messages), 1)

    def test_execute(self):
        """Tests calling DeleteFiles.execute() successfully"""

        job = job_test_utils.create_job()
        job_exe = job_test_utils.create_job_exe(job=job)
        trigger = trigger_test_utils.create_trigger_event()
        PurgeResults.objects.create(source_file_id=self.source_file.id, trigger_event=trigger)

        files = self._create_files(job_exe=job_exe)
        message = self._create_message(files, job.id, trigger.id, self.source_file.id, purge=False)

        # Execute message
        result = message.execute()
        self.assertTrue(result)

        # All files should have been marked as deleted (not removed from the database)
        for scale_file in files:
            self.assertTrue(ScaleFile.objects.filter(id=scale_file.id).values('is_deleted'))

        # Re-create the files (no job_exe this time) and delete them with purge enabled
        files = self._create_files()
        message = self._create_message(files, job.id, trigger.id, self.source_file.id, purge=True)

        # Execute message
        result = message.execute()
        self.assertTrue(result)

        # All files should have been purged (removed from the database entirely)
        for scale_file in files:
            self.assertEqual(ScaleFile.objects.filter(id=scale_file.id).count(), 0)

    def test_create_message(self):
        """Tests calling the create message function for DeleteFiles"""

        job = job_test_utils.create_job()
        job_exe = job_test_utils.create_job_exe(job=job)
        trigger = trigger_test_utils.create_trigger_event()
        files = self._create_files(job_exe=job_exe)

        # No exception is success
        create_delete_files_messages(files=files, job_id=job.id, trigger_id=trigger.id,
                                     source_file_id=self.source_file.id, purge=True)

    def test_execute_check_results(self):
        """Tests that DeleteFiles.execute() updates the PurgeResults count"""

        # Create PurgeResults entry
        source_file = storage_test_utils.create_file()
        trigger = trigger_test_utils.create_trigger_event()
        PurgeResults.objects.create(source_file_id=source_file.id, trigger_event=trigger)
        self.assertEqual(PurgeResults.objects.values_list('num_products_deleted', flat=True).get(
            source_file_id=source_file.id), 0)

        job = job_test_utils.create_job()
        job_exe = job_test_utils.create_job_exe(job=job)
        files = self._create_files(job_exe=job_exe)
        message = self._create_message(files, job.id, trigger.id, source_file.id, purge=True)

        # Execute message
        result = message.execute()
        self.assertTrue(result)

        # Check results are accurate
        self.assertEqual(PurgeResults.objects.values_list('num_products_deleted', flat=True).get(
            source_file_id=source_file.id), 4)

    def test_execute_force_stop(self):
        """Tests that DeleteFiles.execute() respects the force_stop_purge flag"""

        # Create PurgeResults entry with the force-stop flag set
        source_file = storage_test_utils.create_file()
        trigger = trigger_test_utils.create_trigger_event()
        PurgeResults.objects.create(source_file_id=source_file.id, trigger_event=trigger,
                                    force_stop_purge=True)
        self.assertEqual(PurgeResults.objects.values_list('num_products_deleted', flat=True).get(
            source_file_id=source_file.id), 0)

        job = job_test_utils.create_job()
        job_exe = job_test_utils.create_job_exe(job=job)
        files = self._create_files(job_exe=job_exe)
        message = self._create_message(files, job.id, trigger.id, source_file.id, purge=True)

        # Execute message
        result = message.execute()
        self.assertTrue(result)

        # No files should have been counted since the purge was force-stopped
        self.assertEqual(PurgeResults.objects.values_list('num_products_deleted', flat=True).get(
            source_file_id=source_file.id), 0)

    def test_execute_check_results_no_purge(self):
        """Tests that DeleteFiles.execute() does not count deletions when purge is False"""

        # Create PurgeResults entry
        source_file = storage_test_utils.create_file()
        trigger = trigger_test_utils.create_trigger_event()
        PurgeResults.objects.create(source_file_id=source_file.id, trigger_event=trigger)
        self.assertEqual(PurgeResults.objects.values_list('num_products_deleted', flat=True).get(
            source_file_id=source_file.id), 0)

        job = job_test_utils.create_job()
        job_exe = job_test_utils.create_job_exe(job=job)
        files = self._create_files(job_exe=job_exe)
        message = self._create_message(files, job.id, trigger.id, source_file.id, purge=False)

        # Execute message
        result = message.execute()
        self.assertTrue(result)

        # Check results are accurate. Should be 0 since purge flag was False.
        # Bug fix: assert against the source_file created for this test, not the
        # unrelated one created in setUp().
        self.assertEqual(PurgeResults.objects.values_list('num_products_deleted', flat=True).get(
            source_file_id=source_file.id), 0)

    def test_execute_ancestor(self):
        """Tests DeleteFiles.execute() when the products link back to the source file"""

        # Create PurgeResults entry
        source_file = storage_test_utils.create_file()
        trigger = trigger_test_utils.create_trigger_event()
        PurgeResults.objects.create(source_file_id=source_file.id, trigger_event=trigger)
        self.assertEqual(PurgeResults.objects.values_list('num_products_deleted', flat=True).get(
            source_file_id=source_file.id), 0)

        job = job_test_utils.create_job()
        job_exe = job_test_utils.create_job_exe(job=job)
        files = self._create_files(job_exe=job_exe)
        # Link every product file to the source file so purge tallies by trigger
        for scale_file in files:
            product_test_utils.create_file_link(ancestor=source_file, descendant=scale_file,
                                                job=job, job_exe=job_exe)

        message = self._create_message(files, job.id, trigger.id, source_file.id, purge=True)

        # Execute message
        result = message.execute()
        self.assertTrue(result)

        # Check results are accurate.
        self.assertEqual(PurgeResults.objects.values_list('num_products_deleted', flat=True).get(
            trigger_event=trigger), 4)
| apache-2.0 |
mlperf/training_results_v0.5 | v0.5.0/google/cloud_v2.512/resnet-tpuv2-512/code/resnet/model/models/official/recommendation/ncf_test.py | 4 | 10501 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests NCF."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import mock
import numpy as np
import tensorflow as tf
from absl import flags
from absl.testing import flagsaver
from official.recommendation import constants as rconst
from official.recommendation import data_preprocessing
from official.recommendation import neumf_model
from official.recommendation import ncf_main
from official.recommendation import stat_utils
NUM_TRAIN_NEG = 4
class NcfTest(tf.test.TestCase):
  """Tests the NCF evaluation metrics (HR / NDCG) and synthetic end-to-end runs."""

  @classmethod
  def setUpClass(cls):  # pylint: disable=invalid-name
    super(NcfTest, cls).setUpClass()
    # Register the NCF command-line flags once for all tests in this class.
    ncf_main.define_ncf_flags()

  def setUp(self):
    # Save the module-level constants so tests can mutate them;
    # tearDown() restores them.
    self.top_k_old = rconst.TOP_K
    self.num_eval_negatives_old = rconst.NUM_EVAL_NEGATIVES
    rconst.NUM_EVAL_NEGATIVES = 2

  def tearDown(self):
    # Undo the mutations made in setUp() / get_hit_rate_and_ndcg().
    rconst.NUM_EVAL_NEGATIVES = self.num_eval_negatives_old
    rconst.TOP_K = self.top_k_old

  def get_hit_rate_and_ndcg(self, predicted_scores_by_user, items_by_user,
                            top_k=rconst.TOP_K, match_mlperf=False):
    """Builds a small eval graph and returns [hit_rate, ndcg] for the batch.

    NOTE(review): mutates rconst.TOP_K and rconst.NUM_EVAL_NEGATIVES as a side
    effect; tearDown() restores them after each test.
    """
    rconst.TOP_K = top_k
    # One column per user is the positive item; the rest are negatives.
    rconst.NUM_EVAL_NEGATIVES = predicted_scores_by_user.shape[1] - 1

    g = tf.Graph()
    with g.as_default():
      logits = tf.convert_to_tensor(
          predicted_scores_by_user.reshape((-1, 1)), tf.float32)
      # Two-class softmax logits: class 0 fixed at zero, class 1 the score.
      softmax_logits = tf.concat([tf.zeros(logits.shape, dtype=logits.dtype),
                                  logits], axis=1)
      duplicate_mask = tf.convert_to_tensor(
          stat_utils.mask_duplicates(items_by_user, axis=1), tf.float32)

      metric_ops = neumf_model.compute_eval_loss_and_metrics(
          logits=logits, softmax_logits=softmax_logits,
          duplicate_mask=duplicate_mask, num_training_neg=NUM_TRAIN_NEG,
          match_mlperf=match_mlperf).eval_metric_ops

      hr = metric_ops[rconst.HR_KEY]
      ndcg = metric_ops[rconst.NDCG_KEY]

      init = [tf.global_variables_initializer(),
              tf.local_variables_initializer()]

    with self.test_session(graph=g) as sess:
      sess.run(init)
      # Each metric op is a (value, update_op) pair; run the update ops.
      return sess.run([hr[1], ndcg[1]])

  def test_hit_rate_and_ndcg(self):
    """Checks HR/NDCG at several top_k values, with and without MLPerf mode."""
    # Test with no duplicate items
    predictions = np.array([
        [1., 2., 0.],  # In top 2
        [2., 1., 0.],  # In top 1
        [0., 2., 1.],  # In top 3
        [2., 3., 4.]   # In top 3
    ])
    items = np.array([
        [1, 2, 3],
        [2, 3, 1],
        [3, 2, 1],
        [2, 1, 3],
    ])

    hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 1)
    self.assertAlmostEqual(hr, 1 / 4)
    self.assertAlmostEqual(ndcg, 1 / 4)

    hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 2)
    self.assertAlmostEqual(hr, 2 / 4)
    self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3)) / 4)

    hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 3)
    self.assertAlmostEqual(hr, 4 / 4)
    self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3) +
                                  2 * math.log(2) / math.log(4)) / 4)

    hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 1,
                                          match_mlperf=True)
    self.assertAlmostEqual(hr, 1 / 4)
    self.assertAlmostEqual(ndcg, 1 / 4)

    hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 2,
                                          match_mlperf=True)
    self.assertAlmostEqual(hr, 2 / 4)
    self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3)) / 4)

    hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 3,
                                          match_mlperf=True)
    self.assertAlmostEqual(hr, 4 / 4)
    self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3) +
                                  2 * math.log(2) / math.log(4)) / 4)

    # Test with duplicate items. In the MLPerf case, we treat the duplicates as
    # a single item. Otherwise, we treat the duplicates as separate items.
    predictions = np.array([
        [1., 2., 2., 3.],  # In top 4. MLPerf: In top 3
        [3., 1., 0., 2.],  # In top 1. MLPerf: In top 1
        [0., 2., 3., 2.],  # In top 4. MLPerf: In top 3
        [3., 2., 4., 2.]   # In top 2. MLPerf: In top 2
    ])
    items = np.array([
        [1, 2, 2, 3],
        [1, 2, 3, 4],
        [1, 2, 3, 2],
        [4, 3, 2, 1],
    ])

    hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 1)
    self.assertAlmostEqual(hr, 1 / 4)
    self.assertAlmostEqual(ndcg, 1 / 4)

    hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 2)
    self.assertAlmostEqual(hr, 2 / 4)
    self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3)) / 4)

    hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 3)
    self.assertAlmostEqual(hr, 2 / 4)
    self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3)) / 4)

    hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 4)
    self.assertAlmostEqual(hr, 4 / 4)
    self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3) +
                                  2 * math.log(2) / math.log(5)) / 4)

    hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 1,
                                          match_mlperf=True)
    self.assertAlmostEqual(hr, 1 / 4)
    self.assertAlmostEqual(ndcg, 1 / 4)

    hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 2,
                                          match_mlperf=True)
    self.assertAlmostEqual(hr, 2 / 4)
    self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3)) / 4)

    hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 3,
                                          match_mlperf=True)
    self.assertAlmostEqual(hr, 4 / 4)
    self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3) +
                                  2 * math.log(2) / math.log(4)) / 4)

    hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 4,
                                          match_mlperf=True)
    self.assertAlmostEqual(hr, 4 / 4)
    self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3) +
                                  2 * math.log(2) / math.log(4)) / 4)

    # Test with duplicate items, where the predictions for the same item can
    # differ. In the MLPerf case, we should take the first prediction.
    predictions = np.array([
        [3., 2., 4., 4.],  # In top 3. MLPerf: In top 2
        [3., 4., 2., 4.],  # In top 3. MLPerf: In top 3
        [2., 3., 4., 1.],  # In top 3. MLPerf: In top 2
        [4., 3., 5., 2.]   # In top 2. MLPerf: In top 1
    ])
    items = np.array([
        [1, 2, 2, 3],
        [4, 3, 3, 2],
        [2, 1, 1, 1],
        [4, 2, 2, 1],
    ])

    hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 1)
    self.assertAlmostEqual(hr, 0 / 4)
    self.assertAlmostEqual(ndcg, 0 / 4)

    hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 2)
    self.assertAlmostEqual(hr, 1 / 4)
    self.assertAlmostEqual(ndcg, (math.log(2) / math.log(3)) / 4)

    hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 3)
    self.assertAlmostEqual(hr, 4 / 4)
    self.assertAlmostEqual(ndcg, (math.log(2) / math.log(3) +
                                  3 * math.log(2) / math.log(4)) / 4)

    hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 4)
    self.assertAlmostEqual(hr, 4 / 4)
    self.assertAlmostEqual(ndcg, (math.log(2) / math.log(3) +
                                  3 * math.log(2) / math.log(4)) / 4)

    hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 1,
                                          match_mlperf=True)
    self.assertAlmostEqual(hr, 1 / 4)
    self.assertAlmostEqual(ndcg, 1 / 4)

    hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 2,
                                          match_mlperf=True)
    self.assertAlmostEqual(hr, 3 / 4)
    self.assertAlmostEqual(ndcg, (1 + 2 * math.log(2) / math.log(3)) / 4)

    hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 3,
                                          match_mlperf=True)
    self.assertAlmostEqual(hr, 4 / 4)
    self.assertAlmostEqual(ndcg, (1 + 2 * math.log(2) / math.log(3) +
                                  math.log(2) / math.log(4)) / 4)

    hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 4,
                                          match_mlperf=True)
    self.assertAlmostEqual(hr, 4 / 4)
    self.assertAlmostEqual(ndcg, (1 + 2 * math.log(2) / math.log(3) +
                                  math.log(2) / math.log(4)) / 4)

  # Shared flag overrides for the small synthetic end-to-end runs below.
  _BASE_END_TO_END_FLAGS = {
      "batch_size": 1024,
      "train_epochs": 1,
      "use_synthetic_data": True
  }

  @flagsaver.flagsaver(**_BASE_END_TO_END_FLAGS)
  @mock.patch.object(data_preprocessing, "SYNTHETIC_BATCHES_PER_EPOCH", 100)
  def test_end_to_end(self):
    ncf_main.main(None)

  @flagsaver.flagsaver(ml_perf=True, **_BASE_END_TO_END_FLAGS)
  @mock.patch.object(data_preprocessing, "SYNTHETIC_BATCHES_PER_EPOCH", 100)
  def test_end_to_end_mlperf(self):
    ncf_main.main(None)

  @flagsaver.flagsaver(use_estimator=False, **_BASE_END_TO_END_FLAGS)
  @mock.patch.object(data_preprocessing, "SYNTHETIC_BATCHES_PER_EPOCH", 100)
  def test_end_to_end_no_estimator(self):
    ncf_main.main(None)
    # Second run exercises the MLPerf code path; flagsaver restores the flag.
    flags.FLAGS.ml_perf = True
    ncf_main.main(None)

  @flagsaver.flagsaver(use_estimator=False, **_BASE_END_TO_END_FLAGS)
  @mock.patch.object(data_preprocessing, "SYNTHETIC_BATCHES_PER_EPOCH", 100)
  def test_end_to_end_while_loop(self):
    # We cannot set use_while_loop = True in the flagsaver constructor, because
    # if the flagsaver sets it to True before setting use_estimator to False,
    # the flag validator will throw an error.
    flags.FLAGS.use_while_loop = True
    ncf_main.main(None)
    flags.FLAGS.ml_perf = True
    ncf_main.main(None)
if __name__ == "__main__":
  # Surface INFO-level logs during local runs, then hand off to the TF runner.
  tf.logging.set_verbosity(tf.logging.INFO)
  tf.test.main()
| apache-2.0 |
JPFrancoia/scikit-learn | sklearn/decomposition/dict_learning.py | 13 | 46149 | """ Dictionary learning
"""
from __future__ import print_function
# Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import sys
import itertools
from math import sqrt, ceil
import numpy as np
from scipy import linalg
from numpy.lib.stride_tricks import as_strided
from ..base import BaseEstimator, TransformerMixin
from ..externals.joblib import Parallel, delayed, cpu_count
from ..externals.six.moves import zip
from ..utils import (check_array, check_random_state, gen_even_slices,
gen_batches, _get_n_jobs)
from ..utils.extmath import randomized_svd, row_norms
from ..utils.validation import check_is_fitted
from ..linear_model import Lasso, orthogonal_mp_gram, LassoLars, Lars
def _sparse_encode(X, dictionary, gram, cov=None, algorithm='lasso_lars',
regularization=None, copy_cov=True,
init=None, max_iter=1000, check_input=True, verbose=0):
"""Generic sparse coding
Each column of the result is the solution to a Lasso problem.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows.
gram: None | array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
gram can be None if method is 'threshold'.
cov: array, shape=(n_components, n_samples)
Precomputed covariance, dictionary * X'
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than regularization
from the projection dictionary * data'
regularization : int | float
The regularization parameter. It corresponds to alpha when
algorithm is 'lasso_lars', 'lasso_cd' or 'threshold'.
Otherwise it corresponds to n_nonzero_coefs.
init: array of shape (n_samples, n_components)
Initialization value of the sparse code. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
check_input: boolean, optional
If False, the input arrays X and dictionary will not be checked.
verbose: int
Controls the verbosity; the higher, the more messages. Defaults to 0.
Returns
-------
code: array of shape (n_components, n_features)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
if X.ndim == 1:
X = X[:, np.newaxis]
n_samples, n_features = X.shape
if cov is None and algorithm != 'lasso_cd':
# overwriting cov is safe
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm == 'lasso_lars':
alpha = float(regularization) / n_features # account for scaling
try:
err_mgt = np.seterr(all='ignore')
# Not passing in verbose=max(0, verbose-1) because Lars.fit already
# corrects the verbosity level.
lasso_lars = LassoLars(alpha=alpha, fit_intercept=False,
verbose=verbose, normalize=False,
precompute=gram, fit_path=False)
lasso_lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lasso_lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'lasso_cd':
alpha = float(regularization) / n_features # account for scaling
# TODO: Make verbosity argument for Lasso?
# sklearn.linear_model.coordinate_descent.enet_path has a verbosity
# argument that we could pass in from Lasso.
clf = Lasso(alpha=alpha, fit_intercept=False, normalize=False,
precompute=gram, max_iter=max_iter, warm_start=True)
clf.coef_ = init
clf.fit(dictionary.T, X.T, check_input=check_input)
new_code = clf.coef_
elif algorithm == 'lars':
try:
err_mgt = np.seterr(all='ignore')
# Not passing in verbose=max(0, verbose-1) because Lars.fit already
# corrects the verbosity level.
lars = Lars(fit_intercept=False, verbose=verbose, normalize=False,
precompute=gram, n_nonzero_coefs=int(regularization),
fit_path=False)
lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'threshold':
new_code = ((np.sign(cov) *
np.maximum(np.abs(cov) - regularization, 0)).T)
elif algorithm == 'omp':
# TODO: Should verbose argument be passed to this?
new_code = orthogonal_mp_gram(
Gram=gram, Xy=cov, n_nonzero_coefs=int(regularization),
tol=None, norms_squared=row_norms(X, squared=True),
copy_Xy=copy_cov).T
else:
raise ValueError('Sparse coding method must be "lasso_lars" '
'"lasso_cd", "lasso", "threshold" or "omp", got %s.'
% algorithm)
return new_code
# XXX : could be moved to the linear_model module
def sparse_encode(X, dictionary, gram=None, cov=None, algorithm='lasso_lars',
                  n_nonzero_coefs=None, alpha=None, copy_cov=True, init=None,
                  max_iter=1000, n_jobs=1, check_input=True, verbose=0):
    """Sparse coding

    Each row of the result is the solution to a sparse coding problem.
    The goal is to find a sparse array `code` such that::

        X ~= code * dictionary

    Read more in the :ref:`User Guide <SparseCoder>`.

    Parameters
    ----------
    X: array of shape (n_samples, n_features)
        Data matrix

    dictionary: array of shape (n_components, n_features)
        The dictionary matrix against which to solve the sparse coding of
        the data. Some of the algorithms assume normalized rows for meaningful
        output.

    gram: array, shape=(n_components, n_components)
        Precomputed Gram matrix, dictionary * dictionary'

    cov: array, shape=(n_components, n_samples)
        Precomputed covariance, dictionary * X'

    algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
        lars: uses the least angle regression method (linear_model.lars_path)
        lasso_lars: uses Lars to compute the Lasso solution
        lasso_cd: uses the coordinate descent method to compute the
        Lasso solution (linear_model.Lasso). lasso_lars will be faster if
        the estimated components are sparse.
        omp: uses orthogonal matching pursuit to estimate the sparse solution
        threshold: squashes to zero all coefficients less than alpha from
        the projection dictionary * X'

    n_nonzero_coefs: int, 0.1 * n_features by default
        Number of nonzero coefficients to target in each column of the
        solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
        and is overridden by `alpha` in the `omp` case.

    alpha: float, 1. by default
        If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
        penalty applied to the L1 norm.
        If `algorithm='threshold'`, `alpha` is the absolute value of the
        threshold below which coefficients will be squashed to zero.
        If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
        the reconstruction error targeted. In this case, it overrides
        `n_nonzero_coefs`.

    init: array of shape (n_samples, n_components)
        Initialization value of the sparse codes. Only used if
        `algorithm='lasso_cd'`.

    max_iter: int, 1000 by default
        Maximum number of iterations to perform if `algorithm='lasso_cd'`.

    copy_cov: boolean, optional
        Whether to copy the precomputed covariance matrix; if False, it may be
        overwritten.

    n_jobs: int, optional
        Number of parallel jobs to run.

    check_input: boolean, optional
        If False, the input arrays X and dictionary will not be checked.

    verbose : int, optional
        Controls the verbosity; the higher, the more messages. Defaults to 0.

    Returns
    -------
    code: array of shape (n_samples, n_components)
        The sparse codes

    See also
    --------
    sklearn.linear_model.lars_path
    sklearn.linear_model.orthogonal_mp
    sklearn.linear_model.Lasso
    SparseCoder
    """
    if check_input:
        if algorithm == 'lasso_cd':
            # lasso_cd requires C-ordered float64 arrays for its solver.
            dictionary = check_array(dictionary, order='C', dtype='float64')
            X = check_array(X, order='C', dtype='float64')
        else:
            dictionary = check_array(dictionary)
            X = check_array(X)

    n_samples, n_features = X.shape
    n_components = dictionary.shape[0]

    # Precompute shared matrices once so the per-slice workers can reuse them.
    if gram is None and algorithm != 'threshold':
        gram = np.dot(dictionary, dictionary.T)

    if cov is None and algorithm != 'lasso_cd':
        copy_cov = False
        cov = np.dot(dictionary, X.T)

    if algorithm in ('lars', 'omp'):
        # Sparsity target expressed as a number of nonzero coefficients.
        regularization = n_nonzero_coefs
        if regularization is None:
            regularization = min(max(n_features / 10, 1), n_components)
    else:
        # Sparsity expressed as an L1 penalty / threshold value.
        regularization = alpha
        if regularization is None:
            regularization = 1.

    if n_jobs == 1 or algorithm == 'threshold':
        # Serial path: delegate directly without slicing the data.
        code = _sparse_encode(X,
                              dictionary, gram, cov=cov,
                              algorithm=algorithm,
                              regularization=regularization, copy_cov=copy_cov,
                              init=init,
                              max_iter=max_iter,
                              check_input=False,
                              verbose=verbose)
        # This ensures that the dimensionality of code is always 2,
        # consistent with the case n_jobs > 1
        if code.ndim == 1:
            code = code[np.newaxis, :]
        return code

    # Enter parallel code block: split the samples into even slices and
    # encode each slice in a separate job, then stitch the results together.
    code = np.empty((n_samples, n_components))
    slices = list(gen_even_slices(n_samples, _get_n_jobs(n_jobs)))

    code_views = Parallel(n_jobs=n_jobs, verbose=verbose)(
        delayed(_sparse_encode)(
            X[this_slice], dictionary, gram,
            cov[:, this_slice] if cov is not None else None,
            algorithm,
            regularization=regularization, copy_cov=copy_cov,
            init=init[this_slice] if init is not None else None,
            max_iter=max_iter,
            check_input=False)
        for this_slice in slices)
    for this_slice, this_view in zip(slices, code_views):
        code[this_slice] = this_view
    return code
def _update_dict(dictionary, Y, code, verbose=False, return_r2=False,
                 random_state=None):
    """Update the dense dictionary factor in place.

    Performs one block-coordinate pass over the atoms: each atom is refit
    against the residual with the other atoms held fixed, then renormalized
    to unit L2 norm.

    Parameters
    ----------
    dictionary: array of shape (n_features, n_components)
        Value of the dictionary at the previous iteration.

    Y: array of shape (n_features, n_samples)
        Data matrix.

    code: array of shape (n_components, n_samples)
        Sparse coding of the data against which to optimize the dictionary.

    verbose:
        Degree of output the procedure will print.

    return_r2: bool
        Whether to compute and return the residual sum of squares corresponding
        to the computed solution.

    random_state: int or RandomState
        Pseudo number generator state used for random sampling.

    Returns
    -------
    dictionary: array of shape (n_features, n_components)
        Updated dictionary.
        When `return_r2` is True, a `(dictionary, R)` tuple is returned
        instead, where R is the residual sum of squares.
    """
    n_components = len(code)
    # NOTE(review): Y is (n_features, n_samples), so this is actually the
    # number of features; the name is historical. It is only used to size the
    # replacement atom below, which must have n_features entries.
    n_samples = Y.shape[0]
    random_state = check_random_state(random_state)
    # Residuals, computed 'in-place' for efficiency
    R = -np.dot(dictionary, code)
    R += Y
    # Fortran order so the BLAS ger (rank-1 update) can work in place.
    R = np.asfortranarray(R)
    ger, = linalg.get_blas_funcs(('ger',), (dictionary, code))
    for k in range(n_components):
        # Add atom k's contribution back into the residual:
        # R <- 1.0 * U_k * V_k^T + R
        R = ger(1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
        # Refit atom k against the full residual (least-squares direction).
        dictionary[:, k] = np.dot(R, code[k, :].T)
        # Scale k'th atom
        atom_norm_square = np.dot(dictionary[:, k], dictionary[:, k])
        if atom_norm_square < 1e-20:
            # Degenerate (unused) atom: replace it with a random direction.
            if verbose == 1:
                sys.stdout.write("+")
                sys.stdout.flush()
            elif verbose:
                print("Adding new random atom")
            dictionary[:, k] = random_state.randn(n_samples)
            # Setting corresponding coefs to 0
            code[k, :] = 0.0
            dictionary[:, k] /= sqrt(np.dot(dictionary[:, k],
                                            dictionary[:, k]))
            # No residual subtraction needed: code[k, :] is now all zeros.
        else:
            dictionary[:, k] /= sqrt(atom_norm_square)
            # Remove the updated atom's contribution from the residual:
            # R <- -1.0 * U_k * V_k^T + R
            R = ger(-1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
    if return_r2:
        R **= 2
        # R is fortran-ordered. For numpy version < 1.6, sum does not
        # follow the quick striding first, and is thus inefficient on
        # fortran ordered data. We take a flat view of the data with no
        # striding
        R = as_strided(R, shape=(R.size, ), strides=(R.dtype.itemsize,))
        R = np.sum(R)
        return dictionary, R
    return dictionary
def dict_learning(X, n_components, alpha, max_iter=100, tol=1e-8,
                  method='lars', n_jobs=1, dict_init=None, code_init=None,
                  callback=None, verbose=False, random_state=None,
                  return_n_iter=False):
    """Solves a dictionary learning matrix factorization problem.
    Finds the best dictionary and the corresponding sparse code for
    approximating the data matrix X by solving::
        (U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
                     (U,V)
                    with || V_k ||_2 = 1 for all 0 <= k < n_components
    where V is the dictionary and U is the sparse code.
    Read more in the :ref:`User Guide <DictionaryLearning>`.
    Parameters
    ----------
    X : array of shape (n_samples, n_features)
        Data matrix.
    n_components : int,
        Number of dictionary atoms to extract.
    alpha : float,
        Sparsity controlling parameter.
    max_iter : int,
        Maximum number of iterations to perform.
    tol : float,
        Tolerance for the stopping condition.
    method : {'lars', 'cd'}
        lars: uses the least angle regression method to solve the lasso problem
        (linear_model.lars_path)
        cd: uses the coordinate descent method to compute the
        Lasso solution (linear_model.Lasso). Lars will be faster if
        the estimated components are sparse.
    n_jobs : int,
        Number of parallel jobs to run, or -1 to autodetect.
    dict_init : array of shape (n_components, n_features),
        Initial value for the dictionary for warm restart scenarios.
    code_init : array of shape (n_samples, n_components),
        Initial value for the sparse code for warm restart scenarios.
    callback :
        Callable that gets invoked every five iterations.
    verbose :
        Degree of output the procedure will print.
    random_state : int or RandomState
        Pseudo number generator state used for random sampling.
    return_n_iter : bool
        Whether or not to return the number of iterations.
    Returns
    -------
    code : array of shape (n_samples, n_components)
        The sparse code factor in the matrix factorization.
    dictionary : array of shape (n_components, n_features),
        The dictionary factor in the matrix factorization.
    errors : array
        Vector of errors at each iteration.
    n_iter : int
        Number of iterations run. Returned only if `return_n_iter` is
        set to True.
    See also
    --------
    dict_learning_online
    DictionaryLearning
    MiniBatchDictionaryLearning
    SparsePCA
    MiniBatchSparsePCA
    """
    if method not in ('lars', 'cd'):
        raise ValueError('Coding method %r not supported as a fit algorithm.'
                         % method)
    # sparse_encode expects the full lasso solver name
    # ('lasso_lars' / 'lasso_cd').
    method = 'lasso_' + method
    t0 = time.time()
    # Avoid integer division problems
    alpha = float(alpha)
    random_state = check_random_state(random_state)
    if n_jobs == -1:
        n_jobs = cpu_count()
    # Init the code and the dictionary with SVD of Y
    if code_init is not None and dict_init is not None:
        code = np.array(code_init, order='F')
        # Don't copy V, it will happen below
        dictionary = dict_init
    else:
        # The truncated SVD gives the best rank-r factorization
        # X ~= code * dictionary; the singular values are folded into
        # the dictionary rows.
        code, S, dictionary = linalg.svd(X, full_matrices=False)
        dictionary = S[:, np.newaxis] * dictionary
    r = len(dictionary)
    if n_components <= r: # True even if n_components=None
        code = code[:, :n_components]
        dictionary = dictionary[:n_components, :]
    else:
        # More atoms requested than the SVD rank: pad with zeros.
        code = np.c_[code, np.zeros((len(code), n_components - r))]
        dictionary = np.r_[dictionary,
                           np.zeros((n_components - r, dictionary.shape[1]))]
    # Fortran-order dict, as we are going to access its row vectors
    dictionary = np.array(dictionary, order='F')
    residuals = 0
    errors = []
    current_cost = np.nan
    if verbose == 1:
        print('[dict_learning]', end=' ')
    # If max_iter is 0, number of iterations returned should be zero
    ii = -1
    # Alternating minimization: solve for the code with the dictionary
    # fixed, then update the dictionary atoms with the code fixed.
    for ii in range(max_iter):
        dt = (time.time() - t0)
        if verbose == 1:
            sys.stdout.write(".")
            sys.stdout.flush()
        elif verbose:
            print("Iteration % 3i "
                  "(elapsed time: % 3is, % 4.1fmn, current cost % 7.3f)"
                  % (ii, dt, dt / 60, current_cost))
        # Update code
        code = sparse_encode(X, dictionary, algorithm=method, alpha=alpha,
                             init=code, n_jobs=n_jobs)
        # Update dictionary
        # _update_dict works on a (n_features, n_components) dictionary,
        # hence the transposes; return_r2=True also yields the residual
        # sum of squares needed for the cost below.
        dictionary, residuals = _update_dict(dictionary.T, X.T, code.T,
                                             verbose=verbose, return_r2=True,
                                             random_state=random_state)
        dictionary = dictionary.T
        # Cost function
        current_cost = 0.5 * residuals + alpha * np.sum(np.abs(code))
        errors.append(current_cost)
        if ii > 0:
            dE = errors[-2] - errors[-1]
            # assert(dE >= -tol * errors[-1])
            # Stop once the relative decrease of the cost drops below tol.
            if dE < tol * errors[-1]:
                if verbose == 1:
                    # A line return
                    print("")
                elif verbose:
                    print("--- Convergence reached after %d iterations" % ii)
                break
        if ii % 5 == 0 and callback is not None:
            callback(locals())
    if return_n_iter:
        return code, dictionary, errors, ii + 1
    else:
        return code, dictionary, errors
def dict_learning_online(X, n_components=2, alpha=1, n_iter=100,
                         return_code=True, dict_init=None, callback=None,
                         batch_size=3, verbose=False, shuffle=True, n_jobs=1,
                         method='lars', iter_offset=0, random_state=None,
                         return_inner_stats=False, inner_stats=None,
                         return_n_iter=False):
    """Solves a dictionary learning matrix factorization problem online.
    Finds the best dictionary and the corresponding sparse code for
    approximating the data matrix X by solving::
        (U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
                     (U,V)
                     with || V_k ||_2 = 1 for all 0 <= k < n_components
    where V is the dictionary and U is the sparse code. This is
    accomplished by repeatedly iterating over mini-batches by slicing
    the input data.
    Read more in the :ref:`User Guide <DictionaryLearning>`.
    Parameters
    ----------
    X: array of shape (n_samples, n_features)
        Data matrix.
    n_components : int,
        Number of dictionary atoms to extract.
    alpha : float,
        Sparsity controlling parameter.
    n_iter : int,
        Number of iterations to perform.
    return_code : boolean,
        Whether to also return the code U or just the dictionary V.
    dict_init : array of shape (n_components, n_features),
        Initial value for the dictionary for warm restart scenarios.
    callback :
        Callable that gets invoked at the end of each iteration.
    batch_size : int,
        The number of samples to take in each batch.
    verbose :
        Degree of output the procedure will print.
    shuffle : boolean,
        Whether to shuffle the data before splitting it in batches.
    n_jobs : int,
        Number of parallel jobs to run, or -1 to autodetect.
    method : {'lars', 'cd'}
        lars: uses the least angle regression method to solve the lasso problem
        (linear_model.lars_path)
        cd: uses the coordinate descent method to compute the
        Lasso solution (linear_model.Lasso). Lars will be faster if
        the estimated components are sparse.
    iter_offset : int, default 0
        Number of previous iterations completed on the dictionary used for
        initialization.
    random_state : int or RandomState
        Pseudo number generator state used for random sampling.
    return_inner_stats : boolean, optional
        Return the inner statistics A (dictionary covariance) and B
        (data approximation). Useful to restart the algorithm in an
        online setting. If return_inner_stats is True, return_code is
        ignored
    inner_stats : tuple of (A, B) ndarrays
        Inner sufficient statistics that are kept by the algorithm.
        Passing them at initialization is useful in online settings, to
        avoid loosing the history of the evolution.
        A (n_components, n_components) is the dictionary covariance matrix.
        B (n_features, n_components) is the data approximation matrix
    return_n_iter : bool
        Whether or not to return the number of iterations.
    Returns
    -------
    code : array of shape (n_samples, n_components),
        the sparse code (only returned if `return_code=True`)
    dictionary : array of shape (n_components, n_features),
        the solutions to the dictionary learning problem
    n_iter : int
        Number of iterations run. Returned only if `return_n_iter` is
        set to `True`.
    See also
    --------
    dict_learning
    DictionaryLearning
    MiniBatchDictionaryLearning
    SparsePCA
    MiniBatchSparsePCA
    """
    if n_components is None:
        n_components = X.shape[1]
    if method not in ('lars', 'cd'):
        raise ValueError('Coding method not supported as a fit algorithm.')
    # sparse_encode expects the full lasso solver name
    # ('lasso_lars' / 'lasso_cd').
    method = 'lasso_' + method
    t0 = time.time()
    n_samples, n_features = X.shape
    # Avoid integer division problems
    alpha = float(alpha)
    random_state = check_random_state(random_state)
    if n_jobs == -1:
        n_jobs = cpu_count()
    # Init V with SVD of X
    if dict_init is not None:
        dictionary = dict_init
    else:
        _, S, dictionary = randomized_svd(X, n_components,
                                          random_state=random_state)
        dictionary = S[:, np.newaxis] * dictionary
    r = len(dictionary)
    if n_components <= r:
        dictionary = dictionary[:n_components, :]
    else:
        # More atoms requested than the SVD rank: pad with zeros.
        dictionary = np.r_[dictionary,
                           np.zeros((n_components - r, dictionary.shape[1]))]
    if verbose == 1:
        print('[dict_learning]', end=' ')
    if shuffle:
        # Shuffle a copy so the caller's X is left untouched.
        X_train = X.copy()
        random_state.shuffle(X_train)
    else:
        X_train = X
    # Fortran-ordered (n_features, n_components) dictionary: the update
    # step accesses its columns (= atoms).
    dictionary = check_array(dictionary.T, order='F', dtype=np.float64,
                             copy=False)
    X_train = check_array(X_train, order='C', dtype=np.float64, copy=False)
    # Cycle endlessly over the mini-batch slices; the zip with the
    # iteration range below bounds the loop.
    batches = gen_batches(n_samples, batch_size)
    batches = itertools.cycle(batches)
    # The covariance of the dictionary
    if inner_stats is None:
        A = np.zeros((n_components, n_components))
        # The data approximation
        B = np.zeros((n_features, n_components))
    else:
        # Warm restart: continue accumulating into copies of the
        # caller-provided sufficient statistics.
        A = inner_stats[0].copy()
        B = inner_stats[1].copy()
    # If n_iter is zero, we need to return zero.
    ii = iter_offset - 1
    for ii, batch in zip(range(iter_offset, iter_offset + n_iter), batches):
        this_X = X_train[batch]
        dt = (time.time() - t0)
        if verbose == 1:
            sys.stdout.write(".")
            sys.stdout.flush()
        elif verbose:
            if verbose > 10 or ii % ceil(100. / verbose) == 0:
                print ("Iteration % 3i (elapsed time: % 3is, % 4.1fmn)"
                       % (ii, dt, dt / 60))
        this_code = sparse_encode(this_X, dictionary.T, algorithm=method,
                                  alpha=alpha, n_jobs=n_jobs).T
        # Update the auxiliary variables
        # beta is a forgetting factor that down-weights the statistics of
        # earlier iterations; the theta schedule follows the online
        # dictionary learning algorithm (Mairal et al. 2009, cited in the
        # estimator docstrings in this module).
        if ii < batch_size - 1:
            theta = float((ii + 1) * batch_size)
        else:
            theta = float(batch_size ** 2 + ii + 1 - batch_size)
        beta = (theta + 1 - batch_size) / (theta + 1)
        # A accumulates code * code^T, B accumulates X^T * code^T.
        A *= beta
        A += np.dot(this_code, this_code.T)
        B *= beta
        B += np.dot(this_X.T, this_code.T)
        # Update dictionary
        dictionary = _update_dict(dictionary, B, A, verbose=verbose,
                                  random_state=random_state)
        # XXX: Can the residuals be of any use?
        # Maybe we need a stopping criteria based on the amount of
        # modification in the dictionary
        if callback is not None:
            callback(locals())
    if return_inner_stats:
        if return_n_iter:
            return dictionary.T, (A, B), ii - iter_offset + 1
        else:
            return dictionary.T, (A, B)
    if return_code:
        if verbose > 1:
            print('Learning code...', end=' ')
        elif verbose == 1:
            print('|', end=' ')
        # One final pass over the whole data set with the learnt
        # dictionary to produce the returned code.
        code = sparse_encode(X, dictionary.T, algorithm=method, alpha=alpha,
                             n_jobs=n_jobs, check_input=False)
        if verbose > 1:
            dt = (time.time() - t0)
            print('done (total time: % 3is, % 4.1fmn)' % (dt, dt / 60))
        if return_n_iter:
            return code, dictionary.T, ii - iter_offset + 1
        else:
            return code, dictionary.T
    if return_n_iter:
        return dictionary.T, ii - iter_offset + 1
    else:
        return dictionary.T
class SparseCodingMixin(TransformerMixin):
    """Sparse coding mixin.

    Stores the parameters shared by the sparse-coding estimators and
    provides the common ``transform`` implementation that encodes data
    against the fitted ``components_`` dictionary.
    """

    def _set_sparse_coding_params(self, n_components,
                                  transform_algorithm='omp',
                                  transform_n_nonzero_coefs=None,
                                  transform_alpha=None, split_sign=False,
                                  n_jobs=1):
        # Record the shared sparse-coding parameters on the estimator.
        self.n_components = n_components
        self.transform_algorithm = transform_algorithm
        self.transform_n_nonzero_coefs = transform_n_nonzero_coefs
        self.transform_alpha = transform_alpha
        self.split_sign = split_sign
        self.n_jobs = n_jobs

    def transform(self, X, y=None):
        """Encode the data as a sparse combination of the dictionary atoms.

        Coding method is determined by the object parameter
        `transform_algorithm`.

        Parameters
        ----------
        X : array of shape (n_samples, n_features)
            Test data to be transformed, must have the same number of
            features as the data used to train the model.

        Returns
        -------
        X_new : array, shape (n_samples, n_components)
            Transformed data.  If ``split_sign`` is True the shape is
            (n_samples, 2 * n_components): positive parts followed by the
            negated negative parts.
        """
        check_is_fitted(self, 'components_')
        X = check_array(X)
        code = sparse_encode(
            X, self.components_, algorithm=self.transform_algorithm,
            n_nonzero_coefs=self.transform_n_nonzero_coefs,
            alpha=self.transform_alpha, n_jobs=self.n_jobs)
        if self.split_sign:
            # feature vector is split into a positive and negative side
            n_samples, n_features = code.shape
            split_code = np.empty((n_samples, 2 * n_features))
            split_code[:, :n_features] = np.maximum(code, 0)
            split_code[:, n_features:] = -np.minimum(code, 0)
            code = split_code
        return code
class SparseCoder(BaseEstimator, SparseCodingMixin):
    """Sparse coding against a fixed, precomputed dictionary.

    Each row of the transformed result is the solution of a sparse coding
    problem: find a sparse array `code` such that::

        X ~= code * dictionary

    Read more in the :ref:`User Guide <SparseCoder>`.

    Parameters
    ----------
    dictionary : array, [n_components, n_features]
        The dictionary atoms used for sparse coding. Lines are assumed to be
        normalized to unit norm.
    transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
        Solver used to encode the data:
        lars: least angle regression (linear_model.lars_path);
        lasso_lars: Lars-based Lasso solution;
        lasso_cd: coordinate-descent Lasso solution (linear_model.Lasso) --
        lasso_lars will be faster if the estimated components are sparse;
        omp: orthogonal matching pursuit;
        threshold: squashes to zero all coefficients less than alpha from
        the projection ``dictionary * X'``.
    transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
        Number of nonzero coefficients to target in each column of the
        solution. Only used by `algorithm='lars'` and `algorithm='omp'`,
        and overridden by `alpha` in the `omp` case.
    transform_alpha : float, 1. by default
        For 'lasso_lars' and 'lasso_cd', the L1 penalty.
        For 'threshold', the absolute value below which coefficients are
        squashed to zero.
        For 'omp', the targeted reconstruction-error tolerance; in that
        case it overrides `n_nonzero_coefs`.
    split_sign : bool, False by default
        Whether to split the sparse feature vector into the concatenation
        of its negative part and its positive part. This can improve the
        performance of downstream classifiers.
    n_jobs : int,
        number of parallel jobs to run

    Attributes
    ----------
    components_ : array, [n_components, n_features]
        The unchanged dictionary atoms

    See also
    --------
    DictionaryLearning
    MiniBatchDictionaryLearning
    SparsePCA
    MiniBatchSparsePCA
    sparse_encode
    """

    def __init__(self, dictionary, transform_algorithm='omp',
                 transform_n_nonzero_coefs=None, transform_alpha=None,
                 split_sign=False, n_jobs=1):
        # The dictionary is fixed: it is stored directly as components_,
        # and its first dimension determines n_components.
        self.components_ = dictionary
        self._set_sparse_coding_params(dictionary.shape[0],
                                       transform_algorithm,
                                       transform_n_nonzero_coefs,
                                       transform_alpha, split_sign, n_jobs)

    def fit(self, X, y=None):
        """Do nothing and return the estimator unchanged.

        Present only to satisfy the usual estimator API so the coder can
        be used in pipelines.
        """
        return self
class DictionaryLearning(BaseEstimator, SparseCodingMixin):
    """Dictionary learning.

    Learns a dictionary (a set of atoms) that can best be used to represent
    data using a sparse code, by solving the optimization problem::

        (U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
                    (U,V)
                    with || V_k ||_2 = 1 for all 0 <= k < n_components

    Read more in the :ref:`User Guide <DictionaryLearning>`.

    Parameters
    ----------
    n_components : int,
        number of dictionary elements to extract
    alpha : float,
        sparsity controlling parameter
    max_iter : int,
        maximum number of iterations to perform
    tol : float,
        tolerance for numerical error
    fit_algorithm : {'lars', 'cd'}
        lars: solves the lasso problem with least angle regression
        (linear_model.lars_path);
        cd: uses coordinate descent (linear_model.Lasso). Lars will be
        faster if the estimated components are sparse.
        .. versionadded:: 0.17
           *cd* coordinate descent method to improve speed.
    transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
        Algorithm used to transform the data:
        lars: least angle regression (linear_model.lars_path);
        lasso_lars: Lars-based Lasso solution;
        lasso_cd: coordinate-descent Lasso solution (linear_model.Lasso) --
        lasso_lars will be faster if the estimated components are sparse;
        omp: orthogonal matching pursuit;
        threshold: squashes to zero all coefficients less than alpha from
        the projection ``dictionary * X'``.
        .. versionadded:: 0.17
           *lasso_cd* coordinate descent method to improve speed.
    transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
        Number of nonzero coefficients to target in each column of the
        solution. Only used by `algorithm='lars'` and `algorithm='omp'`,
        and overridden by `alpha` in the `omp` case.
    transform_alpha : float, 1. by default
        For 'lasso_lars' and 'lasso_cd', the L1 penalty.
        For 'threshold', the absolute value below which coefficients are
        squashed to zero.
        For 'omp', the targeted reconstruction-error tolerance; in that
        case it overrides `n_nonzero_coefs`.
    split_sign : bool, False by default
        Whether to split the sparse feature vector into the concatenation
        of its negative part and its positive part. This can improve the
        performance of downstream classifiers.
    n_jobs : int,
        number of parallel jobs to run
    code_init : array of shape (n_samples, n_components),
        initial value for the code, for warm restart
    dict_init : array of shape (n_components, n_features),
        initial values for the dictionary, for warm restart
    verbose :
        degree of verbosity of the printed output
    random_state : int or RandomState
        Pseudo number generator state used for random sampling.

    Attributes
    ----------
    components_ : array, [n_components, n_features]
        dictionary atoms extracted from the data
    error_ : array
        vector of errors at each iteration
    n_iter_ : int
        Number of iterations run.

    Notes
    -----
    **References:**
    J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
    for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)

    See also
    --------
    SparseCoder
    MiniBatchDictionaryLearning
    SparsePCA
    MiniBatchSparsePCA
    """

    def __init__(self, n_components=None, alpha=1, max_iter=1000, tol=1e-8,
                 fit_algorithm='lars', transform_algorithm='omp',
                 transform_n_nonzero_coefs=None, transform_alpha=None,
                 n_jobs=1, code_init=None, dict_init=None, verbose=False,
                 split_sign=False, random_state=None):
        self._set_sparse_coding_params(n_components, transform_algorithm,
                                       transform_n_nonzero_coefs,
                                       transform_alpha, split_sign, n_jobs)
        # Fitting hyper-parameters.
        self.alpha = alpha
        self.max_iter = max_iter
        self.tol = tol
        self.fit_algorithm = fit_algorithm
        # Warm-start values and miscellaneous settings.
        self.code_init = code_init
        self.dict_init = dict_init
        self.verbose = verbose
        self.random_state = random_state

    def fit(self, X, y=None):
        """Fit the model from data in X.

        Parameters
        ----------
        X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples in the number of samples
            and n_features is the number of features.

        Returns
        -------
        self: object
            Returns the object itself
        """
        X = check_array(X)
        random_state = check_random_state(self.random_state)
        n_components = (X.shape[1] if self.n_components is None
                        else self.n_components)
        # dict_learning returns (code, dictionary, errors, n_iter); only
        # the dictionary and the error trace are kept on the estimator.
        code, dictionary, errors, self.n_iter_ = dict_learning(
            X, n_components, self.alpha,
            tol=self.tol, max_iter=self.max_iter,
            method=self.fit_algorithm,
            n_jobs=self.n_jobs,
            code_init=self.code_init,
            dict_init=self.dict_init,
            verbose=self.verbose,
            random_state=random_state,
            return_n_iter=True)
        self.components_ = dictionary
        self.error_ = errors
        return self
class MiniBatchDictionaryLearning(BaseEstimator, SparseCodingMixin):
    """Mini-batch dictionary learning
    Finds a dictionary (a set of atoms) that can best be used to represent data
    using a sparse code.
    Solves the optimization problem::
       (U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
                    (U,V)
                    with || V_k ||_2 = 1 for all 0 <= k < n_components
    Read more in the :ref:`User Guide <DictionaryLearning>`.
    Parameters
    ----------
    n_components : int,
        number of dictionary elements to extract
    alpha : float,
        sparsity controlling parameter
    n_iter : int,
        total number of iterations to perform
    fit_algorithm : {'lars', 'cd'}
        lars: uses the least angle regression method to solve the lasso problem
        (linear_model.lars_path)
        cd: uses the coordinate descent method to compute the
        Lasso solution (linear_model.Lasso). Lars will be faster if
        the estimated components are sparse.
    transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
        Algorithm used to transform the data.
        lars: uses the least angle regression method (linear_model.lars_path)
        lasso_lars: uses Lars to compute the Lasso solution
        lasso_cd: uses the coordinate descent method to compute the
        Lasso solution (linear_model.Lasso). lasso_lars will be faster if
        the estimated components are sparse.
        omp: uses orthogonal matching pursuit to estimate the sparse solution
        threshold: squashes to zero all coefficients less than alpha from
        the projection dictionary * X'
    transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
        Number of nonzero coefficients to target in each column of the
        solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
        and is overridden by `alpha` in the `omp` case.
    transform_alpha : float, 1. by default
        If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
        penalty applied to the L1 norm.
        If `algorithm='threshold'`, `alpha` is the absolute value of the
        threshold below which coefficients will be squashed to zero.
        If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
        the reconstruction error targeted. In this case, it overrides
        `n_nonzero_coefs`.
    split_sign : bool, False by default
        Whether to split the sparse feature vector into the concatenation of
        its negative part and its positive part. This can improve the
        performance of downstream classifiers.
    n_jobs : int,
        number of parallel jobs to run
    dict_init : array of shape (n_components, n_features),
        initial value of the dictionary for warm restart scenarios
    verbose :
        degree of verbosity of the printed output
    batch_size : int,
        number of samples in each mini-batch
    shuffle : bool,
        whether to shuffle the samples before forming batches
    random_state : int or RandomState
        Pseudo number generator state used for random sampling.
    Attributes
    ----------
    components_ : array, [n_components, n_features]
        components extracted from the data
    inner_stats_ : tuple of (A, B) ndarrays
        Internal sufficient statistics that are kept by the algorithm.
        Keeping them is useful in online settings, to avoid loosing the
        history of the evolution, but they shouldn't have any use for the
        end user.
        A (n_components, n_components) is the dictionary covariance matrix.
        B (n_features, n_components) is the data approximation matrix
    n_iter_ : int
        Number of iterations run.
    Notes
    -----
    **References:**
    J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
    for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
    See also
    --------
    SparseCoder
    DictionaryLearning
    SparsePCA
    MiniBatchSparsePCA
    """
    def __init__(self, n_components=None, alpha=1, n_iter=1000,
                 fit_algorithm='lars', n_jobs=1, batch_size=3,
                 shuffle=True, dict_init=None, transform_algorithm='omp',
                 transform_n_nonzero_coefs=None, transform_alpha=None,
                 verbose=False, split_sign=False, random_state=None):
        # Shared sparse-coding parameters (n_components, transform_*, ...).
        self._set_sparse_coding_params(n_components, transform_algorithm,
                                       transform_n_nonzero_coefs,
                                       transform_alpha, split_sign, n_jobs)
        # Mini-batch fitting hyper-parameters.
        self.alpha = alpha
        self.n_iter = n_iter
        self.fit_algorithm = fit_algorithm
        self.dict_init = dict_init
        self.verbose = verbose
        self.shuffle = shuffle
        self.batch_size = batch_size
        self.split_sign = split_sign
        self.random_state = random_state
    def fit(self, X, y=None):
        """Fit the model from data in X.
        Parameters
        ----------
        X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples in the number of samples
            and n_features is the number of features.
        Returns
        -------
        self : object
            Returns the instance itself.
        """
        random_state = check_random_state(self.random_state)
        X = check_array(X)
        # return_inner_stats=True yields (dictionary, (A, B), n_iter);
        # the code is not needed here (return_code=False).
        U, (A, B), self.n_iter_ = dict_learning_online(
            X, self.n_components, self.alpha,
            n_iter=self.n_iter, return_code=False,
            method=self.fit_algorithm,
            n_jobs=self.n_jobs, dict_init=self.dict_init,
            batch_size=self.batch_size, shuffle=self.shuffle,
            verbose=self.verbose, random_state=random_state,
            return_inner_stats=True,
            return_n_iter=True)
        self.components_ = U
        # Keep track of the state of the algorithm to be able to do
        # some online fitting (partial_fit)
        self.inner_stats_ = (A, B)
        self.iter_offset_ = self.n_iter
        return self
    def partial_fit(self, X, y=None, iter_offset=None):
        """Updates the model using the data in X as a mini-batch.
        Parameters
        ----------
        X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples in the number of samples
            and n_features is the number of features.
        iter_offset: integer, optional
            The number of iteration on data batches that has been
            performed before this call to partial_fit. This is optional:
            if no number is passed, the memory of the object is
            used.
        Returns
        -------
        self : object
            Returns the instance itself.
        """
        # Lazily create and then reuse a single RandomState across calls,
        # so successive partial_fit calls continue one random stream.
        if not hasattr(self, 'random_state_'):
            self.random_state_ = check_random_state(self.random_state)
        X = check_array(X)
        # Warm-start from the current dictionary when already fitted.
        if hasattr(self, 'components_'):
            dict_init = self.components_
        else:
            dict_init = self.dict_init
        inner_stats = getattr(self, 'inner_stats_', None)
        if iter_offset is None:
            iter_offset = getattr(self, 'iter_offset_', 0)
        # batch_size=len(X) treats the given X as one mini-batch, and
        # shuffle=False preserves the caller's sample order.
        U, (A, B) = dict_learning_online(
            X, self.n_components, self.alpha,
            n_iter=self.n_iter, method=self.fit_algorithm,
            n_jobs=self.n_jobs, dict_init=dict_init,
            batch_size=len(X), shuffle=False,
            verbose=self.verbose, return_code=False,
            iter_offset=iter_offset, random_state=self.random_state_,
            return_inner_stats=True, inner_stats=inner_stats)
        self.components_ = U
        # Keep track of the state of the algorithm to be able to do
        # some online fitting (partial_fit)
        self.inner_stats_ = (A, B)
        self.iter_offset_ = iter_offset + self.n_iter
        return self
| bsd-3-clause |
jat255/hyperspyUI | hyperspyui/plugins/virtual_aperture.py | 3 | 4204 | # -*- coding: utf-8 -*-
# Copyright 2014-2016 The HyperSpyUI developers
#
# This file is part of HyperSpyUI.
#
# HyperSpyUI is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpyUI is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpyUI. If not, see <http://www.gnu.org/licenses/>.
from hyperspyui.plugins.plugin import Plugin
import numpy as np
import hyperspy.api as hs
from hyperspy.drawing import utils
from functools import partial
class VirtualBfDf(Plugin):
    """HyperSpyUI plugin providing virtual bright-field/dark-field tools.

    Adds menu actions that place a circular (optionally annular) virtual
    aperture on a signal's signal plane and plot -- or navigate by -- the
    mean intensity inside it.
    """
    name = "Virtual BF/DF"

    def __init__(self, *args, **kwargs):
        super(VirtualBfDf, self).__init__(*args, **kwargs)
        # Keep created ROIs alive; entries are removed in _on_close when
        # their figure windows are closed.
        self._rois = []

    def create_actions(self):
        self.add_action(self.name + '.virtual_navigator',
                        "Virtual navigator",
                        self.virtual_navigator,
                        tip="Set the navigator intensity by a virtual "
                            "aperture")
        self.add_action(self.name + '.virtual_aperture',
                        "Virtual aperture",
                        self.virtual_aperture,
                        tip="Add a virtual aperture to the diffraction image")
        self.add_action(self.name + '.virtual_annulus',
                        "Virtual annulus",
                        self.virtual_annulus,
                        tip="Add a virtual annulus to the diffraction image")

    def create_menu(self):
        self.add_menuitem(
            'Diffraction', self.ui.actions[self.name + '.virtual_navigator'])
        self.add_menuitem(
            'Diffraction', self.ui.actions[self.name + '.virtual_aperture'])
        self.add_menuitem(
            'Diffraction', self.ui.actions[self.name + '.virtual_annulus'])

    def _on_close(self, roi):
        """Tear down `roi` when the figure showing it is closed."""
        for w in roi.widgets:
            w.close()
        roi.signal_map.clear()
        if roi in self._rois:
            self._rois.remove(roi)

    def virtual_navigator(self, signal=None):
        """Use a virtual aperture image as the signal's navigator."""
        return self.virtual_aperture(signal=signal, navigate=True)

    def virtual_annulus(self, signal=None):
        """Add an annular (ring-shaped) virtual aperture."""
        return self.virtual_aperture(signal=signal, annulus=True)

    def virtual_aperture(self, signal=None, annulus=False, navigate=False):
        """Add a circular virtual aperture to a signal.

        Parameters
        ----------
        signal : optional
            Signal to operate on; defaults to the UI's currently selected
            signal.
        annulus : bool
            If True, give the aperture an inner radius (ring shape).
        navigate : bool
            If True, use the virtual image as the signal's navigator
            instead of plotting it in its own window.
        """
        ui = self.ui
        if signal is None:
            signal = ui.get_selected_signal()
        # Centre of the signal plane: mid-point of each signal axis range.
        dd = np.array([a.high_value + a.low_value for a in
                       signal.axes_manager.signal_axes]) / 2.0
        # Initial outer radius: three scale units of the first signal axis.
        r = hs.roi.CircleROI(dd[0], dd[1],
                             signal.axes_manager.signal_axes[0].scale*3)
        if annulus:
            r.r_inner = signal.axes_manager.signal_axes[0].scale*2
        # Slice the signal by the ROI; updated interactively as it moves.
        s_virtual = r.interactive(signal, None,
                                  axes=signal.axes_manager.signal_axes)
        # Mean intensity inside the aperture, recomputed on data changes.
        s_nav = hs.interactive(
            s_virtual.mean,
            s_virtual.events.data_changed,
            axis=s_virtual.axes_manager.signal_axes)
        s_nav.axes_manager.set_signal_dimension(2)
        if navigate:
            signal.plot(navigator=s_nav)
            signal._plot.navigator_plot.update()
            # Refresh the navigator whenever the virtual image changes.
            s_nav.events.data_changed.connect(
                signal._plot.navigator_plot.update, [])
            utils.on_figure_window_close(
                signal._plot.navigator_plot.figure,
                partial(self._on_close, r))
        else:
            s_nav.plot()
            utils.on_figure_window_close(
                s_nav._plot.signal_plot.figure,
                partial(self._on_close, r))
        if navigate:
            r.add_widget(signal, axes=signal.axes_manager.signal_axes,
                         color='darkorange')
        else:
            r.add_widget(signal, axes=signal.axes_manager.signal_axes)
        self._rois.append(r)
        # Record both flags so replaying the recorded code also reproduces
        # annular apertures (the annulus flag used to be dropped).
        self.record_code("<p>.virtual_aperture(annulus=%s, navigate=%s)"
                         % (annulus, navigate))
| gpl-3.0 |
philoniare/horizon | horizon/tables/base.py | 2 | 71299 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import json
import logging
from operator import attrgetter
import sys
from django.core import exceptions as core_exceptions
from django.core import urlresolvers
from django import forms
from django.http import HttpResponse # noqa
from django import template
from django.template.defaultfilters import slugify # noqa
from django.template.defaultfilters import truncatechars # noqa
from django.template.loader import render_to_string
from django.utils.html import escape
from django.utils import http
from django.utils.http import urlencode
from django.utils.safestring import mark_safe
from django.utils import termcolors
from django.utils.translation import ugettext_lazy as _
import six
from horizon import conf
from horizon import exceptions
from horizon import messages
from horizon.tables.actions import FilterAction # noqa
from horizon.tables.actions import LinkAction # noqa
from horizon.utils import html
# Module-level logger named after this module.
LOG = logging.getLogger(__name__)
# ANSI color palette from django.utils.termcolors; consumers are further
# down the file (not visible in this chunk).
PALETTE = termcolors.PALETTES[termcolors.DEFAULT_PALETTE]
# Separator used when composing compound string identifiers -- presumably
# table/action name parts; verify against the usages below.
STRING_SEPARATOR = "__"
@six.python_2_unicode_compatible
class Column(html.HTMLElement):
    """A class which represents a single column in a :class:`.DataTable`.

    .. attribute:: transform

        A string or callable. If ``transform`` is a string, it should be the
        name of the attribute on the underlying data class which
        should be displayed in this column. If it is a callable, it
        will be passed the current row's data at render-time and should
        return the contents of the cell. Required.

    .. attribute:: verbose_name

        The name for this column which should be used for display purposes.
        Defaults to the value of ``transform`` with the first letter
        of each word capitalized if the ``transform`` is not callable,
        otherwise it defaults to an empty string (``""``).

    .. attribute:: sortable

        Boolean to determine whether this column should be sortable or not.
        Defaults to ``True``.

    .. attribute:: hidden

        Boolean to determine whether or not this column should be displayed
        when rendering the table. Default: ``False``.

    .. attribute:: link

        A string or callable which returns a URL which will be wrapped around
        this column's text as a link.

    .. attribute:: allowed_data_types

        A list of data types for which the link should be created.
        Default is an empty list (``[]``).
        When the list is empty and the ``link`` attribute is not None, all the
        rows under this column will be links.

    .. attribute:: status

        Boolean designating whether or not this column represents a status
        (i.e. "enabled/disabled", "up/down", "active/inactive").
        Default: ``False``.

    .. attribute:: status_choices

        A tuple of tuples representing the possible data values for the
        status column and their associated boolean equivalent. Positive
        states should equate to ``True``, negative states should equate
        to ``False``, and indeterminate states should be ``None``.
        Values are compared in a case-insensitive manner.
        Example (these are also the default values)::

            status_choices = (
                ('enabled', True),
                ('true', True),
                ('up', True),
                ('active', True),
                ('yes', True),
                ('on', True),
                ('none', None),
                ('unknown', None),
                ('', None),
                ('disabled', False),
                ('down', False),
                ('false', False),
                ('inactive', False),
                ('no', False),
                ('off', False),
            )

    .. attribute:: display_choices

        A tuple of tuples representing the possible values to substitute
        the data when displayed in the column cell.

    .. attribute:: empty_value

        A string or callable to be used for cells which have no data.
        Defaults to the string ``"-"``.

    .. attribute:: summation

        A string containing the name of a summation method to be used in
        the generation of a summary row for this column. By default the
        options are ``"sum"`` or ``"average"``, which behave as expected.
        Optional.

    .. attribute:: filters

        A list of functions (often template filters) to be applied to the
        value of the data for this column prior to output. This is effectively
        a shortcut for writing a custom ``transform`` function in simple cases.

    .. attribute:: classes

        An iterable of CSS classes which should be added to this column.
        Example: ``classes=('foo', 'bar')``.

    .. attribute:: attrs

        A dict of HTML attribute strings which should be added to this column.
        Example: ``attrs={"data-foo": "bar"}``.

    .. attribute:: cell_attributes_getter

        A callable to get the HTML attributes of a column cell depending
        on the data. For example, to add additional description or help
        information for data in a column cell (e.g. in Images panel, for the
        column 'format')::

            helpText = {
                'ARI': 'Amazon Ramdisk Image',
                'QCOW2': 'QEMU Emulator'
            }

            def getHoverHelp(data):
                text = helpText.get(data, None)
                if text:
                    return {'title': text}
                else:
                    return {}
            ...
            ...
            cell_attributes_getter = getHoverHelp

    .. attribute:: truncate

        An integer for the maximum length of the string in this column. If the
        length of the data in this column is larger than the supplied number,
        the data for this column will be truncated and an ellipsis will be
        appended to the truncated data.
        Defaults to ``None``.

    .. attribute:: link_classes

        An iterable of CSS classes which will be added when the column's text
        is displayed as a link.
        This is left for backward compatibility. Deprecated in favor of the
        link_attributes attribute.
        Example: ``link_classes=('link-foo', 'link-bar')``.
        Defaults to ``None``.

    .. attribute:: wrap_list

        Boolean value indicating whether the contents of this cell should be
        wrapped in a ``<ul></ul>`` tag. Useful in conjunction with Django's
        ``unordered_list`` template filter. Defaults to ``False``.

    .. attribute:: form_field

        A form field used for inline editing of the column. A django
        forms.Field can be used or django form.Widget can be used.
        Example: ``form_field=forms.CharField(required=True)``.
        Defaults to ``None``.

    .. attribute:: form_field_attributes

        The additional html attributes that will be rendered to form_field.
        Example: ``form_field_attributes={'class': 'bold_input_field'}``.
        Defaults to ``None``.

    .. attribute:: update_action

        The class that inherits from tables.actions.UpdateAction, update_cell
        method takes care of saving inline edited data. The tables.base.Row
        get_data method needs to be connected to table for obtaining the data.
        Example: ``update_action=UpdateCell``.
        Defaults to ``None``.

    .. attribute:: link_attrs

        A dict of HTML attribute strings which should be added when the
        column's text is displayed as a link.
        Examples:
        ``link_attrs={"data-foo": "bar"}``.
        ``link_attrs={"target": "_blank", "class": "link-foo link-bar"}``.
        Defaults to ``None``.

    .. attribute:: help_text

        A string of simple help text displayed in a tooltip when you hover
        over the help icon beside the Column name. Defaults to ``None``.
    """
    summation_methods = {
        "sum": sum,
        "average": lambda data: sum(data, 0.0) / len(data)
    }
    # Used to retain order when instantiating columns on a table
    creation_counter = 0
    transform = None
    name = None
    verbose_name = None
    status_choices = (
        ('enabled', True),
        ('true', True),
        ('up', True),
        ('yes', True),
        ('active', True),
        ('on', True),
        ('none', None),
        ('unknown', None),
        ('', None),
        ('disabled', False),
        ('down', False),
        ('false', False),
        ('inactive', False),
        ('no', False),
        ('off', False),
    )

    def __init__(self, transform, verbose_name=None, sortable=True,
                 link=None, allowed_data_types=None, hidden=False, attrs=None,
                 status=False, status_choices=None, display_choices=None,
                 empty_value=None, filters=None, classes=None, summation=None,
                 auto=None, truncate=None, link_classes=None, wrap_list=False,
                 form_field=None, form_field_attributes=None,
                 update_action=None, link_attrs=None,
                 cell_attributes_getter=None, help_text=None):
        self.classes = list(classes or getattr(self, "classes", []))
        super(Column, self).__init__()
        self.attrs.update(attrs or {})
        if callable(transform):
            self.transform = transform
            self.name = "<%s callable>" % transform.__name__
        else:
            self.transform = six.text_type(transform)
            self.name = self.transform
        # Empty string is a valid value for verbose_name
        if verbose_name is None:
            if callable(transform):
                self.verbose_name = ''
            else:
                self.verbose_name = self.transform.title()
        else:
            self.verbose_name = verbose_name
        self.auto = auto
        self.sortable = sortable
        self.link = link
        # NOTE: the default used to be a mutable ``[]`` default argument,
        # which is shared across every Column instance; normalize ``None``
        # to a fresh list per instance instead.
        self.allowed_data_types = allowed_data_types or []
        self.hidden = hidden
        self.status = status
        self.empty_value = empty_value or _('-')
        self.filters = filters or []
        self.truncate = truncate
        self.wrap_list = wrap_list
        self.form_field = form_field
        self.form_field_attributes = form_field_attributes or {}
        self.update_action = update_action
        self.link_attrs = link_attrs or {}
        self.help_text = help_text
        if link_classes:
            # Deprecated link_classes folds into link_attrs['class'].
            self.link_attrs['class'] = ' '.join(link_classes)
        self.cell_attributes_getter = cell_attributes_getter
        if status_choices:
            self.status_choices = status_choices
        self.display_choices = display_choices
        if summation is not None and summation not in self.summation_methods:
            raise ValueError("Summation method %s must be one of %s."
                             % (summation,
                                ", ".join(self.summation_methods.keys())))
        self.summation = summation
        self.creation_counter = Column.creation_counter
        Column.creation_counter += 1
        if self.sortable and not self.auto:
            self.classes.append("sortable")
        if self.hidden:
            self.classes.append("hide")
        if self.link is not None:
            self.classes.append('anchor')

    def __str__(self):
        return six.text_type(self.verbose_name)

    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self.name)

    def get_raw_data(self, datum):
        """Returns the raw data for this column, before any filters or
        formatting are applied to it. This is useful when doing calculations
        on data in the table.
        """
        # Callable transformations
        if callable(self.transform):
            data = self.transform(datum)
        # Dict lookups
        elif isinstance(datum, collections.Mapping) and \
                self.transform in datum:
            data = datum.get(self.transform)
        else:
            # Basic object lookups
            try:
                data = getattr(datum, self.transform)
            except AttributeError:
                msg = _("The attribute %(attr)s doesn't exist on "
                        "%(obj)s.") % {'attr': self.transform, 'obj': datum}
                msg = termcolors.colorize(msg, **PALETTE['ERROR'])
                LOG.warning(msg)
                data = None
        return data

    def get_data(self, datum):
        """Returns the final display data for this column from the given
        inputs.

        The return value will be either the attribute specified for this column
        or the return value of the attr:`~horizon.tables.Column.transform`
        method for this column.
        """
        datum_id = self.table.get_object_id(datum)
        # Per-table memoization keyed by object id.
        if datum_id in self.table._data_cache[self]:
            return self.table._data_cache[self][datum_id]
        data = self.get_raw_data(datum)
        display_value = None
        if self.display_choices:
            display_value = [display for (value, display) in
                             self.display_choices
                             if value.lower() == (data or '').lower()]
        if display_value:
            data = display_value[0]
        else:
            for filter_func in self.filters:
                try:
                    data = filter_func(data)
                except Exception:
                    # A failing filter leaves the data unfiltered rather than
                    # breaking the whole table render.
                    msg = ("Filter '%(filter)s' failed with data "
                           "'%(data)s' on column '%(col_name)s'")
                    args = {'filter': filter_func.__name__,
                            'data': data,
                            'col_name': six.text_type(self.verbose_name)}
                    LOG.warning(msg, args)
        if data and self.truncate:
            data = truncatechars(data, self.truncate)
        self.table._data_cache[self][datum_id] = data
        return self.table._data_cache[self][datum_id]

    def get_link_url(self, datum):
        """Returns the final value for the column's ``link`` property.

        If ``allowed_data_types`` of this column is not empty and the datum
        has an assigned type, check if the datum's type is in the
        ``allowed_data_types`` list. If not, the datum won't be displayed
        as a link.

        If ``link`` is a callable, it will be passed the current data object
        and should return a URL. Otherwise ``get_link_url`` will attempt to
        call ``reverse`` on ``link`` with the object's id as a parameter.
        Failing that, it will simply return the value of ``link``.
        """
        if self.allowed_data_types:
            data_type_name = self.table._meta.data_type_name
            data_type = getattr(datum, data_type_name, None)
            if data_type and (data_type not in self.allowed_data_types):
                return None
        obj_id = self.table.get_object_id(datum)
        if callable(self.link):
            return self.link(datum)
        try:
            return urlresolvers.reverse(self.link, args=(obj_id,))
        except urlresolvers.NoReverseMatch:
            return self.link

    def get_summation(self):
        """Returns the summary value for the data in this column if a
        valid summation method is specified for it. Otherwise returns ``None``.
        """
        if self.summation not in self.summation_methods:
            return None
        summation_function = self.summation_methods[self.summation]
        data = [self.get_raw_data(datum) for datum in self.table.data]
        # BUG FIX: ``filter()`` returns a lazy iterator on Python 3, which
        # has no ``len()`` — the subsequent ``len(data)`` would raise
        # TypeError. Materialize the non-None values as a list instead.
        data = [datum for datum in data if datum is not None]
        if len(data):
            try:
                summation = summation_function(data)
                for filter_func in self.filters:
                    summation = filter_func(summation)
                return summation
            except TypeError:
                # Heterogeneous / non-numeric data: no summary row value.
                pass
        return None
class Row(html.HTMLElement):
    """Represents a row in the table.
    When iterated, the ``Row`` instance will yield each of its cells.
    Rows are capable of AJAX updating, with a little added work:
    The ``ajax`` property needs to be set to ``True``, and
    subclasses need to define a ``get_data`` method which returns a data
    object appropriate for consumption by the table (effectively the "get"
    lookup versus the table's "list" lookup).
    The automatic update interval is configurable by setting the key
    ``ajax_poll_interval`` in the ``HORIZON_CONFIG`` dictionary.
    Default: ``2500`` (measured in milliseconds).
    .. attribute:: table
        The table which this row belongs to.
    .. attribute:: datum
        The data object which this row represents.
    .. attribute:: id
        A string uniquely representing this row composed of the table name
        and the row data object's identifier.
    .. attribute:: cells
        The cells belonging to this row stored in a ``OrderedDict`` object.
        This attribute is populated during instantiation.
    .. attribute:: status
        Boolean value representing the status of this row calculated from
        the values of the table's ``status_columns`` if they are set.
    .. attribute:: status_class
        Returns a css class for the status of the row based on ``status``.
    .. attribute:: ajax
        Boolean value to determine whether ajax updating for this row is
        enabled.
    .. attribute:: ajax_action_name
        String that is used for the query parameter key to request AJAX
        updates. Generally you won't need to change this value.
        Default: ``"row_update"``.
    .. attribute:: ajax_cell_action_name
        String that is used for the query parameter key to request AJAX
        updates of cell. Generally you won't need to change this value.
        It is also used for inline edit of the cell.
        Default: ``"cell_update"``.
    """
    # Class-level defaults; subclasses opt in to AJAX by setting ajax = True.
    ajax = False
    ajax_action_name = "row_update"
    ajax_cell_action_name = "cell_update"
    def __init__(self, table, datum=None):
        super(Row, self).__init__()
        self.table = table
        self.datum = datum
        self.selected = False
        # Two-step loading: if no datum is given yet, defer cell creation
        # until load_cells() is called explicitly with the data.
        if self.datum:
            self.load_cells()
        else:
            self.id = None
            self.cells = []
    def load_cells(self, datum=None):
        """Load the row's data (either provided at initialization or as an
        argument to this function), initialize all the cells contained
        by this row, and set the appropriate row properties which require
        the row's data to be determined.
        This function is called automatically by
        :meth:`~horizon.tables.Row.__init__` if the ``datum`` argument is
        provided. However, by not providing the data during initialization
        this function allows for the possibility of a two-step loading
        pattern when you need a row instance but don't yet have the data
        available.
        """
        # Compile all the cells on instantiation.
        table = self.table
        if datum:
            self.datum = datum
        else:
            datum = self.datum
        cells = []
        for column in table.columns.values():
            # cell_class is configurable via the table's Meta options.
            cell = table._meta.cell_class(datum, column, self)
            cells.append((column.name or column.auto, cell))
        self.cells = collections.OrderedDict(cells)
        if self.ajax:
            # Wire up the data-* attributes the client-side poller reads.
            interval = conf.HORIZON_CONFIG['ajax_poll_interval']
            self.attrs['data-update-interval'] = interval
            self.attrs['data-update-url'] = self.get_ajax_update_url()
            self.classes.append("ajax-update")
        self.attrs['data-object-id'] = table.get_object_id(datum)
        # Add the row's status class and id to the attributes to be rendered.
        self.classes.append(self.status_class)
        id_vals = {"table": self.table.name,
                   "sep": STRING_SEPARATOR,
                   "id": table.get_object_id(datum)}
        self.id = "%(table)s%(sep)srow%(sep)s%(id)s" % id_vals
        self.attrs['id'] = self.id
        # Add the row's display name if available
        display_name = table.get_object_display(datum)
        if display_name:
            self.attrs['data-display'] = escape(display_name)
    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self.id)
    def __iter__(self):
        # Iterating a row yields its cells in column order.
        return iter(self.cells.values())
    @property
    def status(self):
        # NOTE(review): implicitly returns None when the table defines no
        # status_columns — callers appear to rely on that falsy value.
        column_names = self.table._meta.status_columns
        if column_names:
            statuses = dict([(column_name, self.cells[column_name].status) for
                             column_name in column_names])
            return self.table.calculate_row_status(statuses)
    @property
    def status_class(self):
        # CSS class derived from the aggregated row status; empty string
        # when there are no status columns.
        column_names = self.table._meta.status_columns
        if column_names:
            return self.table.get_row_status_class(self.status)
        else:
            return ''
    def render(self):
        return render_to_string("horizon/common/_data_table_row.html",
                                {"row": self})
    def get_cells(self):
        """Returns the bound cells for this row in order."""
        return self.cells.values()
    def get_ajax_update_url(self):
        # URL polled by the client to refresh this single row.
        table_url = self.table.get_absolute_url()
        params = urlencode(collections.OrderedDict([
            ("action", self.ajax_action_name),
            ("table", self.table.name),
            ("obj_id", self.table.get_object_id(self.datum))
        ]))
        return "%s?%s" % (table_url, params)
    def can_be_selected(self, datum):
        """By default if multiselect enabled return True. You can remove the
        checkbox after an ajax update here if required.
        """
        return True
    def get_data(self, request, obj_id):
        """Fetches the updated data for the row based on the object id
        passed in. Must be implemented by a subclass to allow AJAX updating.
        """
        return {}
class Cell(html.HTMLElement):
    """Represents a single cell in the table.

    A cell binds one data object (``datum``) to one :class:`.Column` inside
    a :class:`.Row`; it computes the display value plus the HTML attributes
    (tooltip, inline-edit hooks, AJAX update URL) for that intersection.
    """
    def __init__(self, datum, column, row, attrs=None, classes=None):
        self.classes = classes or getattr(self, "classes", [])
        super(Cell, self).__init__()
        self.attrs.update(attrs or {})
        self.datum = datum
        self.column = column
        self.row = row
        self.wrap_list = column.wrap_list
        self.inline_edit_available = self.column.update_action is not None
        # initialize the update action if available
        if self.inline_edit_available:
            self.update_action = self.column.update_action()
            self.attrs['data-cell-name'] = column.name
            self.attrs['data-update-url'] = self.get_ajax_update_url()
        self.inline_edit_mod = False
        # add tooltip to cells if the truncate variable is set
        if column.truncate:
            # NOTE(tsufiev): trying to pull cell raw data out of datum for
            # those columns where truncate is False leads to multiple errors
            # in unit tests
            data = getattr(datum, column.name, '') or ''
            if len(data) > column.truncate:
                self.attrs['data-toggle'] = 'tooltip'
                self.attrs['title'] = data
                self.attrs['data-selenium'] = data
        self.data = self.get_data(datum, column, row)

    def get_data(self, datum, column, row):
        """Fetches the data to be displayed in this cell."""
        table = row.table
        if column.auto == "multi_select":
            # Auto-generated checkbox column for bulk actions.
            data = ""
            if row.can_be_selected(datum):
                widget = forms.CheckboxInput(check_test=lambda value: False)
                # Convert value to string to avoid accidental type conversion
                data = widget.render('object_ids',
                                     six.text_type(table.get_object_id(datum)),
                                     {'class': 'table-row-multi-select'})
            table._data_cache[column][table.get_object_id(datum)] = data
        elif column.auto == "form_field":
            # Inline-editable column rendered through a Django form widget.
            widget = column.form_field
            if issubclass(widget.__class__, forms.Field):
                widget = widget.widget
            widget_name = "%s__%s" % \
                (column.name,
                 six.text_type(table.get_object_id(datum)))
            # Create local copy of attributes, so it don't change column
            # class form_field_attributes
            form_field_attributes = {}
            form_field_attributes.update(column.form_field_attributes)
            # Adding id of the input so it pairs with label correctly
            form_field_attributes['id'] = widget_name
            # urlize/yesno filters must see the raw data, not the already
            # filtered display value, to render correctly.
            if (template.defaultfilters.urlize in column.filters or
                    template.defaultfilters.yesno in column.filters):
                data = widget.render(widget_name,
                                     column.get_raw_data(datum),
                                     form_field_attributes)
            else:
                data = widget.render(widget_name,
                                     column.get_data(datum),
                                     form_field_attributes)
            table._data_cache[column][table.get_object_id(datum)] = data
        elif column.auto == "actions":
            data = table.render_row_actions(datum, pull_right=False)
            table._data_cache[column][table.get_object_id(datum)] = data
        else:
            data = column.get_data(datum)
            if column.cell_attributes_getter:
                cell_attributes = column.cell_attributes_getter(data) or {}
                self.attrs.update(cell_attributes)
        return data

    def __repr__(self):
        return '<%s: %s, %s>' % (self.__class__.__name__,
                                 self.column.name,
                                 self.row.id)

    @property
    def id(self):
        return ("%s__%s" % (self.column.name,
                six.text_type(self.row.table.get_object_id(self.datum))))

    @property
    def value(self):
        """Returns a formatted version of the data for final output.

        This takes into consideration the
        :attr:`~horizon.tables.Column.link`` and
        :attr:`~horizon.tables.Column.empty_value`
        attributes.
        """
        try:
            data = self.column.get_data(self.datum)
            if data is None:
                if callable(self.column.empty_value):
                    data = self.column.empty_value(self.datum)
                else:
                    data = self.column.empty_value
        except Exception:
            data = None
            exc_info = sys.exc_info()
            # BUG FIX: six.reraise() raises internally and never returns, so
            # prefixing it with ``raise`` was wrong — had reraise ever
            # returned, ``raise None`` would mask the original error with a
            # TypeError.
            six.reraise(template.TemplateSyntaxError, exc_info[1],
                        exc_info[2])
        if self.url and not self.column.auto == "form_field":
            link_attrs = ' '.join(['%s="%s"' % (k, v) for (k, v) in
                                   self.column.link_attrs.items()])
            # Escape the data inside while allowing our HTML to render
            data = mark_safe('<a href="%s" %s>%s</a>' % (
                             (escape(self.url),
                              link_attrs,
                              escape(six.text_type(data)))))
        return data

    @property
    def url(self):
        if self.column.link:
            url = self.column.get_link_url(self.datum)
            if url:
                return url
        else:
            return None

    @property
    def status(self):
        """Gets the status for the column based on the cell's data."""
        # Deal with status column mechanics based in this cell's data
        if hasattr(self, '_status'):
            # Cached on first evaluation.
            return self._status
        if self.column.status or \
                self.column.name in self.column.table._meta.status_columns:
            # returns the first matching status found
            data_status_lower = six.text_type(
                self.column.get_raw_data(self.datum)).lower()
            for status_name, status_value in self.column.status_choices:
                if six.text_type(status_name).lower() == data_status_lower:
                    self._status = status_value
                    return self._status
        self._status = None
        return self._status

    def get_status_class(self, status):
        """Returns a css class name determined by the status value."""
        if status is True:
            return "status_up"
        elif status is False:
            return "status_down"
        else:
            return "status_unknown"

    def get_default_classes(self):
        """Returns a flattened string of the cell's CSS classes."""
        if not self.url:
            self.column.classes = [cls for cls in self.column.classes
                                   if cls != "anchor"]
        column_class_string = self.column.get_final_attrs().get('class', "")
        classes = set(column_class_string.split(" "))
        if self.column.status:
            classes.add(self.get_status_class(self.status))
        if self.inline_edit_available:
            classes.add("inline_edit_available")
        return list(classes)

    def get_ajax_update_url(self):
        # URL used both for AJAX cell refresh and inline-edit round trips.
        column = self.column
        table_url = column.table.get_absolute_url()
        params = urlencode(collections.OrderedDict([
            ("action", self.row.ajax_cell_action_name),
            ("table", column.table.name),
            ("cell_name", column.name),
            ("obj_id", column.table.get_object_id(self.datum))
        ]))
        return "%s?%s" % (table_url, params)

    @property
    def update_allowed(self):
        """Determines whether update of given cell is allowed.

        Calls allowed action of defined UpdateAction of the Column.
        """
        return self.update_action.allowed(self.column.table.request,
                                          self.datum,
                                          self)

    def render(self):
        return render_to_string("horizon/common/_data_table_cell.html",
                                {"cell": self})
class DataTableOptions(object):
    """Contains options for :class:`.DataTable` objects.

    .. attribute:: name
        A short name or slug for the table.

    .. attribute:: verbose_name
        A more verbose name for the table meant for display purposes.

    .. attribute:: columns
        A list of column objects or column names. Controls ordering/display
        of the columns in the table.

    .. attribute:: table_actions
        A list of action classes derived from the
        :class:`~horizon.tables.Action` class. These actions will handle tasks
        such as bulk deletion, etc. for multiple objects at once.

    .. attribute:: table_actions_menu
        A list of action classes similar to ``table_actions`` except these
        will be displayed in a menu instead of as individual buttons. Actions
        from this list will take precedence over actions from the
        ``table_actions`` list.

    .. attribute:: row_actions
        A list similar to ``table_actions`` except tailored to appear for
        each row. These actions act on a single object at a time.

    .. attribute:: actions_column
        Boolean value to control rendering of an additional column containing
        the various actions for each row. Defaults to ``True`` if any actions
        are specified in the ``row_actions`` option.

    .. attribute:: multi_select
        Boolean value to control rendering of an extra column with checkboxes
        for selecting multiple objects in the table. Defaults to ``True`` if
        any actions are specified in the ``table_actions`` option.

    .. attribute:: filter
        Boolean value to control the display of the "filter" search box
        in the table actions. By default it checks whether or not an instance
        of :class:`.FilterAction` is in :attr:`.table_actions`.

    .. attribute:: template
        String containing the template which should be used to render the
        table. Defaults to ``"horizon/common/_data_table.html"``.

    .. attribute:: context_var_name
        The name of the context variable which will contain the table when
        it is rendered. Defaults to ``"table"``.

    .. attribute:: prev_pagination_param
        The name of the query string parameter which will be used when
        paginating backward in this table. When using multiple tables in a
        single view this will need to be changed to differentiate between the
        tables. Default: ``"prev_marker"``.

    .. attribute:: pagination_param
        The name of the query string parameter which will be used when
        paginating forward in this table. When using multiple tables in a
        single view this will need to be changed to differentiate between the
        tables. Default: ``"marker"``.

    .. attribute:: status_columns
        A list or tuple of column names which represents the "state"
        of the data object being represented.
        If ``status_columns`` is set, when the rows are rendered the value
        of this column will be used to add an extra class to the row in
        the form of ``"status_up"`` or ``"status_down"`` for that row's
        data.
        The row status is used by other Horizon components to trigger tasks
        such as dynamic AJAX updating.

    .. attribute:: cell_class
        The class which should be used for rendering the cells of this table.
        Optional. Default: :class:`~horizon.tables.Cell`.

    .. attribute:: row_class
        The class which should be used for rendering the rows of this table.
        Optional. Default: :class:`~horizon.tables.Row`.

    .. attribute:: column_class
        The class which should be used for handling the columns of this table.
        Optional. Default: :class:`~horizon.tables.Column`.

    .. attribute:: css_classes
        A custom CSS class or classes to add to the ``<table>`` tag of the
        rendered table, for when the particular table requires special styling.
        Default: ``""``.

    .. attribute:: mixed_data_type
        A toggle to indicate if the table accepts two or more types of data.
        Optional. Default: ``False``

    .. attribute:: data_types
        A list of data types that this table would accept. Default to be an
        empty list, but if the attribute ``mixed_data_type`` is set to
        ``True``, then this list must have at least one element.

    .. attribute:: data_type_name
        The name of an attribute to assign to data passed to the table when it
        accepts mix data. Default: ``"_table_data_type"``

    .. attribute:: footer
        Boolean to control whether or not to show the table's footer.
        Default: ``True``.

    .. attribute:: hidden_title
        Boolean to control whether or not to show the table's title.
        Default: ``True``.

    .. attribute:: permissions
        A list of permission names which this table requires in order to be
        displayed. Defaults to an empty list (``[]``).
    """
    def __init__(self, options):
        self.name = getattr(options, 'name', self.__class__.__name__)
        verbose_name = (getattr(options, 'verbose_name', None)
                        or self.name.title())
        self.verbose_name = verbose_name
        self.columns = getattr(options, 'columns', None)
        self.status_columns = getattr(options, 'status_columns', [])
        self.table_actions = getattr(options, 'table_actions', [])
        self.row_actions = getattr(options, 'row_actions', [])
        self.table_actions_menu = getattr(options, 'table_actions_menu', [])
        self.cell_class = getattr(options, 'cell_class', Cell)
        self.row_class = getattr(options, 'row_class', Row)
        self.column_class = getattr(options, 'column_class', Column)
        self.css_classes = getattr(options, 'css_classes', '')
        self.prev_pagination_param = getattr(options,
                                             'prev_pagination_param',
                                             'prev_marker')
        self.pagination_param = getattr(options, 'pagination_param', 'marker')
        self.browser_table = getattr(options, 'browser_table', None)
        self.footer = getattr(options, 'footer', True)
        self.hidden_title = getattr(options, 'hidden_title', True)
        self.no_data_message = getattr(options,
                                       "no_data_message",
                                       _("No items to display."))
        self.permissions = getattr(options, 'permissions', [])
        # Set self.filter if we have any FilterActions
        filter_actions = [action for action in self.table_actions if
                          issubclass(action, FilterAction)]
        if len(filter_actions) > 1:
            raise NotImplementedError("Multiple filter actions are not "
                                      "currently supported.")
        self.filter = getattr(options, 'filter', len(filter_actions) > 0)
        if len(filter_actions) == 1:
            self._filter_action = filter_actions.pop()
        else:
            self._filter_action = None
        self.template = getattr(options,
                                'template',
                                'horizon/common/_data_table.html')
        self.row_actions_dropdown_template = ('horizon/common/_data_table_'
                                              'row_actions_dropdown.html')
        self.row_actions_row_template = ('horizon/common/_data_table_'
                                         'row_actions_row.html')
        self.table_actions_template = \
            'horizon/common/_data_table_table_actions.html'
        self.context_var_name = six.text_type(getattr(options,
                                                      'context_var_name',
                                                      'table'))
        self.actions_column = getattr(options,
                                      'actions_column',
                                      len(self.row_actions) > 0)
        self.multi_select = getattr(options,
                                    'multi_select',
                                    len(self.table_actions) > 0)
        # Set runtime table defaults; not configurable.
        self.has_prev_data = False
        self.has_more_data = False
        # Set mixed data type table attr
        self.mixed_data_type = getattr(options, 'mixed_data_type', False)
        self.data_types = getattr(options, 'data_types', [])
        # If data_types has two or more elements, set mixed_data_type
        # to True automatically (the old comment said "more than 2" but
        # the check below is len > 1).
        if len(self.data_types) > 1:
            self.mixed_data_type = True
        # However, if the mixed_data_type is set to True manually and the
        # the data_types is empty, raise an error.
        if self.mixed_data_type and len(self.data_types) <= 1:
            raise ValueError("If mixed_data_type is set to True in class %s, "
                             "data_types should contain more than one type" %
                             self.name)
        self.data_type_name = getattr(options,
                                      'data_type_name',
                                      "_table_data_type")
class DataTableMetaclass(type):
    """Metaclass to add options to DataTable class and collect columns."""
    def __new__(mcs, name, bases, attrs):
        # Process options from Meta
        class_name = name
        dt_attrs = {}
        dt_attrs["_meta"] = opts = DataTableOptions(attrs.get("Meta", None))
        # Gather columns; this prevents the column from being an attribute
        # on the DataTable class and avoids naming conflicts.
        columns = []
        for attr_name, obj in attrs.items():
            if isinstance(obj, (opts.column_class, Column)):
                column_instance = attrs[attr_name]
                column_instance.name = attr_name
                column_instance.classes.append('normal_column')
                columns.append((attr_name, column_instance))
            else:
                # Non-column attributes pass through to the new class.
                dt_attrs[attr_name] = obj
        # creation_counter restores declaration order (class __dict__
        # ordering is not guaranteed on Python < 3.6).
        columns.sort(key=lambda x: x[1].creation_counter)
        # Iterate in reverse to preserve final order
        for base in reversed(bases):
            if hasattr(base, 'base_columns'):
                # Base-class columns are prepended before this class's own.
                columns[0:0] = base.base_columns.items()
        dt_attrs['base_columns'] = collections.OrderedDict(columns)
        # If the table is in a ResourceBrowser, the column number must meet
        # these limits because of the width of the browser.
        if opts.browser_table == "navigation" and len(columns) > 3:
            raise ValueError("You can assign at most three columns to %s."
                             % class_name)
        if opts.browser_table == "content" and len(columns) > 2:
            raise ValueError("You can assign at most two columns to %s."
                             % class_name)
        if opts.columns:
            # Remove any columns that weren't declared if we're being explicit
            # NOTE: we're iterating a COPY of the list here!
            for column_data in columns[:]:
                if column_data[0] not in opts.columns:
                    columns.pop(columns.index(column_data))
            # Re-order based on declared columns
            columns.sort(key=lambda x: dt_attrs['_meta'].columns.index(x[0]))
        # Add in our auto-generated columns
        if opts.multi_select and opts.browser_table != "navigation":
            multi_select = opts.column_class("multi_select",
                                             verbose_name="",
                                             auto="multi_select")
            multi_select.classes.append('multi_select_column')
            columns.insert(0, ("multi_select", multi_select))
        if opts.actions_column:
            actions_column = opts.column_class("actions",
                                               verbose_name=_("Actions"),
                                               auto="actions")
            actions_column.classes.append('actions_column')
            columns.append(("actions", actions_column))
        # Store this set of columns internally so we can copy them per-instance
        dt_attrs['_columns'] = collections.OrderedDict(columns)
        # Gather and register actions for later access since we only want
        # to instantiate them once.
        # (list() call gives deterministic sort order, which sets don't have.)
        actions = list(set(opts.row_actions) | set(opts.table_actions) |
                       set(opts.table_actions_menu))
        actions.sort(key=attrgetter('name'))
        actions_dict = collections.OrderedDict([(action.name, action())
                                                for action in actions])
        dt_attrs['base_actions'] = actions_dict
        if opts._filter_action:
            # Replace our filter action with the instantiated version
            opts._filter_action = actions_dict[opts._filter_action.name]
        # Create our new class!
        return type.__new__(mcs, name, bases, dt_attrs)
@six.python_2_unicode_compatible
@six.add_metaclass(DataTableMetaclass)
class DataTable(object):
"""A class which defines a table with all data and associated actions.
.. attribute:: name
String. Read-only access to the name specified in the
table's Meta options.
.. attribute:: multi_select
Boolean. Read-only access to whether or not this table
should display a column for multi-select checkboxes.
.. attribute:: data
Read-only access to the data this table represents.
.. attribute:: filtered_data
Read-only access to the data this table represents, filtered by
the :meth:`~horizon.tables.FilterAction.filter` method of the table's
:class:`~horizon.tables.FilterAction` class (if one is provided)
using the current request's query parameters.
"""
    def __init__(self, request, data=None, needs_form_wrapper=None, **kwargs):
        """Initialize the table for a single request.

        :param request: the current request; used for action dispatch,
            permission filtering and template rendering.
        :param data: the dataset this table represents (may be loaded later).
        :param needs_form_wrapper: explicit override for whether the table is
            rendered inside a ``<form>``; when ``None`` it is derived from
            the table's available actions (see ``needs_form_wrapper``).
        """
        self.request = request
        self.data = data
        self.kwargs = kwargs
        self._needs_form_wrapper = needs_form_wrapper
        self._no_data_message = self._meta.no_data_message
        self.breadcrumb = None
        self.current_item_id = None
        self.permissions = self._meta.permissions
        # Shallow-copy each column declared on the class so per-instance
        # mutations (e.g. toggling CSS classes) don't leak across requests.
        columns = []
        for key, _column in self._columns.items():
            column = copy.copy(_column)
            column.table = self
            columns.append((key, column))
        self.columns = collections.OrderedDict(columns)
        self._populate_data_cache()
        # Associate these actions with this table
        for action in self.base_actions.values():
            action.associate_with_table(self)
        # A summary row is needed as soon as any column defines a summation.
        self.needs_summary_row = any([col.summation
                                      for col in self.columns.values()])
    def __str__(self):
        # Human-readable name from Meta, used when rendering the table.
        return six.text_type(self._meta.verbose_name)
    def __repr__(self):
        # Debug representation, e.g. "<DataTable: instances>".
        return '<%s: %s>' % (self.__class__.__name__, self._meta.name)
    @property
    def name(self):
        """String. Read-only access to the name from the table's Meta."""
        return self._meta.name
    @property
    def footer(self):
        """Read-only access to the ``footer`` option from the table's Meta."""
        return self._meta.footer
    @property
    def multi_select(self):
        """Boolean. Whether a multi-select checkbox column is displayed."""
        return self._meta.multi_select
    @property
    def filtered_data(self):
        """The table's data after applying its FilterAction (memoized)."""
        # This function should be using django.utils.functional.cached_property
        # decorator, but unfortunately due to bug in Django
        # https://code.djangoproject.com/ticket/19872 it would make it fail
        # when being mocked by mox in tests.
        if not hasattr(self, '_filtered_data'):
            self._filtered_data = self.data
            if self._meta.filter and self._meta._filter_action:
                action = self._meta._filter_action
                filter_string = self.get_filter_string()
                filter_field = self.get_filter_field()
                request_method = self.request.method
                # Filter only when: the action asked to run on a plain GET
                # with no filter value (preloading), the request method
                # matches the action's method, or a filter value exists that
                # is not handled server-side by the API.
                needs_preloading = (not filter_string
                                    and request_method == 'GET'
                                    and action.needs_preloading)
                valid_method = (request_method == action.method)
                not_api_filter = (filter_string
                                  and not action.is_api_filter(filter_field))
                if valid_method or needs_preloading or not_api_filter:
                    if self._meta.mixed_data_type:
                        # Tables mixing data types use the type-aware filter.
                        self._filtered_data = action.data_type_filter(
                            self, self.data, filter_string)
                    else:
                        self._filtered_data = action.filter(
                            self, self.data, filter_string)
        return self._filtered_data
    def slugify_name(self):
        """Return a slugified (URL/CSS-safe) version of the table name."""
        return str(slugify(self._meta.name))
def get_filter_string(self):
"""Get the filter string value. For 'server' type filters this is
saved in the session so that it gets persisted across table loads.
For other filter types this is obtained from the POST dict.
"""
filter_action = self._meta._filter_action
param_name = filter_action.get_param_name()
filter_string = ''
if filter_action.filter_type == 'server':
filter_string = self.request.session.get(param_name, '')
else:
filter_string = self.request.POST.get(param_name, '')
return filter_string
    def get_filter_field(self):
        """Get the filter field value used for 'server' type filters. This
        is the value from the filter action's list of filter choices.
        """
        filter_action = self._meta._filter_action
        # The selected field is persisted in the session under
        # '<param_name>_field', alongside the filter string itself.
        param_name = '%s_field' % filter_action.get_param_name()
        filter_field = self.request.session.get(param_name, '')
        return filter_field
def _populate_data_cache(self):
self._data_cache = {}
# Set up hash tables to store data points for each column
for column in self.get_columns():
self._data_cache[column] = {}
    def _filter_action(self, action, request, datum=None):
        """Return whether ``action`` is allowed for this request/datum.

        Returns ``None`` (rather than ``False``) when the permission check
        itself raises, so a broken permission function still filters the
        action out without crashing the page.
        """
        try:
            # Catch user errors in permission functions here
            row_matched = True
            if self._meta.mixed_data_type:
                row_matched = action.data_type_matched(datum)
            return action._allowed(request, datum) and row_matched
        except Exception:
            LOG.exception("Error while checking action permissions.")
            return None
def is_browser_table(self):
if self._meta.browser_table:
return True
return False
    def render(self):
        """Renders the table using the template from the table options."""
        table_template = template.loader.get_template(self._meta.template)
        # Expose the table under Meta.context_var_name so templates can
        # reference it generically.
        extra_context = {self._meta.context_var_name: self,
                         'hidden_title': self._meta.hidden_title}
        context = template.RequestContext(self.request, extra_context)
        return table_template.render(context)
def get_absolute_url(self):
"""Returns the canonical URL for this table.
This is used for the POST action attribute on the form element
wrapping the table. In many cases it is also useful for redirecting
after a successful action on the table.
For convenience it defaults to the value of
``request.get_full_path()`` with any query string stripped off,
e.g. the path at which the table was requested.
"""
return self.request.get_full_path().partition('?')[0]
    def get_full_url(self):
        """Returns the full URL path for this table.

        This is used for the POST action attribute on the form element
        wrapping the table. We use this method to persist the
        pagination marker.
        """
        # Unlike get_absolute_url(), the query string is kept so pagination
        # parameters survive a POST round-trip.
        return self.request.get_full_path()
    def get_empty_message(self):
        """Returns the message to be displayed when there is no data."""
        # Captured from Meta.no_data_message in __init__.
        return self._no_data_message
    def get_object_by_id(self, lookup):
        """Returns the data object from the table's dataset which matches
        the ``lookup`` parameter specified. An error will be raised if
        the match is not a single data object.

        We will convert the object id and ``lookup`` to unicode before
        comparison.

        Uses :meth:`~horizon.tables.DataTable.get_object_id` internally.
        """
        if not isinstance(lookup, six.text_type):
            # NOTE(review): six.text_type(str(x), 'utf-8') assumes str(x)
            # yields bytes (Python 2); on Python 3 decoding a str raises
            # TypeError -- confirm before relying on this under py3.
            lookup = six.text_type(str(lookup), 'utf-8')
        matches = []
        for datum in self.data:
            obj_id = self.get_object_id(datum)
            if not isinstance(obj_id, six.text_type):
                obj_id = six.text_type(str(obj_id), 'utf-8')
            if obj_id == lookup:
                matches.append(datum)
        if len(matches) > 1:
            raise ValueError("Multiple matches were returned for that id: %s."
                             % matches)
        if not matches:
            # No match: bounce back to the table URL with an error message
            # instead of returning None.
            raise exceptions.Http302(self.get_absolute_url(),
                                     _('No match returned for the id "%s".')
                                     % lookup)
        return matches[0]
@property
def has_actions(self):
"""Boolean. Indicates whether there are any available actions on this
table.
"""
if not self.base_actions:
return False
return any(self.get_table_actions()) or any(self._meta.row_actions)
    @property
    def needs_form_wrapper(self):
        """Boolean. Indicates whether this table should be rendered wrapped in
        a ``<form>`` tag or not.
        """
        # If needs_form_wrapper is explicitly set, defer to that.
        if self._needs_form_wrapper is not None:
            return self._needs_form_wrapper
        # Otherwise calculate whether or not we need a form element:
        # a form is only required when there are actions to submit.
        return self.has_actions
    def get_table_actions(self):
        """Returns a list of the action instances for this table."""
        # Buttons first, then menu entries; an action listed in both
        # table_actions and table_actions_menu is only rendered in the menu.
        button_actions = [self.base_actions[action.name] for action in
                          self._meta.table_actions if
                          action not in self._meta.table_actions_menu]
        menu_actions = [self.base_actions[action.name] for
                        action in self._meta.table_actions_menu]
        bound_actions = button_actions + menu_actions
        # Drop any action the current request is not permitted to use.
        return [action for action in bound_actions if
                self._filter_action(action, self.request)]
    def get_row_actions(self, datum):
        """Returns a list of the action instances for a specific row.

        Each returned action is a per-row copy bound to ``datum``.
        """
        bound_actions = []
        for action in self._meta.row_actions:
            # Copy to allow modifying properties per row
            bound_action = copy.copy(self.base_actions[action.name])
            bound_action.attrs = copy.copy(bound_action.attrs)
            bound_action.datum = datum
            # Remove disallowed actions.
            if not self._filter_action(bound_action,
                                       self.request,
                                       datum):
                continue
            # Hook for modifying actions based on data. No-op by default.
            bound_action.update(self.request, datum)
            # Pre-create the URL for this link with appropriate parameters
            if issubclass(bound_action.__class__, LinkAction):
                bound_action.bound_url = bound_action.get_link_url(datum)
            bound_actions.append(bound_action)
        return bound_actions
def set_multiselect_column_visibility(self, visible=True):
"""hide checkbox column if no current table action is allowed."""
if not self.multi_select:
return
select_column = self.columns.values()[0]
# Try to find if the hidden class need to be
# removed or added based on visible flag.
hidden_found = 'hidden' in select_column.classes
if hidden_found and visible:
select_column.classes.remove('hidden')
elif not hidden_found and not visible:
select_column.classes.append('hidden')
    def render_table_actions(self):
        """Renders the actions specified in ``Meta.table_actions``."""
        template_path = self._meta.table_actions_template
        table_actions_template = template.loader.get_template(template_path)
        bound_actions = self.get_table_actions()
        extra_context = {"table_actions": bound_actions,
                         "table_actions_buttons": [],
                         "table_actions_menu": []}
        # The filter action (when allowed) is exposed under its own context
        # key and excluded from the plain button list below.
        if self._meta.filter and (
                self._filter_action(self._meta._filter_action, self.request)):
            extra_context["filter"] = self._meta._filter_action
        # Split remaining actions into menu entries vs. buttons.
        for action in bound_actions:
            if action.__class__ in self._meta.table_actions_menu:
                extra_context['table_actions_menu'].append(action)
            elif action != extra_context.get('filter'):
                extra_context['table_actions_buttons'].append(action)
        context = template.RequestContext(self.request, extra_context)
        # Hide the multi-select column when no actions are allowed at all.
        self.set_multiselect_column_visibility(len(bound_actions) > 0)
        return table_actions_template.render(context)
def render_row_actions(self, datum, pull_right=True, row=False):
"""Renders the actions specified in ``Meta.row_actions`` using the
current row data. If `row` is True, the actions are rendered in a row
of buttons. Otherwise they are rendered in a dropdown box.
"""
if row:
template_path = self._meta.row_actions_row_template
else:
template_path = self._meta.row_actions_dropdown_template
row_actions_template = template.loader.get_template(template_path)
bound_actions = self.get_row_actions(datum)
extra_context = {"row_actions": bound_actions,
"row_id": self.get_object_id(datum),
"pull_right": pull_right}
context = template.RequestContext(self.request, extra_context)
return row_actions_template.render(context)
@staticmethod
def parse_action(action_string):
"""Parses the ``action`` parameter (a string) sent back with the
POST data. By default this parses a string formatted as
``{{ table_name }}__{{ action_name }}__{{ row_id }}`` and returns
each of the pieces. The ``row_id`` is optional.
"""
if action_string:
bits = action_string.split(STRING_SEPARATOR)
table = bits[0]
action = bits[1]
try:
object_id = STRING_SEPARATOR.join(bits[2:])
if object_id == '':
object_id = None
except IndexError:
object_id = None
return table, action, object_id
    def take_action(self, action_name, obj_id=None, obj_ids=None):
        """Locates the appropriate action and routes the object
        data to it. The action should return an HTTP redirect
        if successful, or a value which evaluates to ``False``
        if unsuccessful.
        """
        # See if we have a list of ids
        obj_ids = obj_ids or self.request.POST.getlist('object_ids')
        action = self.base_actions.get(action_name, None)
        if not action or action.method != self.request.method:
            # We either didn't get an action or we're being hacked. Goodbye.
            return None
        # Meanwhile, back in Gotham...
        if not action.requires_input or obj_id or obj_ids:
            # Let subclasses normalize incoming ids before dispatch.
            if obj_id:
                obj_id = self.sanitize_id(obj_id)
            if obj_ids:
                obj_ids = [self.sanitize_id(i) for i in obj_ids]
            # Single handling is easy
            if not action.handles_multiple:
                response = action.single(self, self.request, obj_id)
            # Otherwise figure out what to pass along
            else:
                # Preference given to a specific id, since that implies
                # the user selected an action for just one row.
                if obj_id:
                    obj_ids = [obj_id]
                response = action.multiple(self, self.request, obj_ids)
            return response
        elif action and action.requires_input and not (obj_id or obj_ids):
            # Input was required but no row was selected; prompt the user.
            messages.info(self.request,
                          _("Please select a row before taking that action."))
        return None
    @classmethod
    def check_handler(cls, request):
        """Determine whether the request should be handled by this table.

        Returns a ``(table, action, obj_id)`` tuple; all three are ``None``
        when the request carries no action for any table.
        """
        # POSTed actions carry a single combined "action" string; GET
        # requests pass table/action/obj_id as separate query parameters.
        if request.method == "POST" and "action" in request.POST:
            table, action, obj_id = cls.parse_action(request.POST["action"])
        elif "table" in request.GET and "action" in request.GET:
            table = request.GET["table"]
            action = request.GET["action"]
            obj_id = request.GET.get("obj_id", None)
        else:
            table = action = obj_id = None
        return table, action, obj_id
    def maybe_preempt(self):
        """Determine whether the request should be handled by a preemptive
        action on this table or by an AJAX row update before loading any data.
        """
        request = self.request
        table_name, action_name, obj_id = self.check_handler(request)
        if table_name == self.name:
            # Handle AJAX row updating.
            new_row = self._meta.row_class(self)
            if new_row.ajax and new_row.ajax_action_name == action_name:
                try:
                    datum = new_row.get_data(request, obj_id)
                    if self.get_object_id(datum) == self.current_item_id:
                        self.selected = True
                        new_row.classes.append('current_selected')
                    new_row.load_cells(datum)
                    error = False
                except Exception:
                    datum = None
                    error = exceptions.handle(request, ignore=True)
                if request.is_ajax():
                    if not error:
                        # Re-render just this row for the AJAX update.
                        return HttpResponse(new_row.render())
                    else:
                        return HttpResponse(status=error.status_code)
            elif new_row.ajax_cell_action_name == action_name:
                # inline edit of the cell actions
                return self.inline_edit_handle(request, table_name,
                                               action_name, obj_id,
                                               new_row)
            # Actions flagged "preempt" run before table data is loaded.
            preemptive_actions = [action for action in
                                  self.base_actions.values() if action.preempt]
            if action_name:
                for action in preemptive_actions:
                    if action.name == action_name:
                        handled = self.take_action(action_name, obj_id)
                        if handled:
                            return handled
        return None
    def inline_edit_handle(self, request, table_name, action_name, obj_id,
                           new_row):
        """Inline edit handler.

        Showing form or handling update by POST of the cell.
        """
        try:
            cell_name = request.GET['cell_name']
            datum = new_row.get_data(request, obj_id)
            # TODO(lsmola) extract load cell logic to Cell and load
            # only 1 cell. This is kind of ugly.
            if request.GET.get('inline_edit_mod') == "true":
                # Switch the column into form-field rendering mode.
                new_row.table.columns[cell_name].auto = "form_field"
                inline_edit_mod = True
            else:
                inline_edit_mod = False
            # Load the cell and set the inline_edit_mod.
            new_row.load_cells(datum)
            cell = new_row.cells[cell_name]
            cell.inline_edit_mod = inline_edit_mod
            # If not allowed, neither edit mod or updating is allowed.
            if not cell.update_allowed:
                datum_display = (self.get_object_display(datum) or
                                 _("N/A"))
                LOG.info('Permission denied to %s: "%s"' %
                         ("Update Action", datum_display))
                return HttpResponse(status=401)
            # If it is post request, we are updating the cell.
            if request.method == "POST":
                return self.inline_update_action(request,
                                                 datum,
                                                 cell,
                                                 obj_id,
                                                 cell_name)
            error = False
        except Exception:
            datum = None
            error = exceptions.handle(request, ignore=True)
        if request.is_ajax():
            if not error:
                # NOTE(review): if an exception fired before ``cell`` was
                # assigned and handle() returned a falsy error, this line
                # would raise UnboundLocalError -- confirm handle() always
                # returns a truthy error object when ignore=True.
                return HttpResponse(cell.render())
            else:
                return HttpResponse(status=error.status_code)
    def inline_update_action(self, request, datum, cell, obj_id, cell_name):
        """Handling update by POST of the cell.

        Validates the submitted value with the column's form field, runs the
        cell's update action, and returns a JSON status payload (200 on
        success, 400 with the validation message on failure).
        """
        # The POST key is '<cell_name>__<obj_id>'.
        new_cell_value = request.POST.get(
            cell_name + '__' + obj_id, None)
        # NOTE(review): when form_field is not a forms.Field this method
        # falls through and returns None -- confirm that is intended.
        if issubclass(cell.column.form_field.__class__,
                      forms.Field):
            try:
                # using Django Form Field to parse the
                # right value from POST and to validate it
                new_cell_value = (
                    cell.column.form_field.clean(
                        new_cell_value))
                cell.update_action.action(
                    self.request, datum, obj_id, cell_name, new_cell_value)
                response = {
                    'status': 'updated',
                    'message': ''
                }
                return HttpResponse(
                    json.dumps(response),
                    status=200,
                    content_type="application/json")
            except core_exceptions.ValidationError:
                # if there is a validation error, I will
                # return the message to the client
                exc_type, exc_value, exc_traceback = (
                    sys.exc_info())
                response = {
                    'status': 'validation_error',
                    'message': ' '.join(exc_value.messages)}
                return HttpResponse(
                    json.dumps(response),
                    status=400,
                    content_type="application/json")
def maybe_handle(self):
"""Determine whether the request should be handled by any action on
this table after data has been loaded.
"""
request = self.request
table_name, action_name, obj_id = self.check_handler(request)
if table_name == self.name and action_name:
action_names = [action.name for action in
self.base_actions.values() if not action.preempt]
# do not run preemptive actions here
if action_name in action_names:
return self.take_action(action_name, obj_id)
return None
    def sanitize_id(self, obj_id):
        """Override to modify an incoming obj_id to match existing
        API data types or modify the format.
        """
        # Identity by default; subclasses may coerce the type here.
        return obj_id
    def get_object_id(self, datum):
        """Returns the identifier for the object this row will represent.

        By default this returns an ``id`` attribute on the given object,
        but this can be overridden to return other values.

        .. warning::

            Make sure that the value returned is a unique value for the id
            otherwise rendering issues can occur.
        """
        return datum.id
def get_object_display(self, datum):
"""Returns a display name that identifies this object.
By default, this returns a ``name`` attribute from the given object,
but this can be overridden to return other values.
"""
if hasattr(datum, 'name'):
return datum.name
return None
    def has_prev_data(self):
        """Returns a boolean value indicating whether there is previous data
        available to this table from the source (generally an API).

        The method is largely meant for internal use, but if you want to
        override it to provide custom behavior you can do so at your own risk.
        """
        return self._meta.has_prev_data
    def has_more_data(self):
        """Returns a boolean value indicating whether there is more data
        available to this table from the source (generally an API).

        The method is largely meant for internal use, but if you want to
        override it to provide custom behavior you can do so at your own risk.
        """
        return self._meta.has_more_data
    def get_prev_marker(self):
        """Returns the identifier for the first object in the current data set
        for APIs that use marker/limit-based paging.
        """
        # URL-quote the id so it is safe to embed in a query string.
        return http.urlquote_plus(self.get_object_id(self.data[0])) \
            if self.data else ''
    def get_marker(self):
        """Returns the identifier for the last object in the current data set
        for APIs that use marker/limit-based paging.
        """
        return http.urlquote_plus(self.get_object_id(self.data[-1])) \
            if self.data else ''
    def get_prev_pagination_string(self):
        """Returns the query parameter string to paginate this table
        to the previous page, e.g. ``"<param>=<marker>"``.
        """
        return "=".join([self._meta.prev_pagination_param,
                         self.get_prev_marker()])
    def get_pagination_string(self):
        """Returns the query parameter string to paginate this table
        to the next page, e.g. ``"<param>=<marker>"``.
        """
        return "=".join([self._meta.pagination_param, self.get_marker()])
def calculate_row_status(self, statuses):
"""Returns a boolean value determining the overall row status
based on the dictionary of column name to status mappings passed in.
By default, it uses the following logic:
#. If any statuses are ``False``, return ``False``.
#. If no statuses are ``False`` but any or ``None``, return ``None``.
#. If all statuses are ``True``, return ``True``.
This provides the greatest protection against false positives without
weighting any particular columns.
The ``statuses`` parameter is passed in as a dictionary mapping
column names to their statuses in order to allow this function to
be overridden in such a way as to weight one column's status over
another should that behavior be desired.
"""
values = statuses.values()
if any([status is False for status in values]):
return False
elif any([status is None for status in values]):
return None
else:
return True
    def get_row_status_class(self, status):
        """Returns a css class name determined by the status value. This class
        name is used to indicate the status of the rows in the table if
        any ``status_columns`` have been specified.
        """
        # True -> up, False -> down, anything else (e.g. None) -> unknown.
        if status is True:
            return "status_up"
        elif status is False:
            return "status_down"
        else:
            return "status_unknown"
    def get_columns(self):
        """Returns this table's columns including auto-generated ones."""
        return self.columns.values()
def get_rows(self):
"""Return the row data for this table broken out by columns."""
rows = []
try:
for datum in self.filtered_data:
row = self._meta.row_class(self, datum)
if self.get_object_id(datum) == self.current_item_id:
self.selected = True
row.classes.append('current_selected')
rows.append(row)
except Exception:
# Exceptions can be swallowed at the template level here,
# re-raising as a TemplateSyntaxError makes them visible.
LOG.exception("Error while rendering table rows.")
exc_info = sys.exc_info()
raise six.reraise(template.TemplateSyntaxError, exc_info[1],
exc_info[2])
return rows
    def css_classes(self):
        """Returns the additional CSS class to be added to <table> tag."""
        # Straight pass-through of Meta.css_classes.
        return self._meta.css_classes
| apache-2.0 |
arulkumarkandasamy/clictest | clictest/common/utils.py | 1 | 25644 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2014 SoftLayer Technologies, Inc.
# Copyright 2015 Mirantis, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
System-level utilities and helper functions.
"""
import errno
try:
from eventlet import sleep
except ImportError:
from time import sleep
from eventlet.green import socket
import functools
import os
import re
import uuid
from OpenSSL import crypto
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import encodeutils
from oslo_utils import excutils
from oslo_utils import netutils
from oslo_utils import strutils
import six
from webob import exc
from clictest.common import exception
from clictest.common import timeutils
from clictest.i18n import _, _LE, _LW
# Module-level config handle and logger.
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
# Whitelist of v1 API headers of form x-image-meta-xxx; any other
# x-image-meta-* header is rejected by get_image_meta_from_headers().
IMAGE_META_HEADERS = ['x-image-meta-location', 'x-image-meta-size',
                      'x-image-meta-is_public', 'x-image-meta-disk_format',
                      'x-image-meta-container_format', 'x-image-meta-name',
                      'x-image-meta-status', 'x-image-meta-copy_from',
                      'x-image-meta-uri', 'x-image-meta-checksum',
                      'x-image-meta-created_at', 'x-image-meta-updated_at',
                      'x-image-meta-deleted_at', 'x-image-meta-min_ram',
                      'x-image-meta-min_disk', 'x-image-meta-owner',
                      'x-image-meta-store', 'x-image-meta-id',
                      'x-image-meta-protected', 'x-image-meta-deleted',
                      'x-image-meta-virtual_size']
# Name of the environment variable holding an inherited socket fd for tests.
GLANCE_TEST_SOCKET_FD_STR = 'GLANCE_TEST_SOCKET_FD'
def chunkreadable(iter, chunk_size=65536):
    """
    Wrap a readable iterator with a reader yielding chunks of
    a preferred size, otherwise leave iterator unchanged.

    :param iter: an iter which may also be readable
    :param chunk_size: maximum size of chunk
    """
    if hasattr(iter, 'read'):
        return chunkiter(iter, chunk_size)
    return iter
def chunkiter(fp, chunk_size=65536):
    """
    Return an iterator to a file-like obj which yields fixed size chunks

    :param fp: a file-like object
    :param chunk_size: maximum size of chunk
    """
    chunk = fp.read(chunk_size)
    while chunk:
        yield chunk
        chunk = fp.read(chunk_size)
def cooperative_iter(iter):
    """
    Return an iterator which schedules after each
    iteration. This can prevent eventlet thread starvation.

    :param iter: an iterator to wrap
    """
    try:
        for chunk in iter:
            # Yield control to other green threads between chunks.
            sleep(0)
            yield chunk
    except Exception as err:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Error: cooperative_iter exception %s") % err)
def cooperative_read(fd):
    """
    Wrap a file descriptor's read with a partial function which schedules
    after each read. This can prevent eventlet thread starvation.

    :param fd: a file descriptor to wrap
    """
    def readfn(*args):
        data = fd.read(*args)
        # Give other green threads a chance to run after each read.
        sleep(0)
        return data
    return readfn
# Upper bound (in bytes) on CooperativeReader's internal read buffer; reads
# accumulating more than this raise exception.LimitExceeded.
MAX_COOP_READER_BUFFER_SIZE = 134217728  # 128M seems like a sane buffer limit
class CooperativeReader(object):
    """
    An eventlet thread friendly class for reading in image data.

    When accessing data either through the iterator or the read method
    we perform a sleep to allow a co-operative yield. When there is more than
    one image being uploaded/downloaded this prevents eventlet thread
    starvation, ie allows all threads to be scheduled periodically rather than
    having the same thread be continuously active.
    """
    def __init__(self, fd):
        """
        :param fd: Underlying image file object
        """
        self.fd = fd
        self.iterator = None
        # NOTE(markwash): if the underlying supports read(), overwrite the
        # default iterator-based implementation with cooperative_read which
        # is more straightforward
        if hasattr(fd, 'read'):
            self.read = cooperative_read(fd)
        else:
            self.iterator = None
            self.buffer = b''
            self.position = 0

    def read(self, length=None):
        """Return the requested amount of bytes, fetching the next chunk of
        the underlying iterator when needed.

        This is replaced with cooperative_read in __init__ if the underlying
        fd already supports read().
        """
        if length is None:
            if len(self.buffer) - self.position > 0:
                # if no length specified but some data exists in buffer,
                # return that data and clear the buffer
                result = self.buffer[self.position:]
                self.buffer = b''
                self.position = 0
                # bytes() rather than str(): on Python 3, str(b'x') yields
                # the repr "b'x'" instead of the payload.  On Python 2 bytes
                # is str, so behavior there is unchanged.
                return bytes(result)
            else:
                # otherwise read the next chunk from the underlying iterator
                # and return it as a whole. Reset the buffer, as subsequent
                # calls may specify the length
                try:
                    if self.iterator is None:
                        self.iterator = self.__iter__()
                    return next(self.iterator)
                except StopIteration:
                    # b'' for type consistency with the byte chunks above
                    return b''
                finally:
                    self.buffer = b''
                    self.position = 0
        else:
            result = bytearray()
            while len(result) < length:
                if self.position < len(self.buffer):
                    to_read = length - len(result)
                    chunk = self.buffer[self.position:self.position + to_read]
                    result.extend(chunk)

                    # This check is here to prevent potential OOM issues if
                    # this code is called with unreasonably high values of read
                    # size. Currently it is only called from the HTTP clients
                    # of Glance backend stores, which use httplib for data
                    # streaming, which has readsize hardcoded to 8K, so this
                    # check should never fire. Regardless it still worths to
                    # make the check, as the code may be reused somewhere else.
                    if len(result) >= MAX_COOP_READER_BUFFER_SIZE:
                        raise exception.LimitExceeded()
                    self.position += len(chunk)
                else:
                    try:
                        if self.iterator is None:
                            self.iterator = self.__iter__()
                        self.buffer = next(self.iterator)
                        self.position = 0
                    except StopIteration:
                        self.buffer = b''
                        self.position = 0
                        # Underlying iterator exhausted: return a short read.
                        return bytes(result)
            return bytes(result)

    def __iter__(self):
        return cooperative_iter(self.fd.__iter__())
class LimitingReader(object):
    """
    Reader designed to fail when reading image data past the configured
    allowable amount.
    """
    def __init__(self, data, limit):
        """
        :param data: Underlying image data object
        :param limit: maximum number of bytes the reader should allow
        """
        self.data = data
        self.limit = limit
        self.bytes_read = 0

    def __iter__(self):
        for chunk in self.data:
            self.bytes_read += len(chunk)
            # Fail as soon as the cumulative amount crosses the limit.
            if self.bytes_read > self.limit:
                raise exception.ImageSizeLimitExceeded()
            yield chunk

    def read(self, i):
        chunk = self.data.read(i)
        self.bytes_read += len(chunk)
        if self.bytes_read > self.limit:
            raise exception.ImageSizeLimitExceeded()
        return chunk
def image_meta_to_http_headers(image_meta):
    """
    Returns a set of image metadata into a dict
    of HTTP headers that can be fed to either a Webob
    Request object or an httplib.HTTP(S)Connection object

    :param image_meta: Mapping of image metadata
    """
    headers = {}
    for key, value in image_meta.items():
        if value is None:
            continue
        if key == 'properties':
            # Custom properties get their own x-image-meta-property-*
            # header namespace.
            for prop_name, prop_value in value.items():
                if prop_value is not None:
                    headers["x-image-meta-property-%s"
                            % prop_name.lower()] = six.text_type(prop_value)
        else:
            headers["x-image-meta-%s" % key.lower()] = six.text_type(value)
    return headers
def get_image_meta_from_headers(response):
    """
    Processes HTTP headers from a supplied response that
    match the x-image-meta and x-image-meta-property and
    returns a mapping of image metadata and properties

    :param response: Response to process
    :raises exc.HTTPBadRequest: for an x-image-meta-* header not in
        the IMAGE_META_HEADERS whitelist
    :raises exception.InvalidParameterValue: for non-integer or negative
        numeric fields
    """
    result = {}
    properties = {}
    if hasattr(response, 'getheaders'):  # httplib.HTTPResponse
        headers = response.getheaders()
    else:  # webob.Response
        headers = response.headers.items()
    for key, value in headers:
        key = str(key.lower())
        if key.startswith('x-image-meta-property-'):
            field_name = key[len('x-image-meta-property-'):].replace('-', '_')
            properties[field_name] = value or None
        elif key.startswith('x-image-meta-'):
            field_name = key[len('x-image-meta-'):].replace('-', '_')
            # Only whitelisted x-image-meta headers are accepted.
            if 'x-image-meta-' + field_name not in IMAGE_META_HEADERS:
                msg = _("Bad header: %(header_name)s") % {'header_name': key}
                raise exc.HTTPBadRequest(msg, content_type="text/plain")
            result[field_name] = value or None
    result['properties'] = properties
    # Coerce numeric fields to int; only 'virtual_size' may be "None".
    for key, nullable in [('size', False), ('min_disk', False),
                          ('min_ram', False), ('virtual_size', True)]:
        if key in result:
            try:
                result[key] = int(result[key])
            except ValueError:
                if nullable and result[key] == str(None):
                    result[key] = None
                else:
                    extra = (_("Cannot convert image %(key)s '%(value)s' "
                               "to an integer.")
                             % {'key': key, 'value': result[key]})
                    raise exception.InvalidParameterValue(value=result[key],
                                                          param=key,
                                                          extra_msg=extra)
            if result[key] is not None and result[key] < 0:
                extra = _('Cannot be a negative value.')
                raise exception.InvalidParameterValue(value=result[key],
                                                      param=key,
                                                      extra_msg=extra)
    # Boolean-ish string fields are normalized to real booleans.
    for key in ('is_public', 'deleted', 'protected'):
        if key in result:
            result[key] = strutils.bool_from_string(result[key])
    return result
def create_mashup_dict(image_meta):
    """
    Returns a dictionary-like mashup of the image core properties
    and the image custom properties from given image metadata.

    :param image_meta: metadata of image with core and custom properties
    """
    mashup = {}
    for key, value in six.iteritems(image_meta):
        if not isinstance(value, dict):
            mashup[key] = value
            continue
        # Flatten nested dicts recursively, skipping keys that would
        # shadow an existing top-level entry.
        for subkey, subvalue in six.iteritems(create_mashup_dict(value)):
            if subkey not in image_meta:
                mashup[subkey] = subvalue
    return mashup
def safe_mkdirs(path):
    """Create ``path`` (including parents), ignoring an existing directory.

    Any OSError other than EEXIST is re-raised.
    """
    try:
        os.makedirs(path)
    except OSError as e:
        if e.errno == errno.EEXIST:
            return
        raise
def mutating(func):
    """Decorator to enforce read-only logic"""
    @functools.wraps(func)
    def wrapped(self, req, *args, **kwargs):
        # Allow the call straight through unless the context is read-only.
        if not req.context.read_only:
            return func(self, req, *args, **kwargs)
        msg = "Read-only access"
        LOG.debug(msg)
        raise exc.HTTPForbidden(msg, request=req,
                                content_type="text/plain")
    return wrapped
def setup_remote_pydev_debug(host, port):
    """Attach this process to a remote pydev debug server.

    :param host: hostname or IP where the pydev server is listening
    :param port: port the pydev server is listening on
    :returns: True on success; on failure logs and re-raises the error
    """
    error_msg = _LE('Error setting up the debug environment. Verify that the'
                    ' option pydev_worker_debug_host is pointing to a valid '
                    'hostname or IP on which a pydev server is listening on'
                    ' the port indicated by pydev_worker_debug_port.')
    try:
        try:
            # Some pydev distributions ship the module inside the 'pydev'
            # package; fall back to the top-level module name otherwise.
            from pydev import pydevd
        except ImportError:
            import pydevd
        pydevd.settrace(host,
                        port=port,
                        stdoutToServer=True,
                        stderrToServer=True)
        return True
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception(error_msg)
def validate_key_cert(key_file, cert_file):
    """Check that *key_file* and *cert_file* form a usable PEM key pair.

    Both files are loaded, then a random payload is signed with the key
    and verified against the certificate to prove they belong together.
    Any failure raises RuntimeError naming the offending file or the
    OpenSSL error.
    """
    try:
        # These two variables track which file is currently being
        # processed so the except blocks below can report the right one.
        error_key_name = "private key"
        error_filename = key_file
        with open(key_file, 'r') as keyfile:
            key_str = keyfile.read()
        key = crypto.load_privatekey(crypto.FILETYPE_PEM, key_str)
        error_key_name = "certificate"
        error_filename = cert_file
        with open(cert_file, 'r') as certfile:
            cert_str = certfile.read()
        cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_str)
    except IOError as ioe:
        raise RuntimeError(_("There is a problem with your %(error_key_name)s "
                             "%(error_filename)s. Please verify it."
                             " Error: %(ioe)s") %
                           {'error_key_name': error_key_name,
                            'error_filename': error_filename,
                            'ioe': ioe})
    except crypto.Error as ce:
        raise RuntimeError(_("There is a problem with your %(error_key_name)s "
                             "%(error_filename)s. Please verify it. OpenSSL"
                             " error: %(ce)s") %
                           {'error_key_name': error_key_name,
                            'error_filename': error_filename,
                            'ce': ce})
    try:
        # Sign/verify a throwaway payload to prove key and cert match.
        data = str(uuid.uuid4())
        # On Python 3, explicitly encode to UTF-8 to call crypto.sign() which
        # requires bytes. Otherwise, it raises a deprecation warning (and
        # will raise an error later).
        data = encodeutils.to_utf8(data)
        digest = CONF.digest_algorithm
        if digest == 'sha1':
            LOG.warn(
                _LW('The FIPS (FEDERAL INFORMATION PROCESSING STANDARDS)'
                    ' state that the SHA-1 is not suitable for'
                    ' general-purpose digital signature applications (as'
                    ' specified in FIPS 186-3) that require 112 bits of'
                    ' security. The default value is sha1 in Kilo for a'
                    ' smooth upgrade process, and it will be updated'
                    ' with sha256 in next release(L).'))
        out = crypto.sign(key, data, digest)
        crypto.verify(cert, out, data, digest)
    except crypto.Error as ce:
        raise RuntimeError(_("There is a problem with your key pair. "
                             "Please verify that cert %(cert_file)s and "
                             "key %(key_file)s belong together. OpenSSL "
                             "error %(ce)s") % {'cert_file': cert_file,
                                                'key_file': key_file,
                                                'ce': ce})
def get_test_suite_socket():
    """Return a listening socket inherited from the test suite, if any.

    The test suite hands an already-open socket to this process via the
    file descriptor number stored in the environment variable named by
    GLANCE_TEST_SOCKET_FD_STR.  The variable is consumed (deleted) on
    first use.  Returns None when no such descriptor was provided.
    """
    global GLANCE_TEST_SOCKET_FD_STR
    if GLANCE_TEST_SOCKET_FD_STR in os.environ:
        fd = int(os.environ[GLANCE_TEST_SOCKET_FD_STR])
        sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
        if six.PY2:
            # On Python 2, re-wrap so the object behaves like socket.socket.
            sock = socket.SocketType(_sock=sock)
        sock.listen(CONF.backlog)
        del os.environ[GLANCE_TEST_SOCKET_FD_STR]
        # fromfd() duplicated the descriptor, so close the original.
        os.close(fd)
        return sock
    return None
def is_valid_hostname(hostname):
    """Verify whether a hostname (not an FQDN) is valid.

    Only letters, digits and hyphens are accepted.  The pattern is
    anchored so that a trailing newline (which '$' would tolerate)
    does not slip through validation.
    """
    return re.match(r'^[a-zA-Z0-9-]+\Z', hostname) is not None
def is_valid_fqdn(fqdn):
    """Verify whether a host is a valid FQDN.

    Uses a raw string (the original '\\.' in a plain string is an
    invalid escape sequence that warns on modern Python) and anchors
    the match so a trailing newline cannot pass validation.
    """
    return re.match(r'^[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}\Z', fqdn) is not None
def parse_valid_host_port(host_port):
    """
    Given a "host:port" string, attempts to parse it as intelligently as
    possible to determine if it is valid. This includes IPv6 [host]:port form,
    IPv4 ip:port form, and hostname:port or fqdn:port form.

    Invalid inputs will raise a ValueError, while valid inputs will return
    a (host, port) tuple where the port will always be of type int.
    """
    try:
        try:
            host, port = netutils.parse_host_port(host_port)
        except Exception:
            raise ValueError(_('Host and port "%s" is not valid.') % host_port)
        if not netutils.is_valid_port(port):
            raise ValueError(_('Port "%s" is not valid.') % port)
        # First check for valid IPv6 and IPv4 addresses, then a generic
        # hostname. Failing those, if the host includes a period, then this
        # should pass a very generic FQDN check. The FQDN check for letters at
        # the tail end will weed out any hilariously absurd IPv4 addresses.
        if not (netutils.is_valid_ipv6(host) or netutils.is_valid_ipv4(host) or
                is_valid_hostname(host) or is_valid_fqdn(host)):
            raise ValueError(_('Host "%s" is not valid.') % host)
    except Exception as ex:
        # NOTE: the ValueErrors raised above are deliberately caught here
        # too and re-wrapped, so every failure carries the usage hint.
        raise ValueError(_('%s '
                           'Please specify a host:port pair, where host is an '
                           'IPv4 address, IPv6 address, hostname, or FQDN. If '
                           'using an IPv6 address, enclose it in brackets '
                           'separately from the port (i.e., '
                           '"[fe80::a:b:c]:9876").') % ex)
    return (host, int(port))
# Matches any character above U+FFFF, i.e. code points that require four
# bytes in UTF-8 (used by no_4byte_params below).
try:
    REGEX_4BYTE_UNICODE = re.compile(u'[\U00010000-\U0010ffff]')
except re.error:
    # UCS-2 ("narrow") build: such characters appear as surrogate pairs,
    # so match a high surrogate followed by a low surrogate instead.
    REGEX_4BYTE_UNICODE = re.compile(u'[\uD800-\uDBFF][\uDC00-\uDFFF]')
def no_4byte_params(f):
    """Decorator rejecting 4-byte unicode in a callable's arguments.

    Checks that no 4 byte unicode characters are allowed
    in dicts' keys/values and string's parameters.  Dict arguments
    (including **kwargs) are checked recursively; the first offending
    key, value or positional string raises exception.Invalid.
    """
    # Preserve the wrapped callable's name/docstring, consistent with
    # the mutating() decorator in this module.
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        def _is_match(some_str):
            # Only text (unicode) strings can carry code points > U+FFFF.
            return (isinstance(some_str, six.text_type) and
                    REGEX_4BYTE_UNICODE.findall(some_str) != [])
        def _check_dict(data_dict):
            # a dict of dicts has to be checked recursively
            for key, value in six.iteritems(data_dict):
                if isinstance(value, dict):
                    _check_dict(value)
                else:
                    if _is_match(key):
                        msg = _("Property names can't contain 4 byte unicode.")
                        raise exception.Invalid(msg)
                    if _is_match(value):
                        msg = (_("%s can't contain 4 byte unicode characters.")
                               % key.title())
                        raise exception.Invalid(msg)
        for data_dict in [arg for arg in args if isinstance(arg, dict)]:
            _check_dict(data_dict)
        # now check args for str values
        for arg in args:
            if _is_match(arg):
                msg = _("Param values can't contain 4 byte unicode.")
                raise exception.Invalid(msg)
        # check kwargs as well, as params are passed as kwargs via
        # registry calls
        _check_dict(kwargs)
        return f(*args, **kwargs)
    return wrapper
def validate_mysql_int(*args, **kwargs):
    """Ensure every argument fits in a signed 4-byte MySQL INT.

    Each positional value, and each truthy keyword value, must not
    exceed 2 ** 31 - 1 (MySQL stores INT in 4 bytes).  On the first
    offending argument, exception.Invalid is raised with details.
    """
    max_int = (2 ** 31) - 1
    for value in args:
        if value > max_int:
            msg = _("Value %(value)d out of range, "
                    "must not exceed %(max)d") % {"value": value,
                                                  "max": max_int}
            raise exception.Invalid(msg)
    for name, value in kwargs.items():
        if value and value > max_int:
            msg = _("'%(param)s' value out of range, "
                    "must not exceed %(max)d") % {"param": name,
                                                  "max": max_int}
            raise exception.Invalid(msg)
def stash_conf_values():
    """
    Make a copy of some of the current global CONF's settings.
    Allows determining if any of these values have changed
    when the config is reloaded.
    """
    conf = {
        'bind_host': CONF.bind_host,
        'bind_port': CONF.bind_port,
        # Bug fix: this entry previously stashed CONF.cert_file, so a
        # changed tcp_keepidle was never detected on config reload.
        'tcp_keepidle': CONF.tcp_keepidle,
        'backlog': CONF.backlog,
        'key_file': CONF.key_file,
        'cert_file': CONF.cert_file
    }
    return conf
def split_filter_op(expression):
    """Split an optional operator prefix from a filter expression.

    Designed for use on a comparative-filtering query field.  A value
    of the form "<op>:<threshold>" is split apart; with no colon the
    whole value is the threshold and the operator defaults to equality.
    ISO 8601 timestamps such as CCYY-MM-DDThh:mm:ss+hh:mm contain
    colons but no operator, so anything that parses as a date is also
    treated as an "eq" comparison of the whole expression.

    :param expression: the expression to parse
    :returns: a tuple (operator, threshold) parsed from expression
    """
    prefix, colon, remainder = expression.partition(':')
    if not colon:
        return 'eq', prefix
    try:
        timeutils.parse_isotime(expression)
    except ValueError:
        # Not a date: the prefix really is an operator.
        # NOTE stevelle decoding escaped values may be needed later
        return prefix, remainder
    return 'eq', expression
def validate_quotes(value):
    """Validate filter values

    Checks that double quotes in the expression open and close
    correctly: an opening quote must follow a comma (or start the
    string), a closing quote must precede a comma (or end the string),
    and every quote must eventually be closed.  Backslash-escaped
    quotes are ignored.
    """
    expecting_open = True
    for idx, ch in enumerate(value):
        if ch != '"':
            continue
        if idx and value[idx - 1] == '\\':
            # Escaped quote: not a delimiter.
            continue
        if expecting_open:
            if idx and value[idx - 1] != ',':
                msg = _("Invalid filter value %s. There is no comma "
                        "before opening quotation mark.") % value
                raise exception.InvalidParameterValue(message=msg)
        else:
            if idx + 1 != len(value) and value[idx + 1] != ",":
                msg = _("Invalid filter value %s. There is no comma "
                        "after closing quotation mark.") % value
                raise exception.InvalidParameterValue(message=msg)
        expecting_open = not expecting_open
    if not expecting_open:
        msg = _("Invalid filter value %s. The quote is not closed.") % value
        raise exception.InvalidParameterValue(message=msg)
def split_filter_value_for_quotes(value):
    """Split filter values

    Split values by commas and quotes for 'in' operator, according
    api-wg.  The value is first validated for balanced quoting, then
    split on commas while honouring double-quoted segments (with
    backslash escapes) and empty elements.
    """
    validate_quotes(value)
    pattern = re.compile(r'''
        "(              # if found a double-quote
           [^\"\\]*     # take characters either non-quotes or backslashes
           (?:\\.       # take backslashes and character after it
            [^\"\\]*)*  # take characters either non-quotes or backslashes
         )              # before double-quote
        ",?             # a double-quote with comma maybe
        | ([^,]+),?     # if not found double-quote take any non-comma
                        # characters with comma maybe
        | ,             # if we have only comma take empty string
        ''', re.VERBOSE)
    return [quoted or plain for quoted, plain in pattern.findall(value)]
def evaluate_filter_op(value, operator, threshold):
    """Evaluate a comparison operator.

    Designed for use on a comparative-filtering query field.

    :param value: evaluated against the operator, as left side of expression
    :param operator: any supported filter operation
    :param threshold: to compare value against, as right side of expression
    :raises: InvalidFilterOperatorValue if an unknown operator is provided
    :returns: boolean result of applied comparison
    """
    # Lazy comparisons: only the selected one is ever evaluated.
    comparisons = {
        'gt': lambda: value > threshold,
        'gte': lambda: value >= threshold,
        'lt': lambda: value < threshold,
        'lte': lambda: value <= threshold,
        'neq': lambda: value != threshold,
        'eq': lambda: value == threshold,
    }
    if operator not in comparisons:
        msg = _("Unable to filter on a unknown operator.")
        raise exception.InvalidFilterOperatorValue(msg)
    return comparisons[operator]()
| apache-2.0 |
v-iam/azure-sdk-for-python | azure-mgmt-search/azure/mgmt/search/operations/query_keys_operations.py | 4 | 13343 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
import uuid
from .. import models
class QueryKeysOperations(object):
    """QueryKeysOperations operations.

    NOTE: this class is AutoRest-generated; manual edits will be lost
    when the client is regenerated.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: The API version to use for each request. The current version is 2015-08-19. Constant value: "2015-08-19".
    """
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self.api_version = "2015-08-19"
        self.config = config
    def create(
            self, resource_group_name, search_service_name, name, search_management_request_options=None, custom_headers=None, raw=False, **operation_config):
        """Generates a new query key for the specified Search service. You can
        create up to 50 query keys per service.

        :param resource_group_name: The name of the resource group within the
         current subscription. You can obtain this value from the Azure
         Resource Manager API or the portal.
        :type resource_group_name: str
        :param search_service_name: The name of the Azure Search service
         associated with the specified resource group.
        :type search_service_name: str
        :param name: The name of the new query API key.
        :type name: str
        :param search_management_request_options: Additional parameters for
         the operation
        :type search_management_request_options:
         :class:`SearchManagementRequestOptions
         <azure.mgmt.search.models.SearchManagementRequestOptions>`
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`QueryKey <azure.mgmt.search.models.QueryKey>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        client_request_id = None
        if search_management_request_options is not None:
            client_request_id = search_management_request_options.client_request_id
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Search/searchServices/{searchServiceName}/createQueryKey/{name}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'searchServiceName': self._serialize.url("search_service_name", search_service_name, 'str'),
            'name': self._serialize.url("name", name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        if client_request_id is not None:
            header_parameters['x-ms-client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str')
        # Construct and send request
        request = self._client.post(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('QueryKey', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    def list_by_search_service(
            self, resource_group_name, search_service_name, search_management_request_options=None, custom_headers=None, raw=False, **operation_config):
        """Returns the list of query API keys for the given Azure Search service.

        :param resource_group_name: The name of the resource group within the
         current subscription. You can obtain this value from the Azure
         Resource Manager API or the portal.
        :type resource_group_name: str
        :param search_service_name: The name of the Azure Search service
         associated with the specified resource group.
        :type search_service_name: str
        :param search_management_request_options: Additional parameters for
         the operation
        :type search_management_request_options:
         :class:`SearchManagementRequestOptions
         <azure.mgmt.search.models.SearchManagementRequestOptions>`
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`QueryKeyPaged
         <azure.mgmt.search.models.QueryKeyPaged>`
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        client_request_id = None
        if search_management_request_options is not None:
            client_request_id = search_management_request_options.client_request_id
        # Paging callback: fetches either the first page or *next_link*.
        def internal_paging(next_link=None, raw=False):
            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Search/searchServices/{searchServiceName}/listQueryKeys'
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'searchServiceName': self._serialize.url("search_service_name", search_service_name, 'str'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            if client_request_id is not None:
                header_parameters['x-ms-client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        deserialized = models.QueryKeyPaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.QueryKeyPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
    def delete(
            self, resource_group_name, search_service_name, key, search_management_request_options=None, custom_headers=None, raw=False, **operation_config):
        """Deletes the specified query key. Unlike admin keys, query keys are not
        regenerated. The process for regenerating a query key is to delete and
        then recreate it.

        :param resource_group_name: The name of the resource group within the
         current subscription. You can obtain this value from the Azure
         Resource Manager API or the portal.
        :type resource_group_name: str
        :param search_service_name: The name of the Azure Search service
         associated with the specified resource group.
        :type search_service_name: str
        :param key: The query key to be deleted. Query keys are identified by
         value, not by name.
        :type key: str
        :param search_management_request_options: Additional parameters for
         the operation
        :type search_management_request_options:
         :class:`SearchManagementRequestOptions
         <azure.mgmt.search.models.SearchManagementRequestOptions>`
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: None
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        client_request_id = None
        if search_management_request_options is not None:
            client_request_id = search_management_request_options.client_request_id
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Search/searchServices/{searchServiceName}/deleteQueryKey/{key}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'searchServiceName': self._serialize.url("search_service_name", search_service_name, 'str'),
            'key': self._serialize.url("key", key, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        if client_request_id is not None:
            header_parameters['x-ms-client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str')
        # Construct and send request
        request = self._client.delete(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)
        # 404 is accepted: deleting an already-missing key is not an error.
        if response.status_code not in [200, 204, 404]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
| mit |
wavelets/chainer | tests/functions_tests/test_accuracy.py | 7 | 1290 | import unittest
import numpy
import six
import chainer
from chainer import cuda
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
# Initialize CUDA once for the whole test module when a GPU is available.
if cuda.available:
    cuda.init()
class TestAccuracy(unittest.TestCase):
    """Tests chainer.functions.accuracy against a hand-computed reference."""
    def setUp(self):
        # 10 samples with 3 class scores each; targets are valid class indices.
        self.x = numpy.random.uniform(-1, 1, (10, 3)).astype(numpy.float32)
        self.t = numpy.random.randint(3, size=(10,)).astype(numpy.int32)
    def check_forward(self, x_data, t_data):
        """Compare accuracy() output with an argmax-based reference count."""
        x = chainer.Variable(x_data)
        t = chainer.Variable(t_data)
        y = chainer.functions.accuracy(x, t)
        # accuracy() must yield a scalar float32.
        self.assertEqual(y.data.dtype, numpy.float32)
        self.assertEqual((), y.data.shape)
        # Reference: fraction of samples whose argmax matches the target.
        count = 0
        for i in six.moves.range(self.t.size):
            pred = self.x[i].argmax()
            if pred == self.t[i]:
                count += 1
        expected = float(count) / self.t.size
        gradient_check.assert_allclose(expected, cuda.to_cpu(y.data))
    @condition.retry(3)
    def test_forward_cpu(self):
        self.check_forward(self.x, self.t)
    @attr.gpu
    @condition.retry(3)
    def test_forward_gpu(self):
        self.check_forward(cuda.to_gpu(self.x), cuda.to_gpu(self.t))
testing.run_module(__name__, __file__)
| mit |
ToAruShiroiNeko/revscoring | setup.py | 1 | 1233 | import os
from setuptools import find_packages, setup
def read(fname):
    """Return the contents of *fname*, resolved relative to this file.

    Uses a context manager so the file handle is closed promptly (the
    original left it to the garbage collector).
    """
    with open(os.path.join(os.path.dirname(__file__), fname)) as f:
        return f.read()
def requirements(fname):
    """Return each line of *fname* (relative to this file), stripped.

    Empty lines are kept as empty strings, matching the previous
    behavior.  The file is opened with a context manager so the handle
    is closed deterministically (the original leaked it).
    """
    with open(os.path.join(os.path.dirname(__file__), fname)) as f:
        return [line.strip() for line in f]
# Package metadata for revscoring; entry point exposes the `revscoring`
# console command.
setup(
    name="revscoring",
    version="0.7.0", # change in revscoring/__init__.py
    author="Aaron Halfaker",
    author_email="ahalfaker@wikimedia.org",
    description=("A set of utilities for generating quality scores for " + \
                 "MediaWiki revisions"),
    license="MIT",
    entry_points = {
        'console_scripts': [
            'revscoring = revscoring.revscoring:main',
        ],
    },
    url="https://github.com/halfak/Revision-Scores",
    packages=find_packages(),
    long_description=read('README.rst'),
    install_requires=requirements("requirements.txt"),
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Environment :: Other Environment",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent"
    ],
)
| mit |
snowballhg/node-gyp | gyp/test/include_dirs/gyptest-default.py | 102 | 1073 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies use of include_dirs when using the default build target.
"""
import TestGyp
test = TestGyp.TestGyp()
if test.format == 'scons':
  test.skip_test('TODO: http://code.google.com/p/gyp/issues/detail?id=176\n')
# Generate, relocate and build the project, then check the output of the
# two built executables against the expected include-resolution order.
test.run_gyp('includes.gyp', chdir='src')
test.relocate('src', 'relocate/src')
test.build('includes.gyp', test.ALL, chdir='relocate/src')
expect = """\
Hello from includes.c
Hello from inc.h
Hello from include1.h
Hello from subdir/inc2/include2.h
Hello from shadow2/shadow.h
"""
test.run_built_executable('includes', stdout=expect, chdir='relocate/src')
# xcode places the subdir executable next to its own .gyp file.
if test.format == 'xcode':
  chdir='relocate/src/subdir'
else:
  chdir='relocate/src'
expect = """\
Hello from subdir/subdir_includes.c
Hello from subdir/inc.h
Hello from include1.h
Hello from subdir/inc2/include2.h
"""
test.run_built_executable('subdir_includes', stdout=expect, chdir=chdir)
test.pass_test()
| mit |
donspaulding/adspygoogle | examples/adspygoogle/dfp/v201204/get_custom_targeting_keys_by_statement.py | 2 | 2094 | #!/usr/bin/python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all predefined custom targeting keys. The statement
retrieves up to the maximum page size limit of 500. To create custom
targeting keys, run create_custom_targeting_keys_and_values.py."""
__author__ = 'api.shamjeff@gmail.com (Jeff Sham)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
# Initialize appropriate service.
custom_targeting_service = client.GetService(
    'CustomTargetingService', version='v201204')
# Bind the :type placeholder in the PQL statement to 'PREDEFINED'.
values = [{
    'key': 'type',
    'value': {
        'xsi_type': 'TextValue',
        'value': 'PREDEFINED'
    }
}]
filter_statement = {'query': 'WHERE type = :type LIMIT 500',
                    'values': values}
# Get custom targeting keys by statement.
response = custom_targeting_service.GetCustomTargetingKeysByStatement(
    filter_statement)[0]
# 'results' is absent when the query matches nothing.
keys = []
if 'results' in response:
  keys = response['results']
# Display results.
if keys:
  for key in keys:
    print ('Custom targeting key with id \'%s\', name \'%s\', display name '
           '\'%s\', and type \'%s\' was found.'
           % (key['id'], key['name'], key['displayName'], key['type']))
else:
  print 'No keys were found.'
| apache-2.0 |
vikigenius/ns3-mmwave | .waf-1.8.19-b1fc8f7baef51bd2db4c2971909a568d/waflib/Tools/fc.py | 18 | 4162 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file
from waflib import Utils,Task,Logs
from waflib.Tools import ccroot,fc_config,fc_scan
from waflib.TaskGen import feature,extension
from waflib.Configure import conf
ccroot.USELIB_VARS['fc']=set(['FCFLAGS','DEFINES','INCLUDES'])
ccroot.USELIB_VARS['fcprogram_test']=ccroot.USELIB_VARS['fcprogram']=set(['LIB','STLIB','LIBPATH','STLIBPATH','LINKFLAGS','RPATH','LINKDEPS'])
ccroot.USELIB_VARS['fcshlib']=set(['LIB','STLIB','LIBPATH','STLIBPATH','LINKFLAGS','RPATH','LINKDEPS'])
ccroot.USELIB_VARS['fcstlib']=set(['ARFLAGS','LINKDEPS'])
@feature('fcprogram','fcshlib','fcstlib','fcprogram_test')
def dummy(self):
	# Intentionally empty: declaring the features on the task generator
	# is sufficient here.
	pass
@extension('.f','.f90','.F','.F90','.for','.FOR')
def fc_hook(self,node):
	# Map every Fortran source file to an 'fc' compilation task.
	return self.create_compiled_task('fc',node)
@conf
def modfile(conf,name):
	# Turn a Fortran module name into its .mod file name, honouring the
	# compiler's capitalization convention (FC_MOD_CAPITALIZATION).
	return{'lower':name.lower()+'.mod','lower.MOD':name.upper()+'.MOD','UPPER.mod':name.upper()+'.mod','UPPER':name.upper()+'.MOD'}[conf.env.FC_MOD_CAPITALIZATION or'lower']
def get_fortran_tasks(tsk):
	# Return the Fortran compile tasks in tsk's build group that still
	# need module-dependency processing (see fc.runnable_status).
	bld=tsk.generator.bld
	tasks=bld.get_tasks_group(bld.get_group_idx(tsk.generator))
	return[x for x in tasks if isinstance(x,fc)and not getattr(x,'nomod',None)and not getattr(x,'mod_fortran_done',None)]
class fc(Task.Task):
	# Fortran compilation task; runnable_status() additionally wires up
	# inter-task ordering through produced/consumed .mod files.
	color='GREEN'
	run_str='${FC} ${FCFLAGS} ${FCINCPATH_ST:INCPATHS} ${FCDEFINES_ST:DEFINES} ${_FCMODOUTFLAGS} ${FC_TGT_F}${TGT[0].abspath()} ${FC_SRC_F}${SRC[0].abspath()}'
	vars=["FORTRANMODPATHFLAG"]
	def scan(self):
		# Delegate dependency extraction to the Fortran source parser.
		tmp=fc_scan.fortran_parser(self.generator.includes_nodes)
		tmp.task=self
		tmp.start(self.inputs[0])
		if Logs.verbose:
			Logs.debug('deps: deps for %r: %r; unresolved %r'%(self.inputs,tmp.nodes,tmp.names))
		return(tmp.nodes,tmp.names)
	def runnable_status(self):
		# The module wiring below runs once for the whole group; after it
		# is done each task falls back to the normal status logic.
		if getattr(self,'mod_fortran_done',None):
			return super(fc,self).runnable_status()
		bld=self.generator.bld
		lst=get_fortran_tasks(self)
		for tsk in lst:
			tsk.mod_fortran_done=True
		for tsk in lst:
			ret=tsk.runnable_status()
			if ret==Task.ASK_LATER:
				# Some task cannot be evaluated yet: undo the marker so the
				# wiring is retried on the next pass.
				for x in lst:
					x.mod_fortran_done=None
				return Task.ASK_LATER
		# outs: module file -> tasks producing it (MOD@ entries);
		# ins: module file -> tasks consuming it (USE@ entries).
		ins=Utils.defaultdict(set)
		outs=Utils.defaultdict(set)
		for tsk in lst:
			key=tsk.uid()
			for x in bld.raw_deps[key]:
				if x.startswith('MOD@'):
					name=bld.modfile(x.replace('MOD@',''))
					node=bld.srcnode.find_or_declare(name)
					if not getattr(node,'sig',None):
						node.sig=Utils.SIG_NIL
					tsk.set_outputs(node)
					outs[id(node)].add(tsk)
		for tsk in lst:
			key=tsk.uid()
			for x in bld.raw_deps[key]:
				if x.startswith('USE@'):
					name=bld.modfile(x.replace('USE@',''))
					node=bld.srcnode.find_resource(name)
					if node and node not in tsk.outputs:
						if not node in bld.node_deps[key]:
							bld.node_deps[key].append(node)
						ins[id(node)].add(tsk)
		# Order each consumer after all producers of the module it uses.
		for k in ins.keys():
			for a in ins[k]:
				a.run_after.update(outs[k])
				tmp=[]
				for t in outs[k]:
					tmp.extend(t.outputs)
				a.dep_nodes.extend(tmp)
				a.dep_nodes.sort(key=lambda x:x.abspath())
		# Invalidate cached signatures so the new deps are picked up.
		for tsk in lst:
			try:
				delattr(tsk,'cache_sig')
			except AttributeError:
				pass
		return super(fc,self).runnable_status()
class fcprogram(ccroot.link_task):
	# Links Fortran objects into an executable installed to ${BINDIR}.
	color='YELLOW'
	run_str='${FC} ${LINKFLAGS} ${FCLNK_SRC_F}${SRC} ${FCLNK_TGT_F}${TGT[0].abspath()} ${RPATH_ST:RPATH} ${FCSTLIB_MARKER} ${FCSTLIBPATH_ST:STLIBPATH} ${FCSTLIB_ST:STLIB} ${FCSHLIB_MARKER} ${FCLIBPATH_ST:LIBPATH} ${FCLIB_ST:LIB} ${LDFLAGS}'
	inst_to='${BINDIR}'
class fcshlib(fcprogram):
	# Same link command as fcprogram, but installed to ${LIBDIR}.
	inst_to='${LIBDIR}'
class fcprogram_test(fcprogram):
	# Link task used for configuration tests: always re-runs and captures
	# the linker's stdout/stderr on the build context instead of failing.
	def runnable_status(self):
		ret=super(fcprogram_test,self).runnable_status()
		if ret==Task.SKIP_ME:
			# Never skip: configuration checks must execute every time.
			ret=Task.RUN_ME
		return ret
	def exec_command(self,cmd,**kw):
		bld=self.generator.bld
		kw['shell']=isinstance(cmd,str)
		kw['stdout']=kw['stderr']=Utils.subprocess.PIPE
		kw['cwd']=bld.variant_dir
		bld.out=bld.err=''
		bld.to_log('command: %s\n'%cmd)
		kw['output']=0
		try:
			(bld.out,bld.err)=bld.cmd_and_log(cmd,**kw)
		except Exception:
			# A failed link is reported as a return code, not an exception.
			return-1
		if bld.out:
			bld.to_log("out: %s\n"%bld.out)
		if bld.err:
			bld.to_log("err: %s\n"%bld.err)
class fcstlib(ccroot.stlink_task):
	# Static library creation is fully handled by ccroot.stlink_task.
	pass
| gpl-2.0 |
fitzgen/servo | tests/wpt/web-platform-tests/tools/py/py/_error.py | 186 | 2818 | """
create errno-specific classes for IO or os calls.
"""
import sys, os, errno
class Error(EnvironmentError):
def __repr__(self):
return "%s.%s %r: %s " %(self.__class__.__module__,
self.__class__.__name__,
self.__class__.__doc__,
" ".join(map(str, self.args)),
#repr(self.args)
)
def __str__(self):
s = "[%s]: %s" %(self.__class__.__doc__,
" ".join(map(str, self.args)),
)
return s
_winerrnomap = {
2: errno.ENOENT,
3: errno.ENOENT,
17: errno.EEXIST,
13: errno.EBUSY, # empty cd drive, but ENOMEDIUM seems unavailiable
22: errno.ENOTDIR,
20: errno.ENOTDIR,
267: errno.ENOTDIR,
5: errno.EACCES, # anything better?
}
class ErrorMaker(object):
    """ lazily provides Exception classes for each possible POSIX errno
        (as defined per the 'errno' module).  All such instances
        subclass EnvironmentError.
    """
    Error = Error
    # Shared cache: errno (int) -> generated Error subclass.
    _errno2class = {}

    def __getattr__(self, name):
        # Attribute access such as error.ENOENT lazily builds the matching
        # exception class and caches it on the instance so __getattr__ is
        # only hit once per name.
        if name[0] == "_":
            raise AttributeError(name)
        eno = getattr(errno, name)
        cls = self._geterrnoclass(eno)
        setattr(self, name, cls)
        return cls

    def _geterrnoclass(self, eno):
        # Build (at most once per errno) a subclass of Error named after the
        # errno symbol, carrying the human-readable strerror text as __doc__.
        try:
            return self._errno2class[eno]
        except KeyError:
            clsname = errno.errorcode.get(eno, "UnknownErrno%d" % (eno,))
            errorcls = type(Error)(clsname, (Error,),
                                   {'__module__': 'py.error',
                                    '__doc__': os.strerror(eno)})
            self._errno2class[eno] = errorcls
            return errorcls

    def checked_call(self, func, *args, **kwargs):
        """ call a function and raise an errno-exception if applicable. """
        __tracebackhide__ = True
        try:
            return func(*args, **kwargs)
        except self.Error:
            raise
        except (OSError, EnvironmentError):
            cls, value, tb = sys.exc_info()
            if not hasattr(value, 'errno'):
                raise
            __tracebackhide__ = False
            # NOTE(review): this local deliberately shadows the errno module
            # for the rest of this handler.
            errno = value.errno
            try:
                # On non-Windows interpreters the name WindowsError is
                # undefined, so evaluating the isinstance() check itself
                # raises NameError -- which the handler below relies on.
                if not isinstance(value, WindowsError):
                    raise NameError
            except NameError:
                # we are not on Windows, or we got a proper OSError
                cls = self._geterrnoclass(errno)
            else:
                try:
                    cls = self._geterrnoclass(_winerrnomap[errno])
                except KeyError:
                    # Unmapped Windows error code: re-raise the original.
                    raise value
            raise cls("%s%r" % (func.__name__, args))
        # NOTE(review): unreachable -- every path above returns or raises.
        __tracebackhide__ = True

# Singleton through which errno-specific exception classes are accessed.
error = ErrorMaker()
| mpl-2.0 |
blakfeld/ansible | lib/ansible/plugins/shell/csh.py | 92 | 1086 | # (c) 2014, Chris Church <chris@ninemoreminutes.com>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.shell.sh import ShellModule as ShModule
class ShellModule(ShModule):
    # csh variant of the generic sh shell plugin.

    # How to end lines in a python script one-liner
    _SHELL_EMBEDDED_PY_EOL = '\\\n'

    def env_prefix(self, **kwargs):
        # csh has no `VAR=value cmd` prefix syntax, so the inherited
        # environment prefix is wrapped in an explicit `env` invocation.
        prefix = super(ShellModule, self).env_prefix(**kwargs)
        return 'env ' + prefix
| gpl-3.0 |
PennartLoettring/Poettrix | rootfs/usr/lib/python3.4/site-packages/xcbgen/matcher.py | 3 | 3694 | '''
XML parser. One function for each top-level element in the schema.
Most functions just declare a new object and add it to the module.
For typedefs, eventcopies, xidtypes, and other aliases though,
we do not create a new type object, we just record the existing one under a new name.
'''
from os.path import join
from xml.etree.cElementTree import parse
from xcbgen.xtypes import *
def import_(node, module, namespace):
    '''
    For imports, we load the file, create a new namespace object,
    execute recursively, then record the import (for header files, etc.)
    '''
    # To avoid circular import error
    from xcbgen import state
    # Track nesting depth so the module can tell top-level from nested imports.
    module.import_level = module.import_level + 1
    new_file = join(namespace.dir, '%s.xml' % node.text)
    # NOTE(review): new_root is never used here; the Namespace presumably
    # re-parses the file itself -- confirm before removing.
    new_root = parse(new_file).getroot()
    new_namespace = state.Namespace(new_file)
    execute(module, new_namespace)
    module.import_level = module.import_level - 1
    # Only register an import the first time it is seen.
    if not module.has_import(node.text):
        module.add_import(node.text, new_namespace)
def typedef(node, module, namespace):
    """Register an existing type under a new (aliased) name.

    No new type object is created: the type looked up via ``oldname`` is
    re-registered under ``newname`` in the current namespace.
    """
    # Renamed locals: the originals shadowed the builtins `id` and `type`.
    new_id = node.get('newname')
    name = namespace.prefix + (new_id,)
    old_type = module.get_type(node.get('oldname'))
    module.add_type(new_id, namespace.ns, name, old_type)
def xidtype(node, module, namespace):
    """Register an XID type: an alias of CARD32 under the declared name."""
    xid_name = node.get('name')
    qualified = namespace.prefix + (xid_name,)
    card32 = module.get_type('CARD32')
    module.add_type(xid_name, namespace.ns, qualified, card32)
def xidunion(node, module, namespace):
    """Register an XID union; its members are ignored and it is treated
    as a plain CARD32 alias, exactly like xidtype."""
    union_name = node.get('name')
    qualified = namespace.prefix + (union_name,)
    module.add_type(union_name, namespace.ns, qualified,
                    module.get_type('CARD32'))
def enum(node, module, namespace):
    """Create an Enum type from its XML definition and register it."""
    enum_name = node.get('name')
    qualified = namespace.prefix + (enum_name,)
    module.add_type(enum_name, namespace.ns, qualified, Enum(qualified, node))
def struct(node, module, namespace):
    """Create a Struct type from its XML definition and register it."""
    struct_name = node.get('name')
    qualified = namespace.prefix + (struct_name,)
    module.add_type(struct_name, namespace.ns, qualified,
                    Struct(qualified, node))
def union(node, module, namespace):
    """Create a Union type from its XML definition and register it."""
    union_name = node.get('name')
    qualified = namespace.prefix + (union_name,)
    module.add_type(union_name, namespace.ns, qualified,
                    Union(qualified, node))
def request(node, module, namespace):
    """Create a Request from its XML definition and register it."""
    req_name = node.get('name')
    qualified = namespace.prefix + (req_name,)
    module.add_request(req_name, qualified, Request(qualified, node))
def event(node, module, namespace):
    """Create an Event, record its primary opcode, and register it."""
    ev_name = node.get('name')
    qualified = namespace.prefix + (ev_name,)
    ev = Event(qualified, node)
    # True marks this as the event's main (defining) opcode.
    ev.add_opcode(node.get('number'), qualified, True)
    module.add_event(ev_name, qualified, ev)
def eventcopy(node, module, namespace):
    """Alias an existing event under a new name with a secondary opcode.

    No new Event object is created; the referenced event gains an extra
    (non-primary) opcode and is registered again under the new name.
    """
    alias = node.get('name')
    qualified = namespace.prefix + (alias,)
    existing = module.get_event(node.get('ref'))
    existing.add_opcode(node.get('number'), qualified, False)
    module.add_event(alias, qualified, existing)
def error(node, module, namespace):
    """Create an Error, record its primary opcode, and register it."""
    err_name = node.get('name')
    qualified = namespace.prefix + (err_name,)
    err = Error(qualified, node)
    # True marks this as the error's main (defining) opcode.
    err.add_opcode(node.get('number'), qualified, True)
    module.add_error(err_name, qualified, err)
def errorcopy(node, module, namespace):
    """Alias an existing error under a new name with a secondary opcode."""
    alias = node.get('name')
    qualified = namespace.prefix + (alias,)
    existing = module.get_error(node.get('ref'))
    existing.add_opcode(node.get('number'), qualified, False)
    module.add_error(alias, qualified, existing)
# Dispatch table mapping each top-level XML element tag to its handler.
funcs = {'import' : import_,
         'typedef' : typedef,
         'xidtype' : xidtype,
         'xidunion' : xidunion,
         'enum' : enum,
         'struct' : struct,
         'union' : union,
         'request' : request,
         'event' : event,
         'eventcopy' : eventcopy,
         'error' : error,
         'errorcopy' : errorcopy}
def execute(module, namespace):
    """Dispatch every top-level XML element to its handler from funcs."""
    for element in list(namespace.root):
        handler = funcs[element.tag]
        handler(element, module, namespace)
| gpl-2.0 |
tmpgit/intellij-community | python/helpers/pycharm/lettuce_runner.py | 43 | 8795 | # coding=utf-8
"""
BDD lettuce framework runner
TODO: Support other params (like tags) as well.
Supports only 2 params now: folder to search "features" for or file and "-s scenario_index"
"""
import inspect
import optparse
import os
import _bdd_utils
__author__ = 'Ilya.Kazakevich'
from lettuce.exceptions import ReasonToFail
import lettuce
from lettuce import core
class _LettuceRunner(_bdd_utils.BddRunner):
    """
    Lettuce runner (BddRunner for lettuce)
    """

    def __init__(self, base_dir, what_to_run, scenarios, options):
        """
        :param scenarios scenario numbers to run
        :type scenarios list
        :param base_dir base directory to run tests in
        :type base_dir: str
        :param what_to_run folder or file to run
        :type options optparse.Values
        :param options optparse options passed by user
        :type what_to_run str
        """
        super(_LettuceRunner, self).__init__(base_dir)
        # TODO: Copy/Paste with lettuce.bin, need to reuse somehow
        # Delete args that do not exist in constructor
        args_to_pass = options.__dict__
        runner_args = inspect.getargspec(lettuce.Runner.__init__)[0]
        unknown_args = set(args_to_pass.keys()) - set(runner_args)
        # NOTE(review): relies on Python 2's eager map() for the deletion
        # side effect; under Python 3 map() is lazy and this would be a no-op.
        map(args_to_pass.__delitem__, unknown_args)
        # Tags is special case and need to be preprocessed
        self.__tags = None  # Store tags in field
        if 'tags' in args_to_pass.keys() and args_to_pass['tags']:
            args_to_pass['tags'] = [tag.strip('@') for tag in args_to_pass['tags']]
            self.__tags = set(args_to_pass['tags'])
        # Special cases we pass directly
        args_to_pass['base_path'] = what_to_run
        args_to_pass['scenarios'] = ",".join(scenarios)
        self.__runner = lettuce.Runner(**args_to_pass)

    def _get_features_to_run(self):
        super(_LettuceRunner, self)._get_features_to_run()
        features = []
        if self.__runner.single_feature:  # We need to run one and only one feature
            features = [core.Feature.from_file(self.__runner.single_feature)]
        else:
            # Find all features in dir
            for feature_file in self.__runner.loader.find_feature_files():
                feature = core.Feature.from_file(feature_file)
                assert isinstance(feature, core.Feature), feature
                # TODO: cut out due to https://github.com/gabrielfalcao/lettuce/issues/451 Fix when this issue fixed
                feature.scenarios = filter(lambda s: not s.outlines, feature.scenarios)
                if feature.scenarios:
                    features.append(feature)
        # Choose only selected scenarios
        if self.__runner.scenarios:
            for feature in features:
                filtered_feature_scenarios = []
                for index in [i - 1 for i in self.__runner.scenarios]:  # decrease index by 1
                    if index < len(feature.scenarios):
                        filtered_feature_scenarios.append(feature.scenarios[index])
                feature.scenarios = filtered_feature_scenarios
        # Filter out tags TODO: Share with behave_runner.py#__filter_scenarios_by_args
        if self.__tags:
            for feature in features:
                feature.scenarios = filter(lambda s: set(s.tags) & self.__tags, feature.scenarios)
        return features

    def _run_tests(self):
        super(_LettuceRunner, self)._run_tests()
        self.__install_hooks()
        self.__runner.run()

    def __step(self, is_started, step):
        """
        Reports step start / stop
        :type step core.Step
        :param step: step
        """
        test_name = step.sentence
        if is_started:
            self._test_started(test_name, step.described_at)
        elif step.passed:
            self._test_passed(test_name)
        elif step.failed:
            reason = step.why
            assert isinstance(reason, ReasonToFail), reason
            self._test_failed(test_name, message=reason.exception.message, details=reason.traceback)
        elif step.has_definition:
            # Step defined but not executed (e.g. an earlier step failed).
            self._test_skipped(test_name, "In lettuce, we do know the reason", step.described_at)
        else:
            self._test_undefined(test_name, step.described_at)

    def __install_hooks(self):
        """
        Installs required hooks
        """
        # Install hooks: forward every lettuce lifecycle callback into the
        # BddRunner reporting API.
        lettuce.before.each_feature(
            lambda f: self._feature_or_scenario(True, f.name, f.described_at))
        lettuce.after.each_feature(
            lambda f: self._feature_or_scenario(False, f.name, f.described_at))
        lettuce.before.each_scenario(
            lambda s: self.__scenario(True, s))
        lettuce.after.each_scenario(
            lambda s: self.__scenario(False, s))
        lettuce.before.each_background(
            lambda b, *args: self._background(True, b.feature.described_at))
        lettuce.after.each_background(
            lambda b, *args: self._background(False, b.feature.described_at))
        lettuce.before.each_step(lambda s: self.__step(True, s))
        lettuce.after.each_step(lambda s: self.__step(False, s))

    def __scenario(self, is_started, scenario):
        """
        Reports scenario launched
        :type scenario core.Scenario
        :param scenario: scenario
        """
        if scenario.outlines:
            scenario.steps = []  # Clear to prevent running. TODO: Fix when this issue fixed
            scenario.background = None  # TODO: undocumented
            return
        self._feature_or_scenario(is_started, scenario.name, scenario.described_at)
def _get_args():
    """
    Get options passed by user

    :return: tuple (options, args), see optparse
    """
    # TODO: Copy/Paste with lettuce.bin, need to reuse somehow
    # The option set mirrors lettuce's own command line so unrecognized
    # entries can be stripped before constructing lettuce.Runner.
    parser = optparse.OptionParser()
    parser.add_option("-v", "--verbosity",
                      dest="verbosity",
                      default=0,  # We do not need verbosity due to GUI we use (although user may override it)
                      help='The verbosity level')
    parser.add_option("-s", "--scenarios",
                      dest="scenarios",
                      default=None,
                      help='Comma separated list of scenarios to run')
    parser.add_option("-t", "--tag",
                      dest="tags",
                      default=None,
                      action='append',
                      help='Tells lettuce to run the specified tags only; '
                           'can be used multiple times to define more tags'
                           '(prefixing tags with "-" will exclude them and '
                           'prefixing with "~" will match approximate words)')
    parser.add_option("-r", "--random",
                      dest="random",
                      action="store_true",
                      default=False,
                      help="Run scenarios in a more random order to avoid interference")
    parser.add_option("--with-xunit",
                      dest="enable_xunit",
                      action="store_true",
                      default=False,
                      help='Output JUnit XML test results to a file')
    parser.add_option("--xunit-file",
                      dest="xunit_file",
                      default=None,
                      type="string",
                      help='Write JUnit XML to this file. Defaults to '
                           'lettucetests.xml')
    parser.add_option("--with-subunit",
                      dest="enable_subunit",
                      action="store_true",
                      default=False,
                      help='Output Subunit test results to a file')
    parser.add_option("--subunit-file",
                      dest="subunit_filename",
                      default=None,
                      help='Write Subunit data to this file. Defaults to '
                           'subunit.bin')
    parser.add_option("--failfast",
                      dest="failfast",
                      default=False,
                      action="store_true",
                      help='Stop running in the first failure')
    parser.add_option("--pdb",
                      dest="auto_pdb",
                      default=False,
                      action="store_true",
                      help='Launches an interactive debugger upon error')
    return parser.parse_args()
if __name__ == "__main__":
    # Entry point: CLI options come from the command line, while the set of
    # tests to run is passed in through environment variables by the IDE.
    options, args = _get_args()
    (base_dir, scenarios, what_to_run) = _bdd_utils.get_what_to_run_by_env(os.environ)
    if len(what_to_run) > 1:
        raise Exception("Lettuce can't run more than one file now")
    _bdd_utils.fix_win_drive(what_to_run[0])
_LettuceRunner(base_dir, what_to_run[0], scenarios, options).run() | apache-2.0 |
rosudrag/Freemium-winner | VirtualEnvironment/Lib/site-packages/selenium/webdriver/common/action_chains.py | 67 | 9934 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
The ActionChains implementation,
"""
from selenium.webdriver.remote.command import Command
from selenium.webdriver.common.keys import Keys
class ActionChains(object):
    """
    ActionChains are a way to automate low level interactions such as
    mouse movements, mouse button actions, key press, and context menu interactions.
    This is useful for doing more complex actions like hover over and drag and drop.

    Generate user actions.
       When you call methods for actions on the ActionChains object,
       the actions are stored in a queue in the ActionChains object.
       When you call perform(), the events are fired in the order they
       are queued up.

    ActionChains can be used in a chain pattern::

        menu = driver.find_element_by_css_selector(".nav")
        hidden_submenu = driver.find_element_by_css_selector(".nav #submenu1")

        ActionChains(driver).move_to_element(menu).click(hidden_submenu).perform()

    Or actions can be queued up one by one, then performed.::

        menu = driver.find_element_by_css_selector(".nav")
        hidden_submenu = driver.find_element_by_css_selector(".nav #submenu1")

        actions = ActionChains(driver)
        actions.move_to_element(menu)
        actions.click(hidden_submenu)
        actions.perform()

    Either way, the actions are performed in the order they are called, one after
    another.
    """

    def __init__(self, driver):
        """
        Creates a new ActionChains.

        :Args:
         - driver: The WebDriver instance which performs user actions.
        """
        self._driver = driver
        # Queue of zero-argument callables; each fires one remote command
        # when perform() is called.
        self._actions = []

    def perform(self):
        """
        Performs all stored actions.
        """
        for action in self._actions:
            action()

    def click(self, on_element=None):
        """
        Clicks an element.

        :Args:
         - on_element: The element to click.
           If None, clicks on current mouse position.
        """
        if on_element: self.move_to_element(on_element)
        self._actions.append(lambda:
            self._driver.execute(Command.CLICK, {'button': 0}))
        return self

    def click_and_hold(self, on_element=None):
        """
        Holds down the left mouse button on an element.

        :Args:
         - on_element: The element to mouse down.
           If None, clicks on current mouse position.
        """
        if on_element: self.move_to_element(on_element)
        self._actions.append(lambda:
            self._driver.execute(Command.MOUSE_DOWN, {}))
        return self

    def context_click(self, on_element=None):
        """
        Performs a context-click (right click) on an element.

        :Args:
         - on_element: The element to context-click.
           If None, clicks on current mouse position.
        """
        if on_element: self.move_to_element(on_element)
        # button 2 is the right mouse button in the wire protocol.
        self._actions.append(lambda:
            self._driver.execute(Command.CLICK, {'button': 2}))
        return self

    def double_click(self, on_element=None):
        """
        Double-clicks an element.

        :Args:
         - on_element: The element to double-click.
           If None, clicks on current mouse position.
        """
        if on_element: self.move_to_element(on_element)
        self._actions.append(lambda:
            self._driver.execute(Command.DOUBLE_CLICK, {}))
        return self

    def drag_and_drop(self, source, target):
        """
        Holds down the left mouse button on the source element,
           then moves to the target element and releases the mouse button.

        :Args:
         - source: The element to mouse down.
         - target: The element to mouse up.
        """
        self.click_and_hold(source)
        self.release(target)
        return self

    def drag_and_drop_by_offset(self, source, xoffset, yoffset):
        """
        Holds down the left mouse button on the source element,
           then moves to the target offset and releases the mouse button.

        :Args:
         - source: The element to mouse down.
         - xoffset: X offset to move to.
         - yoffset: Y offset to move to.
        """
        self.click_and_hold(source)
        self.move_by_offset(xoffset, yoffset)
        self.release()
        return self

    def key_down(self, value, element=None):
        """
        Sends a key press only, without releasing it.
           Should only be used with modifier keys (Control, Alt and Shift).

        :Args:
         - value: The modifier key to send. Values are defined in `Keys` class.
         - element: The element to send keys.
           If None, sends a key to current focused element.

        Example, pressing ctrl+c::

            ActionChains(driver).key_down(Keys.CONTROL).send_keys('c').key_up(Keys.CONTROL).perform()

        """
        if element: self.click(element)
        self._actions.append(lambda:
            self._driver.execute(Command.SEND_KEYS_TO_ACTIVE_ELEMENT, {
                "value": self._keys_to_typing(value) }))
        return self

    def key_up(self, value, element=None):
        """
        Releases a modifier key.

        :Args:
         - value: The modifier key to send. Values are defined in Keys class.
         - element: The element to send keys.
           If None, sends a key to current focused element.

        Example, pressing ctrl+c::

            ActionChains(driver).key_down(Keys.CONTROL).send_keys('c').key_up(Keys.CONTROL).perform()

        """
        if element: self.click(element)
        self._actions.append(lambda:
            self._driver.execute(Command.SEND_KEYS_TO_ACTIVE_ELEMENT, {
                "value": self._keys_to_typing(value) }))
        return self

    def move_by_offset(self, xoffset, yoffset):
        """
        Moving the mouse to an offset from current mouse position.

        :Args:
         - xoffset: X offset to move to, as a positive or negative integer.
         - yoffset: Y offset to move to, as a positive or negative integer.
        """
        self._actions.append(lambda:
            self._driver.execute(Command.MOVE_TO, {
                'xoffset': int(xoffset),
                'yoffset': int(yoffset)}))
        return self

    def move_to_element(self, to_element):
        """
        Moving the mouse to the middle of an element.

        :Args:
         - to_element: The WebElement to move to.
        """
        self._actions.append(lambda:
            self._driver.execute(Command.MOVE_TO, {'element': to_element.id}))
        return self

    def move_to_element_with_offset(self, to_element, xoffset, yoffset):
        """
        Move the mouse by an offset of the specified element.
           Offsets are relative to the top-left corner of the element.

        :Args:
         - to_element: The WebElement to move to.
         - xoffset: X offset to move to.
         - yoffset: Y offset to move to.
        """
        self._actions.append(lambda:
            self._driver.execute(Command.MOVE_TO, {
                'element': to_element.id,
                'xoffset': int(xoffset),
                'yoffset': int(yoffset)}))
        return self

    def release(self, on_element=None):
        """
        Releasing a held mouse button on an element.

        :Args:
         - on_element: The element to mouse up.
           If None, releases on current mouse position.
        """
        if on_element: self.move_to_element(on_element)
        self._actions.append(lambda:
            self._driver.execute(Command.MOUSE_UP, {}))
        return self

    def send_keys(self, *keys_to_send):
        """
        Sends keys to current focused element.

        :Args:
         - keys_to_send: The keys to send.  Modifier keys constants can be found in the
           'Keys' class.
        """
        self._actions.append(lambda:
            self._driver.execute(Command.SEND_KEYS_TO_ACTIVE_ELEMENT,
                { 'value': self._keys_to_typing(keys_to_send)}))
        return self

    def send_keys_to_element(self, element, *keys_to_send):
        """
        Sends keys to an element.

        :Args:
         - element: The element to send keys.
         - keys_to_send: The keys to send.  Modifier keys constants can be found in the
           'Keys' class.
        """
        self._actions.append(lambda:
            element.send_keys(*keys_to_send))
        return self

    def _keys_to_typing(self, value):
        # Flatten the mixed sequence of Keys constants / ints / strings into
        # a flat list of single characters for the wire protocol.
        # NOTE(review): Keys attributes look like plain string constants, so
        # the isinstance(val, Keys) branch may only trigger if callers pass
        # actual Keys instances -- confirm against the Keys class.
        typing = []
        for val in value:
            if isinstance(val, Keys):
                typing.append(val)
            elif isinstance(val, int):
                # Integers are stringified and sent digit by digit.
                val = str(val)
                for i in range(len(val)):
                    typing.append(val[i])
            else:
                for i in range(len(val)):
                    typing.append(val[i])
        return typing

    # Context manager so ActionChains can be used in a 'with .. as' statements.
    def __enter__(self):
        return self # Return created instance of self.

    def __exit__(self, _type, _value, _traceback):
        pass # Do nothing, does not require additional cleanup.
| mit |
MSusik/invenio | invenio/modules/formatter/registry.py | 2 | 2577 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2013, 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
import os
from invenio.ext.registry import ModuleAutoDiscoverySubRegistry
from flask.ext.registry import PkgResourcesDirDiscoveryRegistry, \
ModuleAutoDiscoveryRegistry, RegistryProxy
from invenio.utils.datastructures import LazyDict
# Auto-discovered registry of format element modules ('format_elements'
# subpackages of each Invenio module).
format_elements = RegistryProxy(
    'format_elements',
    ModuleAutoDiscoverySubRegistry,
    'format_elements'
)

# Packages that provide a 'format_templates' directory.
format_templates_directories = RegistryProxy(
    'format_templates_directories',
    ModuleAutoDiscoveryRegistry,
    'format_templates'
)

# Individual template files found inside the directories above.
format_templates = RegistryProxy(
    'format_templates',
    PkgResourcesDirDiscoveryRegistry,
    '.', registry_namespace=format_templates_directories
)

# Packages that provide an 'output_formats' directory.
output_formats_directories = RegistryProxy(
    'output_formats_directories',
    ModuleAutoDiscoveryRegistry,
    'output_formats'
)

# Individual output format files found inside the directories above.
output_formats = RegistryProxy(
    'output_formats',
    PkgResourcesDirDiscoveryRegistry,
    '.', registry_namespace=output_formats_directories
)
def create_format_templates_lookup():
    """Map relative template paths (up to 4 levels deep) to absolute paths."""
    lookup = {}

    def _walk(path, level=1):
        # Recurse at most 4 directory levels below a registered root.
        if level > 4:
            return
        normalized = os.path.normpath(path)
        if not os.path.isdir(normalized):
            pieces = normalized.split(os.path.sep)
            # Key by the path fragment relative to the registered root.
            lookup[os.path.sep.join(pieces[-level:])] = normalized
        else:
            for entry in os.listdir(normalized):
                _walk(os.path.join(normalized, entry), level=level + 1)

    # Iterate in reverse so that earlier-registered templates overwrite
    # later ones and therefore take precedence.
    for root in reversed(format_templates):
        _walk(root)
    return lookup

format_templates_lookup = LazyDict(create_format_templates_lookup)
def create_output_formats_lookup():
    """Map output-format file basenames to their first registered path."""
    lookup = {}
    for path in output_formats:
        # The first registration of a given basename wins; later duplicates
        # are ignored.
        lookup.setdefault(os.path.basename(path), path)
    return lookup

output_formats_lookup = LazyDict(create_output_formats_lookup)
| gpl-2.0 |
sid5432/pyOTDR | pyotdr/fxdparams.py | 1 | 13020 | import sys
from datetime import datetime, timezone
import logging
from . import parts
logger = logging.getLogger(__name__)
# Separator prefix used in debug log lines of this module.
sep = " :"

# Two-letter distance-unit codes (as stored in the SOR file) -> label.
unit_map = {
    "mt": " (meters)",
    "km": " (kilometers)",
    "mi": " (miles)",
    "kf": " (kilo-ft)",
}

# Two-letter trace-type codes -> human readable description.
tracetype = {
    "ST": "[standard trace]",
    "RT": "[reverse trace]",
    "DT": "[difference trace]",
    "RF": "[reference]",
}
def process(fh, results):
    """
    Parse the FxdParams (fixed parameters) block of a SOR file.

    fh: file handle;
    results: dict for results;
    we assume mapblock.process() has already been run, so that
    results["blocks"]["FxdParams"] holds the block position and size.

    Returns "ok" on success, "nok" on failure.
    """
    bname = "FxdParams"
    hsize = len(bname) + 1  # include trailing '\0'
    pname = "FxdParams.process():"
    status = "nok"

    # Fixed: the original bare `except:` swallowed every error here
    # (including programming errors); only missing keys are expected.
    try:
        startpos = results["blocks"][bname]["pos"]
    except KeyError:
        logger.error("{} {} block starting position unknown".format(pname, bname))
        return status
    fh.seek(startpos)

    # Renamed from `format` to avoid shadowing the builtin.
    sor_format = results["format"]

    if sor_format == 2:
        # Version-2 files repeat the block name as a NUL-terminated header.
        mystr = fh.read(hsize).decode("ascii")
        if mystr != bname + "\0":
            logger.error("{} incorrect header {}".format(pname, mystr))
            return status

    results[bname] = dict()

    # Field tables: (name, start-pos, length in bytes, display type,
    # multiplier, precision, units).  Display type selects the decoder in
    # _process_fields(): 'v' unsigned int, 'i' signed int, 'h' hex, 's' string.
    if sor_format == 1:
        plist = (
            ["date/time", 0, 4, "v", "", "", ""],  # seconds in Unix time
            ["unit", 4, 2, "s", "", "", ""],  # distance units, 2 char (km,mt,ft,kf,mi)
            ["wavelength", 6, 2, "v", 0.1, 1, "nm"],
            # from Andrew Jones
            ["acquisition offset", 8, 4, "i", "", "", ""],  # units?
            ["number of pulse width entries", 12, 2, "v", "", "", ""],
            ["pulse width", 14, 2, "v", "", 0, "ns"],
            ["sample spacing", 16, 4, "v", 1e-8, "", "usec"],
            ["num data points", 20, 4, "v", "", "", ""],
            ["index", 24, 4, "v", 1e-5, 6, ""],  # index of refraction
            ["BC", 28, 2, "v", -0.1, 2, "dB"],  # backscattering coeff
            ["num averages", 30, 4, "v", "", "", ""],
            ["range", 34, 4, "v", 2e-5, 6, "km"],
            # from Andrew Jones
            ["front panel offset", 38, 4, "i", "", "", ""],
            ["noise floor level", 42, 2, "v", "", "", ""],  # unsigned
            ["noise floor scaling factor", 44, 2, "i", "", "", ""],
            ["power offset first point", 46, 2, "v", "", "", ""],  # unsigned
            ["loss thr", 48, 2, "v", 0.001, 3, "dB"],  # loss threshold
            ["refl thr", 50, 2, "v", -0.001, 3, "dB"],  # reflection threshold
            ["EOT thr", 52, 2, "v", 0.001, 3, "dB"],  # end-of-transmission threshold
        )
    else:
        plist = (
            ["date/time", 0, 4, "v", "", "", ""],  # seconds in Unix time
            ["unit", 4, 2, "s", "", "", ""],  # distance units, 2 char (km,mt,ft,kf,mi)
            ["wavelength", 6, 2, "v", 0.1, 1, "nm"],
            # from Andrew Jones
            ["acquisition offset", 8, 4, "i", "", "", ""],  # units?
            ["acquisition offset distance", 12, 4, "i", "", "", ""],  # units?
            ["number of pulse width entries", 16, 2, "v", "", "", ""],
            ["pulse width", 18, 2, "v", "", 0, "ns"],
            ["sample spacing", 20, 4, "v", 1e-8, "", "usec"],
            ["num data points", 24, 4, "v", "", "", ""],
            ["index", 28, 4, "v", 1e-5, 6, ""],  # index of refraction
            ["BC", 32, 2, "v", -0.1, 2, "dB"],  # backscattering coeff
            ["num averages", 34, 4, "v", "", "", ""],
            # from Dmitry Vaygant:
            ["averaging time", 38, 2, "v", 0.1, 0, "sec"],
            ["range", 40, 4, "v", 2e-5, 6, "km"],  # note x2
            # from Andrew Jones
            ["acquisition range distance", 44, 4, "i", "", "", ""],
            ["front panel offset", 48, 4, "i", "", "", ""],
            ["noise floor level", 52, 2, "v", "", "", ""],  # unsigned
            ["noise floor scaling factor", 54, 2, "i", "", "", ""],
            ["power offset first point", 56, 2, "v", "", "", ""],  # unsigned
            ["loss thr", 58, 2, "v", 0.001, 3, "dB"],  # loss threshold
            ["refl thr", 60, 2, "v", -0.001, 3, "dB"],  # reflection threshold
            ["EOT thr", 62, 2, "v", 0.001, 3, "dB"],  # end-of-transmission threshold
            ["trace type", 64, 2, "s", "", "", ""],  # ST,RT,DT, or RF
            # from Andrew Jones
            ["X1", 66, 4, "i", "", "", ""],
            ["Y1", 70, 4, "i", "", "", ""],
            ["X2", 74, 4, "i", "", "", ""],
            ["Y2", 78, 4, "i", "", "", ""],
        )

    # NOTE: the return value of _process_fields() was ignored by the
    # original code as well; behaviour is preserved.
    status = _process_fields(fh, plist, results)

    # read the rest of the block (just in case)
    endpos = results["blocks"][bname]["pos"] + results["blocks"][bname]["size"]
    fh.read(endpos - fh.tell())

    status = "ok"
    return status
# ================================================================
def _process_fields(fh, plist, results):
    """
    Decode each field described in *plist* from the file handle and store
    the formatted value in results["FxdParams"].

    plist rows are (name, start-pos, length, type, multiplier, precision,
    units).  Type codes: 'v' unsigned int (optionally scaled/rounded),
    'i' signed int, 'h' hex string, 's' raw UTF-8 string.

    Returns "ok" on success.
    """
    xref = results["FxdParams"]

    # functions used per type code:
    # 'h': parts.get_hex, 'v': parts.get_uint,
    # 's': raw read, 'i': parts.get_signed
    count = 0
    for name, _startpos, fsize, ftype, scale, precision, unit in plist:
        if ftype == "i":
            val = parts.get_signed(fh, fsize)
            xstr = val
        elif ftype == "v":
            val = parts.get_uint(fh, fsize)
            if scale != "":
                val *= scale
            if precision != "":
                # "%.*f" is equivalent to building "%.Nf" first.
                xstr = "%.*f" % (precision, val)
            else:
                xstr = val
        elif ftype == "h":
            xstr = parts.get_hex(fh, fsize)
        elif ftype == "s":
            xstr = fh.read(fsize).decode("utf-8")
        else:
            # Unknown type code: keep the raw bytes.
            val = fh.read(fsize)
            xstr = val
        # .................................
        if name == "date/time":
            # val is seconds since the Unix epoch, rendered in UTC.
            xstr = datetime.fromtimestamp(val, timezone.utc).strftime(
                "%a %b %d %H:%M:%S %Y"
            ) + (" (%d sec)" % val)
        elif name == "unit":
            xstr += unit_map[xstr]
        elif name == "trace type":
            # Fixed: a bare `except: pass` previously hid all errors here;
            # unknown codes are now kept verbatim via dict.get().
            xstr += tracetype.get(xstr, "")

        # don't bother even trying if there are multiple pulse width entries;
        # too lazy to restructure code to handle this case
        if name == "number of pulse width entries" and val > 1:
            logger.warning(
                "Cannot handle multiple pulse width entries ({}); aborting".format(val)
            )
            # TODO should raise an exception instead of brutally exiting
            sys.exit()

        # .................................
        logger.debug("%s %d. %s: %s %s" % (sep, count, name, xstr, unit))
        xref[name] = xstr if unit == "" else str(xstr) + " " + unit
        count += 1

    # corrections/adjustment: convert the raw sample spacing into physical
    # distance using the group index of refraction.
    ior = float(xref["index"])
    ss = xref["sample spacing"].split(" ")[0]
    dx = float(ss) * parts.sol / ior
    xref["range"] = dx * int(xref["num data points"])
    xref["resolution"] = dx * 1000.0  # in meters

    logger.debug("%s [adjusted for refractive index]" % (sep,))
    logger.debug("%s resolution = %.14f m" % (sep, xref["resolution"]))
    logger.debug("%s range = %.13f km" % (sep, xref["range"]))

    return "ok"
| gpl-3.0 |
myang321/django | django/contrib/gis/gdal/driver.py | 526 | 3260 | from ctypes import c_void_p
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import GDALException
from django.contrib.gis.gdal.prototypes import ds as vcapi, raster as rcapi
from django.utils import six
from django.utils.encoding import force_bytes, force_text
class Driver(GDALBase):
    """
    Wraps a GDAL/OGR Data Source Driver.
    For more information, see the C API source code:
    http://www.gdal.org/gdal_8h.html - http://www.gdal.org/ogr__api_8h.html
    """

    # Case-insensitive aliases for some GDAL/OGR Drivers.
    # For a complete list of original driver names see
    # http://www.gdal.org/ogr_formats.html (vector)
    # http://www.gdal.org/formats_list.html (raster)
    _alias = {
        # vector
        'esri': 'ESRI Shapefile',
        'shp': 'ESRI Shapefile',
        'shape': 'ESRI Shapefile',
        'tiger': 'TIGER',
        'tiger/line': 'TIGER',
        # raster
        'tiff': 'GTiff',
        'tif': 'GTiff',
        'jpeg': 'JPEG',
        'jpg': 'JPEG',
    }

    def __init__(self, dr_input):
        """
        Initializes an GDAL/OGR driver on either a string or integer input.
        """
        if isinstance(dr_input, six.string_types):
            # If a string name of the driver was passed in
            self.ensure_registered()

            # Checking the alias dictionary (case-insensitive) to see if an
            # alias exists for the given driver.
            if dr_input.lower() in self._alias:
                name = self._alias[dr_input.lower()]
            else:
                name = dr_input

            # Attempting to get the GDAL/OGR driver by the string name;
            # the vector API is consulted first, then the raster API.
            for iface in (vcapi, rcapi):
                driver = iface.get_driver_by_name(force_bytes(name))
                if driver:
                    break
        elif isinstance(dr_input, int):
            self.ensure_registered()
            for iface in (vcapi, rcapi):
                driver = iface.get_driver(dr_input)
                if driver:
                    break
        elif isinstance(dr_input, c_void_p):
            # Raw C pointer to an already-obtained driver.
            driver = dr_input
        else:
            raise GDALException('Unrecognized input type for GDAL/OGR Driver: %s' % str(type(dr_input)))

        # Making sure we get a valid pointer to the OGR Driver
        if not driver:
            raise GDALException('Could not initialize GDAL/OGR Driver on input: %s' % str(dr_input))
        self.ptr = driver

    def __str__(self):
        return self.name

    @classmethod
    def ensure_registered(cls):
        """
        Attempts to register all the data source drivers.
        """
        # Only register all if the driver count is 0 (or else all drivers
        # will be registered over and over again)
        if not cls.driver_count():
            vcapi.register_all()
            rcapi.register_all()

    @classmethod
    def driver_count(cls):
        """
        Returns the number of GDAL/OGR data source drivers registered.
        """
        return vcapi.get_driver_count() + rcapi.get_driver_count()

    @property
    def name(self):
        """
        Returns description/name string for this driver.
        """
        return force_text(rcapi.get_driver_description(self.ptr))
| bsd-3-clause |
dahool/vertaal | timezones/tests.py | 3 | 1821 |
__test__ = {"API_TESTS": r"""
>>> from datetime import datetime
>>> from django.conf import settings
>>> ORIGINAL_TIME_ZONE = settings.TIME_ZONE
>>> settings.TIME_ZONE = "UTC"
>>> from timezones import forms, decorators
>>> from timezones.utils import localtime_for_timezone, adjust_datetime_to_timezone
>>> localtime_for_timezone(datetime(2008, 6, 25, 18, 0, 0), "America/Denver").strftime("%m/%d/%Y %H:%I:%S")
'06/25/2008 12:12:00'
# the default case where no timezone is given explicitly. uses settings.TIME_ZONE.
>>> f = forms.LocalizedDateTimeField()
>>> f.clean("2008-05-30 14:30:00")
datetime.datetime(2008, 5, 30, 14, 30, tzinfo=<UTC>)
# specify a timezone explicity. this may come from a UserProfile for example.
>>> f = forms.LocalizedDateTimeField(timezone="America/Denver")
>>> f.clean("2008-05-30 14:30:00")
datetime.datetime(2008, 5, 30, 20, 30, tzinfo=<UTC>)
>>> f = forms.TimeZoneField()
>>> f.clean('US/Eastern')
<DstTzInfo 'US/Eastern' EST-1 day, 19:00:00 STD>
>>> f = forms.TimeZoneField(required=False)
>>> f.clean('')
u''
>>> f.clean('US/Eastern')
<DstTzInfo 'US/Eastern' EST-1 day, 19:00:00 STD>
>>> class Foo(object):
... datetime = datetime(2008, 6, 20, 23, 58, 17)
... @decorators.localdatetime('datetime')
... def localdatetime(self):
... return 'Australia/Lindeman'
...
>>> foo = Foo()
>>> foo.datetime
datetime.datetime(2008, 6, 20, 23, 58, 17)
>>> foo.localdatetime
datetime.datetime(2008, 6, 21, 9, 58, 17, tzinfo=<DstTzInfo 'Australia/Lindeman' EST+10:00:00 STD>)
>>> foo.localdatetime = datetime(2008, 6, 12, 23, 50, 0)
>>> foo.datetime
datetime.datetime(2008, 6, 12, 13, 50, tzinfo=<UTC>)
>>> foo.localdatetime
datetime.datetime(2008, 6, 12, 23, 50, tzinfo=<DstTzInfo 'Australia/Lindeman' EST+10:00:00 STD>)
>>> settings.TIME_ZONE = ORIGINAL_TIME_ZONE
"""}
| gpl-3.0 |
atztogo/phonopy | test/structure/test_atoms.py | 1 | 1399 | import unittest
from phonopy.structure.atoms import PhonopyAtoms
class TestCell(unittest.TestCase):
    """Smoke tests for constructing and printing PhonopyAtoms cells."""

    def setUp(self):
        """Build two test cells sharing the same lattice and positions."""
        lattice = [[4.65, 0, 0],
                   [0, 4.75, 0],
                   [0, 0, 3.25]]
        points = [[0.0, 0.0, 0.0],
                  [0.5, 0.5, 0.5],
                  [0.3, 0.3, 0.0],
                  [0.7, 0.7, 0.0],
                  [0.2, 0.8, 0.5],
                  [0.8, 0.2, 0.5]]
        # The second symbol set uses 'Ac', an element for which no mass
        # is defined.
        symbol_sets = (['Si'] * 2 + ['O'] * 4,
                       ['Ac'] * 2 + ['O'] * 4)
        self._cells = [PhonopyAtoms(cell=lattice,
                                    scaled_positions=points,
                                    symbols=symbols)
                       for symbols in symbol_sets]

    def tearDown(self):
        pass

    def test_atoms(self):
        """Print lattice vectors and fractional coordinates of each cell."""
        for unit_cell in self._cells:
            print(unit_cell.get_cell())
            for symbol, position in zip(unit_cell.get_chemical_symbols(),
                                        unit_cell.get_scaled_positions()):
                print("%s %s" % (symbol, position))

    def test_phonopy_atoms(self):
        """Round-trip each cell through the copy constructor and print it."""
        for unit_cell in self._cells:
            print(PhonopyAtoms(atoms=unit_cell))
# Allow running this module directly as a stand-alone test script.
if __name__ == '__main__':
    unittest.main()
| bsd-3-clause |
clovett/MissionPlanner | Lib/distutils/config.py | 53 | 4336 | """distutils.pypirc
Provides the PyPIRCCommand class, the base class for the command classes
that uses .pypirc in the distutils.command package.
"""
import os
from ConfigParser import ConfigParser
from distutils.cmd import Command
DEFAULT_PYPIRC = """\
[distutils]
index-servers =
pypi
[pypi]
username:%s
password:%s
"""
class PyPIRCCommand(Command):
    """Base command that knows how to handle the .pypirc file
    """
    DEFAULT_REPOSITORY = 'http://pypi.python.org/pypi'
    DEFAULT_REALM = 'pypi'
    # Per-command values; reset by initialize_options() and filled in from
    # command-line options / finalize_options().
    repository = None
    realm = None
    user_options = [
        ('repository=', 'r',
         "url of repository [default: %s]" % \
            DEFAULT_REPOSITORY),
        ('show-response', None,
         'display full response text from server')]
    boolean_options = ['show-response']

    def _get_rc_file(self):
        """Returns rc file path."""
        # The .pypirc file always lives in the user's home directory.
        return os.path.join(os.path.expanduser('~'), '.pypirc')

    def _store_pypirc(self, username, password):
        """Creates a default .pypirc file."""
        rc = self._get_rc_file()
        f = open(rc, 'w')
        try:
            f.write(DEFAULT_PYPIRC % (username, password))
        finally:
            f.close()
        try:
            # Restrict permissions to the owner: the file holds a
            # clear-text password. (0600 is Python 2 octal syntax.)
            os.chmod(rc, 0600)
        except OSError:
            # should do something better here
            pass

    def _read_pypirc(self):
        """Reads the .pypirc file.

        Returns a dict describing the matching server section
        ('username', 'password', 'repository', 'server', 'realm'), or an
        empty dict when no usable configuration is found. Supports both
        the modern [distutils]/index-servers layout and the legacy
        [server-login] format.
        """
        rc = self._get_rc_file()
        if os.path.exists(rc):
            self.announce('Using PyPI login from %s' % rc)
            repository = self.repository or self.DEFAULT_REPOSITORY
            config = ConfigParser()
            config.read(rc)
            sections = config.sections()
            if 'distutils' in sections:
                # let's get the list of servers
                index_servers = config.get('distutils', 'index-servers')
                _servers = [server.strip() for server in
                            index_servers.split('\n')
                            if server.strip() != '']
                if _servers == []:
                    # nothing set, let's try to get the default pypi
                    if 'pypi' in sections:
                        _servers = ['pypi']
                    else:
                        # the file is not properly defined, returning
                        # an empty dict
                        return {}
                for server in _servers:
                    current = {'server': server}
                    current['username'] = config.get(server, 'username')
                    # optional params
                    for key, default in (('repository',
                                          self.DEFAULT_REPOSITORY),
                                         ('realm', self.DEFAULT_REALM),
                                         ('password', None)):
                        if config.has_option(server, key):
                            current[key] = config.get(server, key)
                        else:
                            current[key] = default
                    # Match either by server name or by repository URL;
                    # the first matching section wins.
                    if (current['server'] == repository or
                        current['repository'] == repository):
                        return current
            elif 'server-login' in sections:
                # old format
                server = 'server-login'
                if config.has_option(server, 'repository'):
                    repository = config.get(server, 'repository')
                else:
                    repository = self.DEFAULT_REPOSITORY
                return {'username': config.get(server, 'username'),
                        'password': config.get(server, 'password'),
                        'repository': repository,
                        'server': server,
                        'realm': self.DEFAULT_REALM}
        return {}

    def initialize_options(self):
        """Initialize options."""
        self.repository = None
        self.realm = None
        self.show_response = 0

    def finalize_options(self):
        """Finalizes options."""
        # Fall back to the defaults when the user supplied nothing.
        if self.repository is None:
            self.repository = self.DEFAULT_REPOSITORY
        if self.realm is None:
            self.realm = self.DEFAULT_REALM
| gpl-3.0 |
briancappello/PyTradeLib | pytradelib/store.py | 1 | 8189 | import os
import pytz
import talib as ta
import pandas as pd
from pandas.tseries.offsets import DateOffset
from pytradelib.settings import DATA_DIR, ZIPLINE_CACHE_DIR
from pytradelib.utils import (
percent_change,
within_percent_of_value,
crossed,
)
class CSVStore(object):
    """File-backed store of per-symbol OHLCV DataFrames kept as CSVs in
    ZIPLINE_CACHE_DIR, with an in-memory DataFrame cache and TA columns
    added on write.
    """
    # Class-level caches shared by all instances; keys are upper-cased
    # ticker symbols.
    _symbols = []
    _csv_files = {}
    _start_dates = {}
    _end_dates = {}
    _df_cache = {}

    def __init__(self):
        # Scan the cache directory once and index every CSV found there.
        self._store_contents = self._get_store_contents()
        for d in self._store_contents:
            symbol = d['symbol']
            self._symbols.append(symbol)
            self._csv_files[symbol] = d['csv_path']
            self._start_dates[symbol] = d['start']
            self._end_dates[symbol] = d['end']
        self._symbols.sort()

    @property
    def symbols(self):
        # Sorted list of all symbols known to the store.
        return self._symbols

    def get_df(self, symbol, start=None, end=None):
        """Return the (cached) DataFrame for *symbol*, optionally sliced
        to the UTC [start:end] range."""
        symbol = symbol.upper()
        def to_timestamp(dt, default):
            # Convert to a UTC Timestamp, or fall back to *default*
            # (0 / None act as open-ended slice bounds).
            if dt:
                return pd.Timestamp(dt, tz=pytz.UTC)
            return default
        start = to_timestamp(start, 0)
        end = to_timestamp(end, None)
        df = self._df_cache.get(symbol, None)
        if df is None:
            # Lazy-load from disk on first access.
            # NOTE(review): pd.DataFrame.from_csv is a long-deprecated
            # API (old pandas); confirm the pinned pandas version.
            csv_path = self._csv_files[symbol]
            df = pd.DataFrame.from_csv(csv_path).tz_localize(pytz.UTC)
            self._df_cache[symbol] = df
        return df[start:end]

    def set_df(self, symbol, df):
        """Store/merge *df* for *symbol* (delegates to _update_df)."""
        self._update_df(symbol, df)

    def _update_df(self, symbol, df):
        # Append only the rows newer than what is already stored, rewrite
        # the CSV (with TA columns) and refresh all caches.
        symbol = symbol.upper()
        if symbol in self.symbols:
            existing_df = self.get_df(symbol)
            # Keep existing history; append rows from the day after the
            # last stored index onward.
            df = existing_df.append(df[existing_df.index[-1] + DateOffset(days=1):])
            os.remove(self.get_csv_path(symbol))
        df = self._add_ta(df)
        csv_path = self._get_store_path(symbol, df.index[0], df.index[-1])
        df.to_csv(csv_path)
        self._set_csv_path(symbol, csv_path)
        self._set_end_date(symbol, df.index[-1])
        self._df_cache[symbol] = df

    def get_dfs(self, symbols=None, start=None, end=None):
        """Return {SYMBOL: DataFrame} for the given symbols (all symbols
        by default), each sliced to [start:end]."""
        symbols = symbols or self.symbols
        if not isinstance(symbols, list):
            symbols = [symbols]
        return dict(zip(
            [symbol.upper() for symbol in symbols],
            [self.get_df(symbol, start, end) for symbol in symbols]
        ))

    def set_dfs(self, symbol_df_dict):
        """Store every DataFrame in the {symbol: df} mapping."""
        for symbol, df in symbol_df_dict.items():
            self.set_df(symbol, df)

    def analyze(self, symbols=None, use_adjusted=True):
        '''
        :param symbols: list of symbols (defaults to all in the store)
        :param use_adjusted: whether or not to use adjusted prices
        :return: DataFrame
        '''
        def key(price_key):
            # Select adjusted vs raw OHLC column names.
            return 'Adj ' + price_key if use_adjusted else price_key
        results = {}
        for symbol, df in self.get_dfs(symbols).items():
            today = df.iloc[-1]
            yesterday = df.iloc[-2]
            # Trailing one-year window ending at the latest bar.
            most_recent_year = df[df.index[-1] - DateOffset(years=1):]
            # [:-1] excludes today's bar from the 52-week extremes.
            year_low = most_recent_year[key('Low')][:-1].min()
            year_high = most_recent_year[key('High')][:-1].max()
            dollar_volume_desc = most_recent_year.dollar_volume.describe()
            results[symbol] = {
                # last-bar metrics
                'previous_close': yesterday[key('Close')],
                'close': today[key('Close')],
                'percent_change': percent_change(yesterday[key('Close')], today[key('Close')]),
                'dollar_volume': today.dollar_volume,
                'percent_of_median_volume': (today.Volume * today[key('Close')]) / dollar_volume_desc['50%'],
                'advancing': today[key('Close')] > yesterday[key('Close')],
                'declining': today[key('Close')] < yesterday[key('Close')],
                'new_low': today[key('Close')] < year_low,
                'new_high': today[key('Close')] > year_high,
                'percent_of_high': today[key('Close')] / year_high,
                'near_sma100': within_percent_of_value(today[key('Close')], today['sma100'], 2),
                'near_sma200': within_percent_of_value(today[key('Close')], today['sma200'], 2),
                'crossed_sma100': crossed(today['sma100'], yesterday, today),
                'crossed_sma200': crossed(today['sma200'], yesterday, today),
                'crossed_5': crossed(5, yesterday, today),
                'crossed_10': crossed(10, yesterday, today),
                'crossed_50': crossed(50, yesterday, today),
                'crossed_100': crossed(100, yesterday, today),
                # stock "health" metrics
                'year_high': year_high,
                'year_low': year_low,
                'min_volume': int(dollar_volume_desc['min']),
                'dollar_volume_25th_percentile': int(dollar_volume_desc['25%']),
                'dollar_volume_75th_percentile': int(dollar_volume_desc['75%']),
                'atr': most_recent_year.atr.describe()['50%'],
            }
        # transpose the DataFrame so we have rows of symbols, and metrics as columns
        return pd.DataFrame(results).T

    def get_start_date(self, symbol):
        """Earliest stored date for *symbol*."""
        return self._start_dates[symbol.upper()]

    def _set_start_date(self, symbol, start_date):
        self._start_dates[symbol] = start_date

    def get_end_date(self, symbol):
        """Latest stored date for *symbol*."""
        return self._end_dates[symbol.upper()]

    def _set_end_date(self, symbol, end_date):
        self._end_dates[symbol] = end_date

    def get_csv_path(self, symbol):
        """Path of the CSV currently backing *symbol*."""
        return self._csv_files[symbol]

    def _set_csv_path(self, symbol, csv_path):
        self._csv_files[symbol] = csv_path

    def _get_store_contents(self):
        # Enumerate all CSVs in the cache dir and decode their filenames.
        csv_files = [os.path.join(ZIPLINE_CACHE_DIR, f)\
                     for f in os.listdir(ZIPLINE_CACHE_DIR)\
                     if f.endswith('.csv')]
        return [self._decode_store_path(path) for path in csv_files]

    def _decode_store_path(self, csv_path):
        """Parse a store filename ('SYMBOL-start-end.csv', see
        _get_store_path) back into symbol/path/start/end."""
        filename = os.path.basename(csv_path).replace('--', os.path.sep)
        symbol = filename[:filename.find('-')]
        dates = filename[len(symbol)+1:].replace('.csv', '')
        # Start and end timestamps are equal-length halves of the string.
        # NOTE(review): len(dates)/2 is Python 2 integer division; under
        # Python 3 this would need //.
        start = dates[:len(dates)/2]
        end = dates[len(dates)/2:]
        def to_dt(dt_str):
            # Undo the ':' -> '-' substitution applied in _get_store_path.
            date, time = dt_str.split(' ')
            return pd.Timestamp(' '.join([date, time.replace('-', ':')]))
        return {
            'symbol': symbol,
            'csv_path': csv_path,
            'start': to_dt(start),
            'end': to_dt(end),
        }

    def _get_store_path(self, symbol, start, end):
        '''
        :param symbol: string - the ticker
        :param start: pd.Timestamp - the earliest date
        :param end: pd.Timestamp - the latest date
        :return: string - path for the CSV
        '''
        filename_format = '%(symbol)s-%(start)s-%(end)s.csv'
        # Path separators in the symbol and ':' in timestamps are escaped
        # so the result is a valid filename.
        return os.path.join(ZIPLINE_CACHE_DIR, filename_format % {
            'symbol': symbol.upper().replace(os.path.sep, '--'),
            'start': start,
            'end': end,
        }).replace(':', '-')

    def _add_ta(self, df, use_adjusted=True):
        """Append technical-analysis columns (volume, ATR, SMAs,
        Bollinger bands, MACD, RSI, stochastics, slope) via TA-Lib."""
        def key(price_key):
            return 'Adj ' + price_key if use_adjusted else price_key
        df['dollar_volume'] = df[key('Close')] * df.Volume
        df['atr'] = ta.NATR(df[key('High')].values, df[key('Low')].values, df[key('Close')].values)  # timeperiod 14
        df['sma100'] = ta.SMA(df[key('Close')].values, timeperiod=100)
        df['sma200'] = ta.SMA(df[key('Close')].values, timeperiod=200)
        df['bbands_upper'], df['sma20'], df['bbands_lower'] = ta.BBANDS(df[key('Close')].values, timeperiod=20)
        df['macd_lead'], df['macd_lag'], df['macd_divergence'] = ta.MACD(df[key('Close')].values)  # 12, 26, 9
        df['rsi'] = ta.RSI(df[key('Close')].values)  # 14
        df['stoch_lead'], df['stoch_lag'] = ta.STOCH(df[key('High')].values, df[key('Low')].values, df[key('Close')].values,
                                                     fastk_period=14, slowk_period=1, slowd_period=3)
        df['slope'] = ta.LINEARREG_SLOPE(df[key('Close')].values) * -1  # talib returns the inverse of what we want
        return df
| gpl-3.0 |
jh23453/privacyidea | tests/test_api_events.py | 2 | 15101 | import json
from .base import MyTestCase
class APIEventsTestCase(MyTestCase):
    """Integration tests for the /event REST API: CRUD, handler options,
    discovery endpoints, and enable/disable of event configurations."""

    def test_01_crud_events(self):
        """Full create/read/update/delete cycle for one event config."""
        # list empty events
        with self.app.test_request_context('/event',
                                           method='GET',
                                           headers={'Authorization': self.at}):
            res = self.app.full_dispatch_request()
            self.assertTrue(res.status_code == 200, res)
            result = json.loads(res.data).get("result")
            detail = json.loads(res.data).get("detail")
            self.assertEqual(result.get("value"), [])
        # create an event configuration
        param = {
            "name": "Send an email",
            "event": "token_init",
            "action": "sendmail",
            "handlermodule": "UserNotification",
            "conditions": '{"blabla": "yes"}'
        }
        with self.app.test_request_context('/event',
                                           data=param,
                                           method='POST',
                                           headers={'Authorization': self.at}):
            res = self.app.full_dispatch_request()
            self.assertTrue(res.status_code == 200, res)
            result = json.loads(res.data).get("result")
            detail = json.loads(res.data).get("detail")
            # value is the id of the newly created event (first one -> 1)
            self.assertEqual(result.get("value"), 1)
        # check the event
        with self.app.test_request_context('/event',
                                           method='GET',
                                           headers={
                                               'Authorization': self.at}):
            res = self.app.full_dispatch_request()
            self.assertTrue(res.status_code == 200, res)
            result = json.loads(res.data).get("result")
            detail = json.loads(res.data).get("detail")
            self.assertEqual(result.get("value")[0].get("action"), "sendmail")
            # conditions are returned parsed as a dict
            self.assertEqual(result.get("value")[0].get("conditions"),
                             {"blabla": "yes"})
        # update event config
        param = {
            "name": "A new name",
            "event": "token_init",
            "action": "sendmail",
            "handlermodule": "UserNotification",
            "conditions": '{"always": "yes"}',
            "id": 1
        }
        with self.app.test_request_context('/event',
                                           data=param,
                                           method='POST',
                                           headers={
                                               'Authorization': self.at}):
            res = self.app.full_dispatch_request()
            self.assertTrue(res.status_code == 200, res)
            result = json.loads(res.data).get("result")
            detail = json.loads(res.data).get("detail")
            self.assertEqual(result.get("value"), 1)
        # check the event
        with self.app.test_request_context('/event',
                                           method='GET',
                                           headers={
                                               'Authorization': self.at}):
            res = self.app.full_dispatch_request()
            self.assertTrue(res.status_code == 200, res)
            result = json.loads(res.data).get("result")
            detail = json.loads(res.data).get("detail")
            self.assertEqual(result.get("value")[0].get("action"),
                             "sendmail")
            self.assertEqual(result.get("value")[0].get("conditions"),
                             {"always": "yes"})
        # get one single event
        with self.app.test_request_context('/event/1',
                                           method='GET',
                                           headers={
                                               'Authorization': self.at}):
            res = self.app.full_dispatch_request()
            self.assertTrue(res.status_code == 200, res)
            result = json.loads(res.data).get("result")
            detail = json.loads(res.data).get("detail")
            self.assertEqual(result.get("value")[0].get("action"),
                             "sendmail")
            self.assertEqual(result.get("value")[0].get("conditions"),
                             {"always": "yes"})
        # delete event
        with self.app.test_request_context('/event/1',
                                           method='DELETE',
                                           headers={
                                               'Authorization': self.at}):
            res = self.app.full_dispatch_request()
            self.assertTrue(res.status_code == 200, res)
            result = json.loads(res.data).get("result")
            detail = json.loads(res.data).get("detail")
            self.assertEqual(result.get("value"), 1)
        # list empty events
        with self.app.test_request_context('/event',
                                           method='GET',
                                           headers={'Authorization': self.at}):
            res = self.app.full_dispatch_request()
            self.assertTrue(res.status_code == 200, res)
            result = json.loads(res.data).get("result")
            detail = json.loads(res.data).get("detail")
            self.assertEqual(result.get("value"), [])

    def test_02_test_options(self):
        """Event handler options ('option.<name>' parameters) are stored
        and returned under the 'options' key."""
        # create an event configuration
        param = {
            "name": "Send an email via themis",
            "event": "token_init",
            "action": "sendmail",
            "handlermodule": "UserNotification",
            "conditions": '{"blabla": "yes"}',
            "option.emailconfig": "themis",
            "option.2": "value2"
        }
        with self.app.test_request_context('/event',
                                           data=param,
                                           method='POST',
                                           headers={'Authorization': self.at}):
            res = self.app.full_dispatch_request()
            self.assertTrue(res.status_code == 200, res)
            result = json.loads(res.data).get("result")
            detail = json.loads(res.data).get("detail")
            self.assertEqual(result.get("value"), 1)
        # list event with options
        with self.app.test_request_context('/event',
                                           method='GET',
                                           headers={
                                               'Authorization': self.at}):
            res = self.app.full_dispatch_request()
            self.assertTrue(res.status_code == 200, res)
            result = json.loads(res.data).get("result")
            event_list = result.get("value")
            self.assertEqual(len(event_list), 1)
            self.assertEqual(event_list[0].get("action"), "sendmail")
            # the single event name is returned as a list
            self.assertEqual(event_list[0].get("event"), ["token_init"])
            self.assertEqual(event_list[0].get("options").get("2"), "value2")
            self.assertEqual(event_list[0].get("options").get("emailconfig"),
                             "themis")
        # delete event
        with self.app.test_request_context('/event/1',
                                           method='DELETE',
                                           headers={
                                               'Authorization': self.at}):
            res = self.app.full_dispatch_request()
            self.assertTrue(res.status_code == 200, res)
            result = json.loads(res.data).get("result")
            detail = json.loads(res.data).get("detail")
            self.assertEqual(result.get("value"), 1)
        # list empty events
        with self.app.test_request_context('/event',
                                           method='GET',
                                           headers={'Authorization': self.at}):
            res = self.app.full_dispatch_request()
            self.assertTrue(res.status_code == 200, res)
            result = json.loads(res.data).get("result")
            detail = json.loads(res.data).get("detail")
            self.assertEqual(result.get("value"), [])

    def test_03_available_events(self):
        """GET /event/available lists the known event names."""
        with self.app.test_request_context('/event/available',
                                           method='GET',
                                           headers={'Authorization': self.at}):
            res = self.app.full_dispatch_request()
            self.assertTrue(res.status_code == 200, res)
            result = json.loads(res.data).get("result")
            detail = json.loads(res.data).get("detail")
            self.assertTrue("token_init" in result.get("value"))
            self.assertTrue("token_assign" in result.get("value"))
            self.assertTrue("token_unassign" in result.get("value"))

    def test_04_handler_modules(self):
        """GET /event/handlermodules lists available handler modules."""
        with self.app.test_request_context('/event/handlermodules',
                                           method='GET',
                                           headers={'Authorization': self.at}):
            res = self.app.full_dispatch_request()
            self.assertTrue(res.status_code == 200, res)
            result = json.loads(res.data).get("result")
            detail = json.loads(res.data).get("detail")
            self.assertTrue("UserNotification" in result.get("value"))

    def test_05_get_handler_actions(self):
        """GET /event/actions/<module> lists the module's actions."""
        with self.app.test_request_context('/event/actions/UserNotification',
                                           method='GET',
                                           headers={'Authorization': self.at}):
            res = self.app.full_dispatch_request()
            self.assertTrue(res.status_code == 200, res)
            result = json.loads(res.data).get("result")
            self.assertTrue("sendmail" in result.get("value"))
            self.assertTrue("sendsms" in result.get("value"))
            detail = json.loads(res.data).get("detail")

    def test_05_get_handler_conditions(self):
        """GET /event/conditions/<module> lists the module's conditions."""
        with self.app.test_request_context('/event/conditions/UserNotification',
                                           method='GET',
                                           headers={'Authorization': self.at}):
            res = self.app.full_dispatch_request()
            self.assertTrue(res.status_code == 200, res)
            result = json.loads(res.data).get("result")
            self.assertTrue("logged_in_user" in result.get("value"))
            self.assertTrue("result_value" in result.get("value"))
            detail = json.loads(res.data).get("detail")

    def test_06_test_enable_disable(self):
        """Events are created active and can be toggled via
        /event/disable/<id> and /event/enable/<id>."""
        # create an event configuration
        param = {
            "name": "Send an email via themis",
            "event": "token_init",
            "action": "sendmail",
            "handlermodule": "UserNotification",
            "conditions": '{"blabla": "yes"}',
            "option.emailconfig": "themis",
            "option.2": "value2"
        }
        with self.app.test_request_context('/event',
                                           data=param,
                                           method='POST',
                                           headers={'Authorization': self.at}):
            res = self.app.full_dispatch_request()
            self.assertTrue(res.status_code == 200, res)
            result = json.loads(res.data).get("result")
            detail = json.loads(res.data).get("detail")
            self.assertEqual(result.get("value"), 1)
        # list event with options
        with self.app.test_request_context('/event',
                                           method='GET',
                                           headers={
                                               'Authorization': self.at}):
            res = self.app.full_dispatch_request()
            self.assertTrue(res.status_code == 200, res)
            result = json.loads(res.data).get("result")
            event_list = result.get("value")
            self.assertEqual(len(event_list), 1)
            # newly created events start out active
            self.assertEqual(event_list[0].get("active"), True)
        # disable event
        with self.app.test_request_context('/event/disable/1',
                                           method='POST',
                                           headers={
                                               'Authorization': self.at}):
            res = self.app.full_dispatch_request()
            self.assertTrue(res.status_code == 200, res)
        with self.app.test_request_context('/event',
                                           method='GET',
                                           headers={
                                               'Authorization': self.at}):
            res = self.app.full_dispatch_request()
            self.assertTrue(res.status_code == 200, res)
            result = json.loads(res.data).get("result")
            event_list = result.get("value")
            self.assertEqual(len(event_list), 1)
            self.assertEqual(event_list[0].get("active"), False)
        # Enable event
        with self.app.test_request_context('/event/enable/1',
                                           method='POST',
                                           headers={
                                               'Authorization': self.at}):
            res = self.app.full_dispatch_request()
            self.assertTrue(res.status_code == 200, res)
        with self.app.test_request_context('/event',
                                           method='GET',
                                           headers={
                                               'Authorization': self.at}):
            res = self.app.full_dispatch_request()
            self.assertTrue(res.status_code == 200, res)
            result = json.loads(res.data).get("result")
            event_list = result.get("value")
            self.assertEqual(len(event_list), 1)
            self.assertEqual(event_list[0].get("active"), True)
        # delete event
        with self.app.test_request_context('/event/1',
                                           method='DELETE',
                                           headers={
                                               'Authorization': self.at}):
            res = self.app.full_dispatch_request()
            self.assertTrue(res.status_code == 200, res)
            result = json.loads(res.data).get("result")
            detail = json.loads(res.data).get("detail")
            self.assertEqual(result.get("value"), 1)
        # list empty events
        with self.app.test_request_context('/event',
                                           method='GET',
                                           headers={'Authorization': self.at}):
            res = self.app.full_dispatch_request()
            self.assertTrue(res.status_code == 200, res)
            result = json.loads(res.data).get("result")
            detail = json.loads(res.data).get("detail")
            self.assertEqual(result.get("value"), [])
| agpl-3.0 |
shravya-ks/ns-3-tcp-prague | src/uan/bindings/modulegen__gcc_LP64.py | 22 | 533240 | from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
    """Lenient pybindgen error handler: report failures as warnings."""

    def handle_error(self, wrapper, exception, traceback_):
        # Surface the problem, then tell pybindgen to keep going so the
        # remaining wrappers are still generated.
        message = "exception %r in wrapper %s" % (exception, wrapper)
        warnings.warn(message)
        return True
# Install the lenient handler so code generation continues past individual
# wrapper failures instead of aborting.
pybindgen.settings.error_handler = ErrorHandler()
import sys
def module_init():
    """Create and return the root pybindgen Module for the ns3 'uan'
    bindings, generated into the '::ns3' C++ namespace."""
    uan_module = Module('ns.uan', cpp_namespace='::ns3')
    return uan_module
def register_types(module):
root_module = module.get_root()
## address.h (module 'network'): ns3::Address [class]
module.add_class('Address', import_from_module='ns.network')
## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration]
module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
module.add_class('AttributeConstructionList', import_from_module='ns.core')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
## buffer.h (module 'network'): ns3::Buffer [class]
module.add_class('Buffer', import_from_module='ns.network')
## buffer.h (module 'network'): ns3::Buffer::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer'])
## packet.h (module 'network'): ns3::ByteTagIterator [class]
module.add_class('ByteTagIterator', import_from_module='ns.network')
## packet.h (module 'network'): ns3::ByteTagIterator::Item [class]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList [class]
module.add_class('ByteTagList', import_from_module='ns.network')
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item [struct]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator'])
## callback.h (module 'core'): ns3::CallbackBase [class]
module.add_class('CallbackBase', import_from_module='ns.core')
## device-energy-model-container.h (module 'energy'): ns3::DeviceEnergyModelContainer [class]
module.add_class('DeviceEnergyModelContainer', import_from_module='ns.energy')
## energy-model-helper.h (module 'energy'): ns3::DeviceEnergyModelHelper [class]
module.add_class('DeviceEnergyModelHelper', allow_subclassing=True, import_from_module='ns.energy')
## energy-model-helper.h (module 'energy'): ns3::EnergySourceHelper [class]
module.add_class('EnergySourceHelper', allow_subclassing=True, import_from_module='ns.energy')
## event-id.h (module 'core'): ns3::EventId [class]
module.add_class('EventId', import_from_module='ns.core')
## hash.h (module 'core'): ns3::Hasher [class]
module.add_class('Hasher', import_from_module='ns.core')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
module.add_class('Ipv4Address', import_from_module='ns.network')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class]
module.add_class('Ipv4Mask', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
module.add_class('Ipv6Address', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class]
module.add_class('Ipv6Prefix', import_from_module='ns.network')
## mac48-address.h (module 'network'): ns3::Mac48Address [class]
module.add_class('Mac48Address', import_from_module='ns.network')
## mac48-address.h (module 'network'): ns3::Mac48Address [class]
root_module['ns3::Mac48Address'].implicitly_converts_to(root_module['ns3::Address'])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer [class]
module.add_class('NetDeviceContainer', import_from_module='ns.network')
## node-container.h (module 'network'): ns3::NodeContainer [class]
module.add_class('NodeContainer', import_from_module='ns.network')
## object-base.h (module 'core'): ns3::ObjectBase [class]
module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
## object.h (module 'core'): ns3::ObjectDeleter [struct]
module.add_class('ObjectDeleter', import_from_module='ns.core')
## object-factory.h (module 'core'): ns3::ObjectFactory [class]
module.add_class('ObjectFactory', import_from_module='ns.core')
## packet-metadata.h (module 'network'): ns3::PacketMetadata [class]
module.add_class('PacketMetadata', import_from_module='ns.network')
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [struct]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [enumeration]
module.add_enum('', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network')
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator [class]
module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
## packet.h (module 'network'): ns3::PacketTagIterator [class]
module.add_class('PacketTagIterator', import_from_module='ns.network')
## packet.h (module 'network'): ns3::PacketTagIterator::Item [class]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator'])
## packet-tag-list.h (module 'network'): ns3::PacketTagList [class]
module.add_class('PacketTagList', import_from_module='ns.network')
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData [struct]
module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList'])
## uan-mac-rc.h (module 'uan'): ns3::Reservation [class]
module.add_class('Reservation')
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simulator.h (module 'core'): ns3::Simulator [class]
module.add_class('Simulator', destructor_visibility='private', import_from_module='ns.core')
## simulator.h (module 'core'): ns3::Simulator [enumeration]
module.add_enum('', ['NO_CONTEXT'], outer_class=root_module['ns3::Simulator'], import_from_module='ns.core')
## tag.h (module 'network'): ns3::Tag [class]
module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
## tag-buffer.h (module 'network'): ns3::TagBuffer [class]
module.add_class('TagBuffer', import_from_module='ns.network')
## uan-prop-model.h (module 'uan'): ns3::Tap [class]
module.add_class('Tap')
## nstime.h (module 'core'): ns3::TimeWithUnit [class]
module.add_class('TimeWithUnit', import_from_module='ns.core')
## traced-value.h (module 'core'): ns3::TracedValue<double> [class]
module.add_class('TracedValue', import_from_module='ns.core', template_parameters=['double'])
## type-id.h (module 'core'): ns3::TypeId [class]
module.add_class('TypeId', import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::SupportLevel [enumeration]
module.add_enum('SupportLevel', ['SUPPORTED', 'DEPRECATED', 'OBSOLETE'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## uan-address.h (module 'uan'): ns3::UanAddress [class]
module.add_class('UanAddress')
## uan-address.h (module 'uan'): ns3::UanAddress [class]
root_module['ns3::UanAddress'].implicitly_converts_to(root_module['ns3::Address'])
## uan-helper.h (module 'uan'): ns3::UanHelper [class]
module.add_class('UanHelper')
## uan-tx-mode.h (module 'uan'): ns3::UanModesList [class]
module.add_class('UanModesList')
## uan-transducer.h (module 'uan'): ns3::UanPacketArrival [class]
module.add_class('UanPacketArrival')
## uan-prop-model.h (module 'uan'): ns3::UanPdp [class]
module.add_class('UanPdp')
## uan-phy.h (module 'uan'): ns3::UanPhyListener [class]
module.add_class('UanPhyListener', allow_subclassing=True)
## uan-tx-mode.h (module 'uan'): ns3::UanTxMode [class]
module.add_class('UanTxMode')
## uan-tx-mode.h (module 'uan'): ns3::UanTxMode::ModulationType [enumeration]
module.add_enum('ModulationType', ['PSK', 'QAM', 'FSK', 'OTHER'], outer_class=root_module['ns3::UanTxMode'])
## uan-tx-mode.h (module 'uan'): ns3::UanTxModeFactory [class]
module.add_class('UanTxModeFactory')
## vector.h (module 'core'): ns3::Vector2D [class]
module.add_class('Vector2D', import_from_module='ns.core')
## vector.h (module 'core'): ns3::Vector3D [class]
module.add_class('Vector3D', import_from_module='ns.core')
## empty.h (module 'core'): ns3::empty [class]
module.add_class('empty', import_from_module='ns.core')
## int64x64-double.h (module 'core'): ns3::int64x64_t [class]
module.add_class('int64x64_t', import_from_module='ns.core')
## int64x64-double.h (module 'core'): ns3::int64x64_t::impl_type [enumeration]
module.add_enum('impl_type', ['int128_impl', 'cairo_impl', 'ld_impl'], outer_class=root_module['ns3::int64x64_t'], import_from_module='ns.core')
## acoustic-modem-energy-model-helper.h (module 'uan'): ns3::AcousticModemEnergyModelHelper [class]
module.add_class('AcousticModemEnergyModelHelper', parent=root_module['ns3::DeviceEnergyModelHelper'])
## chunk.h (module 'network'): ns3::Chunk [class]
module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
## header.h (module 'network'): ns3::Header [class]
module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
## object.h (module 'core'): ns3::Object [class]
module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
## object.h (module 'core'): ns3::Object::AggregateIterator [class]
module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
## random-variable-stream.h (module 'core'): ns3::RandomVariableStream [class]
module.add_class('RandomVariableStream', import_from_module='ns.core', parent=root_module['ns3::Object'])
## random-variable-stream.h (module 'core'): ns3::SequentialRandomVariable [class]
module.add_class('SequentialRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::EventImpl', 'ns3::empty', 'ns3::DefaultDeleter<ns3::EventImpl>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## nstime.h (module 'core'): ns3::Time [class]
module.add_class('Time', import_from_module='ns.core')
## nstime.h (module 'core'): ns3::Time::Unit [enumeration]
module.add_enum('Unit', ['Y', 'D', 'H', 'MIN', 'S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core')
## nstime.h (module 'core'): ns3::Time [class]
root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t'])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
## trailer.h (module 'network'): ns3::Trailer [class]
module.add_class('Trailer', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
## random-variable-stream.h (module 'core'): ns3::TriangularRandomVariable [class]
module.add_class('TriangularRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## uan-header-common.h (module 'uan'): ns3::UanHeaderCommon [class]
module.add_class('UanHeaderCommon', parent=root_module['ns3::Header'])
## uan-header-rc.h (module 'uan'): ns3::UanHeaderRcAck [class]
module.add_class('UanHeaderRcAck', parent=root_module['ns3::Header'])
## uan-header-rc.h (module 'uan'): ns3::UanHeaderRcCts [class]
module.add_class('UanHeaderRcCts', parent=root_module['ns3::Header'])
## uan-header-rc.h (module 'uan'): ns3::UanHeaderRcCtsGlobal [class]
module.add_class('UanHeaderRcCtsGlobal', parent=root_module['ns3::Header'])
## uan-header-rc.h (module 'uan'): ns3::UanHeaderRcData [class]
module.add_class('UanHeaderRcData', parent=root_module['ns3::Header'])
## uan-header-rc.h (module 'uan'): ns3::UanHeaderRcRts [class]
module.add_class('UanHeaderRcRts', parent=root_module['ns3::Header'])
## uan-mac.h (module 'uan'): ns3::UanMac [class]
module.add_class('UanMac', parent=root_module['ns3::Object'])
## uan-mac-aloha.h (module 'uan'): ns3::UanMacAloha [class]
module.add_class('UanMacAloha', parent=root_module['ns3::UanMac'])
## uan-mac-cw.h (module 'uan'): ns3::UanMacCw [class]
module.add_class('UanMacCw', parent=[root_module['ns3::UanMac'], root_module['ns3::UanPhyListener']])
## uan-mac-rc.h (module 'uan'): ns3::UanMacRc [class]
module.add_class('UanMacRc', parent=root_module['ns3::UanMac'])
## uan-mac-rc.h (module 'uan'): ns3::UanMacRc [enumeration]
module.add_enum('', ['TYPE_DATA', 'TYPE_GWPING', 'TYPE_RTS', 'TYPE_CTS', 'TYPE_ACK'], outer_class=root_module['ns3::UanMacRc'])
## uan-mac-rc-gw.h (module 'uan'): ns3::UanMacRcGw [class]
module.add_class('UanMacRcGw', parent=root_module['ns3::UanMac'])
## uan-noise-model.h (module 'uan'): ns3::UanNoiseModel [class]
module.add_class('UanNoiseModel', parent=root_module['ns3::Object'])
## uan-noise-model-default.h (module 'uan'): ns3::UanNoiseModelDefault [class]
module.add_class('UanNoiseModelDefault', parent=root_module['ns3::UanNoiseModel'])
## uan-phy.h (module 'uan'): ns3::UanPhy [class]
module.add_class('UanPhy', parent=root_module['ns3::Object'])
## uan-phy.h (module 'uan'): ns3::UanPhy::State [enumeration]
module.add_enum('State', ['IDLE', 'CCABUSY', 'RX', 'TX', 'SLEEP', 'DISABLED'], outer_class=root_module['ns3::UanPhy'])
## uan-phy.h (module 'uan'): ns3::UanPhyCalcSinr [class]
module.add_class('UanPhyCalcSinr', parent=root_module['ns3::Object'])
## uan-phy-gen.h (module 'uan'): ns3::UanPhyCalcSinrDefault [class]
module.add_class('UanPhyCalcSinrDefault', parent=root_module['ns3::UanPhyCalcSinr'])
## uan-phy-dual.h (module 'uan'): ns3::UanPhyCalcSinrDual [class]
module.add_class('UanPhyCalcSinrDual', parent=root_module['ns3::UanPhyCalcSinr'])
## uan-phy-gen.h (module 'uan'): ns3::UanPhyCalcSinrFhFsk [class]
module.add_class('UanPhyCalcSinrFhFsk', parent=root_module['ns3::UanPhyCalcSinr'])
## uan-phy-dual.h (module 'uan'): ns3::UanPhyDual [class]
module.add_class('UanPhyDual', parent=root_module['ns3::UanPhy'])
## uan-phy-gen.h (module 'uan'): ns3::UanPhyGen [class]
module.add_class('UanPhyGen', parent=root_module['ns3::UanPhy'])
## uan-phy.h (module 'uan'): ns3::UanPhyPer [class]
module.add_class('UanPhyPer', parent=root_module['ns3::Object'])
## uan-phy-gen.h (module 'uan'): ns3::UanPhyPerGenDefault [class]
module.add_class('UanPhyPerGenDefault', parent=root_module['ns3::UanPhyPer'])
## uan-phy-gen.h (module 'uan'): ns3::UanPhyPerUmodem [class]
module.add_class('UanPhyPerUmodem', parent=root_module['ns3::UanPhyPer'])
## uan-prop-model.h (module 'uan'): ns3::UanPropModel [class]
module.add_class('UanPropModel', parent=root_module['ns3::Object'])
## uan-prop-model-ideal.h (module 'uan'): ns3::UanPropModelIdeal [class]
module.add_class('UanPropModelIdeal', parent=root_module['ns3::UanPropModel'])
## uan-prop-model-thorp.h (module 'uan'): ns3::UanPropModelThorp [class]
module.add_class('UanPropModelThorp', parent=root_module['ns3::UanPropModel'])
## uan-transducer.h (module 'uan'): ns3::UanTransducer [class]
module.add_class('UanTransducer', parent=root_module['ns3::Object'])
## uan-transducer.h (module 'uan'): ns3::UanTransducer::State [enumeration]
module.add_enum('State', ['TX', 'RX'], outer_class=root_module['ns3::UanTransducer'])
## uan-transducer-hd.h (module 'uan'): ns3::UanTransducerHd [class]
module.add_class('UanTransducerHd', parent=root_module['ns3::UanTransducer'])
## random-variable-stream.h (module 'core'): ns3::UniformRandomVariable [class]
module.add_class('UniformRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## random-variable-stream.h (module 'core'): ns3::WeibullRandomVariable [class]
module.add_class('WeibullRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## random-variable-stream.h (module 'core'): ns3::ZetaRandomVariable [class]
module.add_class('ZetaRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## random-variable-stream.h (module 'core'): ns3::ZipfRandomVariable [class]
module.add_class('ZipfRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## attribute.h (module 'core'): ns3::AttributeAccessor [class]
module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
## attribute.h (module 'core'): ns3::AttributeChecker [class]
module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
## attribute.h (module 'core'): ns3::AttributeValue [class]
module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
## boolean.h (module 'core'): ns3::BooleanChecker [class]
module.add_class('BooleanChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## boolean.h (module 'core'): ns3::BooleanValue [class]
module.add_class('BooleanValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## callback.h (module 'core'): ns3::CallbackChecker [class]
module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## callback.h (module 'core'): ns3::CallbackImplBase [class]
module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
## callback.h (module 'core'): ns3::CallbackValue [class]
module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## channel.h (module 'network'): ns3::Channel [class]
module.add_class('Channel', import_from_module='ns.network', parent=root_module['ns3::Object'])
## random-variable-stream.h (module 'core'): ns3::ConstantRandomVariable [class]
module.add_class('ConstantRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## random-variable-stream.h (module 'core'): ns3::DeterministicRandomVariable [class]
module.add_class('DeterministicRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## device-energy-model.h (module 'energy'): ns3::DeviceEnergyModel [class]
module.add_class('DeviceEnergyModel', import_from_module='ns.energy', parent=root_module['ns3::Object'])
## double.h (module 'core'): ns3::DoubleValue [class]
module.add_class('DoubleValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## random-variable-stream.h (module 'core'): ns3::EmpiricalRandomVariable [class]
module.add_class('EmpiricalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## attribute.h (module 'core'): ns3::EmptyAttributeAccessor [class]
module.add_class('EmptyAttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::AttributeAccessor'])
## attribute.h (module 'core'): ns3::EmptyAttributeChecker [class]
module.add_class('EmptyAttributeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## energy-harvester.h (module 'energy'): ns3::EnergyHarvester [class]
module.add_class('EnergyHarvester', import_from_module='ns.energy', parent=root_module['ns3::Object'])
## energy-source.h (module 'energy'): ns3::EnergySource [class]
module.add_class('EnergySource', import_from_module='ns.energy', parent=root_module['ns3::Object'])
## energy-source-container.h (module 'energy'): ns3::EnergySourceContainer [class]
module.add_class('EnergySourceContainer', import_from_module='ns.energy', parent=root_module['ns3::Object'])
## enum.h (module 'core'): ns3::EnumChecker [class]
module.add_class('EnumChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## enum.h (module 'core'): ns3::EnumValue [class]
module.add_class('EnumValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## random-variable-stream.h (module 'core'): ns3::ErlangRandomVariable [class]
module.add_class('ErlangRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## event-impl.h (module 'core'): ns3::EventImpl [class]
module.add_class('EventImpl', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
## random-variable-stream.h (module 'core'): ns3::ExponentialRandomVariable [class]
module.add_class('ExponentialRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## random-variable-stream.h (module 'core'): ns3::GammaRandomVariable [class]
module.add_class('GammaRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## integer.h (module 'core'): ns3::IntegerValue [class]
module.add_class('IntegerValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker [class]
module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue [class]
module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker [class]
module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue [class]
module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker [class]
module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue [class]
module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker [class]
module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue [class]
module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## random-variable-stream.h (module 'core'): ns3::LogNormalRandomVariable [class]
module.add_class('LogNormalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## mac48-address.h (module 'network'): ns3::Mac48AddressChecker [class]
module.add_class('Mac48AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## mac48-address.h (module 'network'): ns3::Mac48AddressValue [class]
module.add_class('Mac48AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## mobility-model.h (module 'mobility'): ns3::MobilityModel [class]
module.add_class('MobilityModel', import_from_module='ns.mobility', parent=root_module['ns3::Object'])
## net-device.h (module 'network'): ns3::NetDevice [class]
module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object'])
## net-device.h (module 'network'): ns3::NetDevice::PacketType [enumeration]
module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network')
## nix-vector.h (module 'network'): ns3::NixVector [class]
module.add_class('NixVector', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
## node.h (module 'network'): ns3::Node [class]
module.add_class('Node', import_from_module='ns.network', parent=root_module['ns3::Object'])
## random-variable-stream.h (module 'core'): ns3::NormalRandomVariable [class]
module.add_class('NormalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker [class]
module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue [class]
module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## packet.h (module 'network'): ns3::Packet [class]
module.add_class('Packet', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
## random-variable-stream.h (module 'core'): ns3::ParetoRandomVariable [class]
module.add_class('ParetoRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## pointer.h (module 'core'): ns3::PointerChecker [class]
module.add_class('PointerChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## pointer.h (module 'core'): ns3::PointerValue [class]
module.add_class('PointerValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## nstime.h (module 'core'): ns3::TimeValue [class]
module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## type-id.h (module 'core'): ns3::TypeIdChecker [class]
module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## type-id.h (module 'core'): ns3::TypeIdValue [class]
module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## uan-channel.h (module 'uan'): ns3::UanChannel [class]
module.add_class('UanChannel', parent=root_module['ns3::Channel'])
## uan-tx-mode.h (module 'uan'): ns3::UanModesListChecker [class]
module.add_class('UanModesListChecker', parent=root_module['ns3::AttributeChecker'])
## uan-tx-mode.h (module 'uan'): ns3::UanModesListValue [class]
module.add_class('UanModesListValue', parent=root_module['ns3::AttributeValue'])
## uan-net-device.h (module 'uan'): ns3::UanNetDevice [class]
module.add_class('UanNetDevice', parent=root_module['ns3::NetDevice'])
## uinteger.h (module 'core'): ns3::UintegerValue [class]
module.add_class('UintegerValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## vector.h (module 'core'): ns3::Vector2DChecker [class]
module.add_class('Vector2DChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## vector.h (module 'core'): ns3::Vector2DValue [class]
module.add_class('Vector2DValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## vector.h (module 'core'): ns3::Vector3DChecker [class]
module.add_class('Vector3DChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## vector.h (module 'core'): ns3::Vector3DValue [class]
module.add_class('Vector3DValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## acoustic-modem-energy-model.h (module 'uan'): ns3::AcousticModemEnergyModel [class]
module.add_class('AcousticModemEnergyModel', parent=root_module['ns3::DeviceEnergyModel'])
## address.h (module 'network'): ns3::AddressChecker [class]
module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## address.h (module 'network'): ns3::AddressValue [class]
module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
module.add_container('std::list< std::pair< ns3::Ptr< ns3::Packet >, ns3::UanAddress > >', 'std::pair< ns3::Ptr< ns3::Packet >, ns3::UanAddress >', container_type=u'list')
module.add_container('std::vector< ns3::Tap >', 'ns3::Tap', container_type=u'vector')
module.add_container('std::vector< std::complex< double > >', 'std::complex< double >', container_type=u'vector')
module.add_container('std::vector< double >', 'double', container_type=u'vector')
module.add_container('std::set< unsigned char >', 'unsigned char', container_type=u'set')
module.add_container('std::list< ns3::UanPacketArrival >', 'ns3::UanPacketArrival', container_type=u'list')
module.add_container('std::list< ns3::Ptr< ns3::UanPhy > >', 'ns3::Ptr< ns3::UanPhy >', container_type=u'list')
module.add_container('std::vector< std::pair< ns3::Ptr< ns3::UanNetDevice >, ns3::Ptr< ns3::UanTransducer > > >', 'std::pair< ns3::Ptr< ns3::UanNetDevice >, ns3::Ptr< ns3::UanTransducer > >', container_type=u'vector')
module.add_container('std::list< ns3::Ptr< ns3::UanTransducer > >', 'ns3::Ptr< ns3::UanTransducer >', container_type=u'list')
typehandlers.add_type_alias(u'ns3::Vector3DValue', u'ns3::VectorValue')
typehandlers.add_type_alias(u'ns3::Vector3DValue*', u'ns3::VectorValue*')
typehandlers.add_type_alias(u'ns3::Vector3DValue&', u'ns3::VectorValue&')
module.add_typedef(root_module['ns3::Vector3DValue'], 'VectorValue')
typehandlers.add_type_alias(u'ns3::Vector3D', u'ns3::Vector')
typehandlers.add_type_alias(u'ns3::Vector3D*', u'ns3::Vector*')
typehandlers.add_type_alias(u'ns3::Vector3D&', u'ns3::Vector&')
module.add_typedef(root_module['ns3::Vector3D'], 'Vector')
typehandlers.add_type_alias(u'ns3::Vector3DChecker', u'ns3::VectorChecker')
typehandlers.add_type_alias(u'ns3::Vector3DChecker*', u'ns3::VectorChecker*')
typehandlers.add_type_alias(u'ns3::Vector3DChecker&', u'ns3::VectorChecker&')
module.add_typedef(root_module['ns3::Vector3DChecker'], 'VectorChecker')
## Register a nested module for the namespace FatalImpl
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
## Register a nested module for the namespace Hash
nested_module = module.add_cpp_namespace('Hash')
register_types_ns3_Hash(nested_module)
## Register a nested module for the namespace TracedValueCallback
nested_module = module.add_cpp_namespace('TracedValueCallback')
register_types_ns3_TracedValueCallback(nested_module)
## Register a nested module for the namespace internal
nested_module = module.add_cpp_namespace('internal')
register_types_ns3_internal(nested_module)
def register_types_ns3_FatalImpl(module):
    """Register types for the ns3::FatalImpl namespace (no types are exported)."""
    root_module = module.get_root()
def register_types_ns3_Hash(module):
    """Register the types of the ns3::Hash namespace with *module*."""
    root_module = module.get_root()
    ## hash-function.h (module 'core'): ns3::Hash::Implementation [class]
    module.add_class('Implementation', import_from_module='ns.core',
                     parent=root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
    # Function-pointer typedefs for the 32-bit and 64-bit hash functions.
    # Each C signature is aliased in its pointer, pointer-to-pointer and
    # reference-to-pointer forms, in that order.
    for c_signature, alias in (
            (u'uint32_t ( * ) ( char const *, size_t )', u'ns3::Hash::Hash32Function_ptr'),
            (u'uint64_t ( * ) ( char const *, size_t )', u'ns3::Hash::Hash64Function_ptr'),
    ):
        typehandlers.add_type_alias(c_signature + u' *', alias)
        typehandlers.add_type_alias(c_signature + u' **', alias + u'*')
        typehandlers.add_type_alias(c_signature + u' *&', alias + u'&')
    ## Register a nested module for the namespace Function
    register_types_ns3_Hash_Function(module.add_cpp_namespace('Function'))
def register_types_ns3_Hash_Function(module):
    """Register the hash implementations of the ns3::Hash::Function namespace."""
    root_module = module.get_root()
    # Fnv1a (hash-fnv.h), the Hash32/Hash64 adaptors (hash-function.h) and
    # Murmur3 (hash-murmur3.h) all derive from ns3::Hash::Implementation.
    implementation_base = root_module['ns3::Hash::Implementation']
    for class_name in ('Fnv1a', 'Hash32', 'Hash64', 'Murmur3'):
        module.add_class(class_name, import_from_module='ns.core', parent=implementation_base)
def register_types_ns3_TracedValueCallback(module):
    """Register the ns3::TracedValueCallback function-pointer typedefs."""
    root_module = module.get_root()
    # Each entry pairs a C callback signature with its ns-3 typedef name.
    # For every pair the pointer, pointer-to-pointer and reference-to-pointer
    # forms are aliased, in that order; the tuple order below preserves the
    # original generated registration order.
    callback_aliases = (
        (u'void ( * ) ( int8_t, int8_t )', u'ns3::TracedValueCallback::Int8'),
        (u'void ( * ) ( uint8_t, uint8_t )', u'ns3::TracedValueCallback::Uint8'),
        (u'void ( * ) ( double, double )', u'ns3::TracedValueCallback::Double'),
        (u'void ( * ) ( uint32_t, uint32_t )', u'ns3::TracedValueCallback::Uint32'),
        (u'void ( * ) ( ns3::Time, ns3::Time )', u'ns3::TracedValueCallback::Time'),
        (u'void ( * ) ( bool, bool )', u'ns3::TracedValueCallback::Bool'),
        (u'void ( * ) ( int16_t, int16_t )', u'ns3::TracedValueCallback::Int16'),
        (u'void ( * ) ( int32_t, int32_t )', u'ns3::TracedValueCallback::Int32'),
        (u'void ( * ) ( )', u'ns3::TracedValueCallback::Void'),
        (u'void ( * ) ( uint16_t, uint16_t )', u'ns3::TracedValueCallback::Uint16'),
    )
    for c_signature, alias in callback_aliases:
        typehandlers.add_type_alias(c_signature + u' *', alias)
        typehandlers.add_type_alias(c_signature + u' **', alias + u'*')
        typehandlers.add_type_alias(c_signature + u' *&', alias + u'&')
def register_types_ns3_internal(module):
    """Register types for the ns3::internal namespace (no types are exported)."""
    root_module = module.get_root()
def register_methods(root_module):
    """Attach methods, constructors, operators and attributes to every class
    registered with *root_module*.

    Each ``register_Ns3*_methods`` helper below receives the root module and
    the already-registered class object it decorates.  The call order follows
    the type-registration order of the generated bindings and should not be
    changed by hand.
    """
    # Value-type / helper classes.
    register_Ns3Address_methods(root_module, root_module['ns3::Address'])
    register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
    register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
    register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer'])
    register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator'])
    register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator'])
    register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item'])
    register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList'])
    register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator'])
    register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item'])
    register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
    register_Ns3DeviceEnergyModelContainer_methods(root_module, root_module['ns3::DeviceEnergyModelContainer'])
    register_Ns3DeviceEnergyModelHelper_methods(root_module, root_module['ns3::DeviceEnergyModelHelper'])
    register_Ns3EnergySourceHelper_methods(root_module, root_module['ns3::EnergySourceHelper'])
    register_Ns3EventId_methods(root_module, root_module['ns3::EventId'])
    register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher'])
    register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address'])
    register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask'])
    register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address'])
    register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix'])
    register_Ns3Mac48Address_methods(root_module, root_module['ns3::Mac48Address'])
    register_Ns3NetDeviceContainer_methods(root_module, root_module['ns3::NetDeviceContainer'])
    register_Ns3NodeContainer_methods(root_module, root_module['ns3::NodeContainer'])
    register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
    register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
    register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
    register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata'])
    register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item'])
    register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator'])
    register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator'])
    register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item'])
    register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList'])
    register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData'])
    register_Ns3Reservation_methods(root_module, root_module['ns3::Reservation'])
    register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    register_Ns3Simulator_methods(root_module, root_module['ns3::Simulator'])
    register_Ns3Tag_methods(root_module, root_module['ns3::Tag'])
    register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
    register_Ns3Tap_methods(root_module, root_module['ns3::Tap'])
    register_Ns3TimeWithUnit_methods(root_module, root_module['ns3::TimeWithUnit'])
    register_Ns3TracedValue__Double_methods(root_module, root_module['ns3::TracedValue< double >'])
    register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
    register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
    register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
    register_Ns3UanAddress_methods(root_module, root_module['ns3::UanAddress'])
    register_Ns3UanHelper_methods(root_module, root_module['ns3::UanHelper'])
    register_Ns3UanModesList_methods(root_module, root_module['ns3::UanModesList'])
    register_Ns3UanPacketArrival_methods(root_module, root_module['ns3::UanPacketArrival'])
    register_Ns3UanPdp_methods(root_module, root_module['ns3::UanPdp'])
    register_Ns3UanPhyListener_methods(root_module, root_module['ns3::UanPhyListener'])
    register_Ns3UanTxMode_methods(root_module, root_module['ns3::UanTxMode'])
    register_Ns3UanTxModeFactory_methods(root_module, root_module['ns3::UanTxModeFactory'])
    register_Ns3Vector2D_methods(root_module, root_module['ns3::Vector2D'])
    register_Ns3Vector3D_methods(root_module, root_module['ns3::Vector3D'])
    register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
    register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
    register_Ns3AcousticModemEnergyModelHelper_methods(root_module, root_module['ns3::AcousticModemEnergyModelHelper'])
    # ns3::Object hierarchy and random variables.
    register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk'])
    register_Ns3Header_methods(root_module, root_module['ns3::Header'])
    register_Ns3Object_methods(root_module, root_module['ns3::Object'])
    register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
    register_Ns3RandomVariableStream_methods(root_module, root_module['ns3::RandomVariableStream'])
    register_Ns3SequentialRandomVariable_methods(root_module, root_module['ns3::SequentialRandomVariable'])
    # SimpleRefCount template instantiations.
    register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
    register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
    register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
    register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
    register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    register_Ns3Time_methods(root_module, root_module['ns3::Time'])
    register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
    register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer'])
    register_Ns3TriangularRandomVariable_methods(root_module, root_module['ns3::TriangularRandomVariable'])
    # UAN headers, MAC, PHY, propagation and transducer classes.
    register_Ns3UanHeaderCommon_methods(root_module, root_module['ns3::UanHeaderCommon'])
    register_Ns3UanHeaderRcAck_methods(root_module, root_module['ns3::UanHeaderRcAck'])
    register_Ns3UanHeaderRcCts_methods(root_module, root_module['ns3::UanHeaderRcCts'])
    register_Ns3UanHeaderRcCtsGlobal_methods(root_module, root_module['ns3::UanHeaderRcCtsGlobal'])
    register_Ns3UanHeaderRcData_methods(root_module, root_module['ns3::UanHeaderRcData'])
    register_Ns3UanHeaderRcRts_methods(root_module, root_module['ns3::UanHeaderRcRts'])
    register_Ns3UanMac_methods(root_module, root_module['ns3::UanMac'])
    register_Ns3UanMacAloha_methods(root_module, root_module['ns3::UanMacAloha'])
    register_Ns3UanMacCw_methods(root_module, root_module['ns3::UanMacCw'])
    register_Ns3UanMacRc_methods(root_module, root_module['ns3::UanMacRc'])
    register_Ns3UanMacRcGw_methods(root_module, root_module['ns3::UanMacRcGw'])
    register_Ns3UanNoiseModel_methods(root_module, root_module['ns3::UanNoiseModel'])
    register_Ns3UanNoiseModelDefault_methods(root_module, root_module['ns3::UanNoiseModelDefault'])
    register_Ns3UanPhy_methods(root_module, root_module['ns3::UanPhy'])
    register_Ns3UanPhyCalcSinr_methods(root_module, root_module['ns3::UanPhyCalcSinr'])
    register_Ns3UanPhyCalcSinrDefault_methods(root_module, root_module['ns3::UanPhyCalcSinrDefault'])
    register_Ns3UanPhyCalcSinrDual_methods(root_module, root_module['ns3::UanPhyCalcSinrDual'])
    register_Ns3UanPhyCalcSinrFhFsk_methods(root_module, root_module['ns3::UanPhyCalcSinrFhFsk'])
    register_Ns3UanPhyDual_methods(root_module, root_module['ns3::UanPhyDual'])
    register_Ns3UanPhyGen_methods(root_module, root_module['ns3::UanPhyGen'])
    register_Ns3UanPhyPer_methods(root_module, root_module['ns3::UanPhyPer'])
    register_Ns3UanPhyPerGenDefault_methods(root_module, root_module['ns3::UanPhyPerGenDefault'])
    register_Ns3UanPhyPerUmodem_methods(root_module, root_module['ns3::UanPhyPerUmodem'])
    register_Ns3UanPropModel_methods(root_module, root_module['ns3::UanPropModel'])
    register_Ns3UanPropModelIdeal_methods(root_module, root_module['ns3::UanPropModelIdeal'])
    register_Ns3UanPropModelThorp_methods(root_module, root_module['ns3::UanPropModelThorp'])
    register_Ns3UanTransducer_methods(root_module, root_module['ns3::UanTransducer'])
    register_Ns3UanTransducerHd_methods(root_module, root_module['ns3::UanTransducerHd'])
    register_Ns3UniformRandomVariable_methods(root_module, root_module['ns3::UniformRandomVariable'])
    register_Ns3WeibullRandomVariable_methods(root_module, root_module['ns3::WeibullRandomVariable'])
    register_Ns3ZetaRandomVariable_methods(root_module, root_module['ns3::ZetaRandomVariable'])
    register_Ns3ZipfRandomVariable_methods(root_module, root_module['ns3::ZipfRandomVariable'])
    # Attribute system checkers and values.
    register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
    register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
    register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
    register_Ns3BooleanChecker_methods(root_module, root_module['ns3::BooleanChecker'])
    register_Ns3BooleanValue_methods(root_module, root_module['ns3::BooleanValue'])
    register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
    register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
    register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
    register_Ns3Channel_methods(root_module, root_module['ns3::Channel'])
    register_Ns3ConstantRandomVariable_methods(root_module, root_module['ns3::ConstantRandomVariable'])
    register_Ns3DeterministicRandomVariable_methods(root_module, root_module['ns3::DeterministicRandomVariable'])
    register_Ns3DeviceEnergyModel_methods(root_module, root_module['ns3::DeviceEnergyModel'])
    register_Ns3DoubleValue_methods(root_module, root_module['ns3::DoubleValue'])
    register_Ns3EmpiricalRandomVariable_methods(root_module, root_module['ns3::EmpiricalRandomVariable'])
    register_Ns3EmptyAttributeAccessor_methods(root_module, root_module['ns3::EmptyAttributeAccessor'])
    register_Ns3EmptyAttributeChecker_methods(root_module, root_module['ns3::EmptyAttributeChecker'])
    register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
    register_Ns3EnergyHarvester_methods(root_module, root_module['ns3::EnergyHarvester'])
    register_Ns3EnergySource_methods(root_module, root_module['ns3::EnergySource'])
    register_Ns3EnergySourceContainer_methods(root_module, root_module['ns3::EnergySourceContainer'])
    register_Ns3EnumChecker_methods(root_module, root_module['ns3::EnumChecker'])
    register_Ns3EnumValue_methods(root_module, root_module['ns3::EnumValue'])
    register_Ns3ErlangRandomVariable_methods(root_module, root_module['ns3::ErlangRandomVariable'])
    register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl'])
    register_Ns3ExponentialRandomVariable_methods(root_module, root_module['ns3::ExponentialRandomVariable'])
    register_Ns3GammaRandomVariable_methods(root_module, root_module['ns3::GammaRandomVariable'])
    register_Ns3IntegerValue_methods(root_module, root_module['ns3::IntegerValue'])
    register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker'])
    register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue'])
    register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker'])
    register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue'])
    register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker'])
    register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue'])
    register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker'])
    register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue'])
    register_Ns3LogNormalRandomVariable_methods(root_module, root_module['ns3::LogNormalRandomVariable'])
    register_Ns3Mac48AddressChecker_methods(root_module, root_module['ns3::Mac48AddressChecker'])
    register_Ns3Mac48AddressValue_methods(root_module, root_module['ns3::Mac48AddressValue'])
    register_Ns3MobilityModel_methods(root_module, root_module['ns3::MobilityModel'])
    register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice'])
    register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector'])
    register_Ns3Node_methods(root_module, root_module['ns3::Node'])
    register_Ns3NormalRandomVariable_methods(root_module, root_module['ns3::NormalRandomVariable'])
    register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
    register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
    register_Ns3Packet_methods(root_module, root_module['ns3::Packet'])
    register_Ns3ParetoRandomVariable_methods(root_module, root_module['ns3::ParetoRandomVariable'])
    register_Ns3PointerChecker_methods(root_module, root_module['ns3::PointerChecker'])
    register_Ns3PointerValue_methods(root_module, root_module['ns3::PointerValue'])
    register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
    register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
    register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
    register_Ns3UanChannel_methods(root_module, root_module['ns3::UanChannel'])
    register_Ns3UanModesListChecker_methods(root_module, root_module['ns3::UanModesListChecker'])
    register_Ns3UanModesListValue_methods(root_module, root_module['ns3::UanModesListValue'])
    register_Ns3UanNetDevice_methods(root_module, root_module['ns3::UanNetDevice'])
    register_Ns3UintegerValue_methods(root_module, root_module['ns3::UintegerValue'])
    register_Ns3Vector2DChecker_methods(root_module, root_module['ns3::Vector2DChecker'])
    register_Ns3Vector2DValue_methods(root_module, root_module['ns3::Vector2DValue'])
    register_Ns3Vector3DChecker_methods(root_module, root_module['ns3::Vector3DChecker'])
    register_Ns3Vector3DValue_methods(root_module, root_module['ns3::Vector3DValue'])
    register_Ns3AcousticModemEnergyModel_methods(root_module, root_module['ns3::AcousticModemEnergyModel'])
    register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker'])
    register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue'])
    # ns3::Hash namespace classes.
    register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation'])
    register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a'])
    register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32'])
    register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64'])
    register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3'])
    return
def register_Ns3Address_methods(root_module, cls):
    """Bind the ns3::Address value class (address.h, module 'network')."""
    # Comparison operators, then the output-stream operator, then equality
    # (registration order preserved from the generated bindings).
    for operator in ('<', '!='):
        cls.add_binary_comparison_operator(operator)
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # Address(); Address(uint8_t type, uint8_t const * buffer, uint8_t len);
    # Address(Address const & address) [copy constructor]
    cls.add_constructor([])
    cls.add_constructor([param('uint8_t', 'type'), param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    cls.add_constructor([param('ns3::Address const &', 'address')])
    # Type/length queries and raw-buffer copy helpers.
    cls.add_method('CheckCompatible', 'bool', [param('uint8_t', 'type'), param('uint8_t', 'len')], is_const=True)
    cls.add_method('CopyAllFrom', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    cls.add_method('CopyAllTo', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint8_t', 'len')], is_const=True)
    cls.add_method('CopyFrom', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    cls.add_method('CopyTo', 'uint32_t', [param('uint8_t *', 'buffer')], is_const=True)
    # TagBuffer (de)serialization.
    cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'buffer')])
    cls.add_method('GetLength', 'uint8_t', [], is_const=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    cls.add_method('IsInvalid', 'bool', [], is_const=True)
    cls.add_method('IsMatchingType', 'bool', [param('uint8_t', 'type')], is_const=True)
    # static uint8_t Register()
    cls.add_method('Register', 'uint8_t', [], is_static=True)
    cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'buffer')], is_const=True)
    return
def register_Ns3AttributeConstructionList_methods(root_module, cls):
    """Bind ns3::AttributeConstructionList (attribute-construction-list.h, module 'core')."""
    # Copy constructor first, then the default constructor (generated order).
    cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
    cls.add_constructor([])
    # void Add(std::string name, Ptr<AttributeChecker const> checker, Ptr<AttributeValue> value)
    cls.add_method('Add', 'void',
                   [param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
    # Const iteration over the stored items via std::list const iterators.
    item_iterator = 'std::_List_const_iterator< ns3::AttributeConstructionList::Item >'
    cls.add_method('Begin', item_iterator, [], is_const=True)
    cls.add_method('End', item_iterator, [], is_const=True)
    # Ptr<AttributeValue> Find(Ptr<AttributeChecker const> checker) const
    cls.add_method('Find', 'ns3::Ptr< ns3::AttributeValue >',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True)
    return
def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
    """Bind ns3::AttributeConstructionList::Item (attribute-construction-list.h, module 'core')."""
    # Default constructor, then the copy constructor (generated order).
    cls.add_constructor([])
    cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
    # Public data members exposed as mutable instance attributes.
    for attribute_name, cpp_type in (
            ('checker', 'ns3::Ptr< ns3::AttributeChecker const >'),
            ('name', 'std::string'),
            ('value', 'ns3::Ptr< ns3::AttributeValue >'),
    ):
        cls.add_instance_attribute(attribute_name, cpp_type, is_const=False)
    return
def register_Ns3Buffer_methods(root_module, cls):
    """Register bindings for ns3::Buffer (buffer.h, module 'network').

    Generated-style (PyBindGen layout) registration code: each ``##``
    comment gives the exact C++ signature wrapped by the call below it.
    The behavior is carried entirely by the string literals, so the code
    is intentionally left byte-identical to the generator's output.
    """
    ## buffer.h (module 'network'): ns3::Buffer::Buffer() [constructor]
    cls.add_constructor([])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize) [constructor]
    cls.add_constructor([param('uint32_t', 'dataSize')])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize, bool initialize) [constructor]
    cls.add_constructor([param('uint32_t', 'dataSize'), param('bool', 'initialize')])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(ns3::Buffer const & o) [copy constructor]
    cls.add_constructor([param('ns3::Buffer const &', 'o')])
    ## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(uint32_t end) [member function]
    cls.add_method('AddAtEnd',
                   'void',
                   [param('uint32_t', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(ns3::Buffer const & o) [member function]
    cls.add_method('AddAtEnd',
                   'void',
                   [param('ns3::Buffer const &', 'o')])
    ## buffer.h (module 'network'): void ns3::Buffer::AddAtStart(uint32_t start) [member function]
    cls.add_method('AddAtStart',
                   'void',
                   [param('uint32_t', 'start')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::Begin() const [member function]
    cls.add_method('Begin',
                   'ns3::Buffer::Iterator',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::CopyData(std::ostream * os, uint32_t size) const [member function]
    cls.add_method('CopyData',
                   'void',
                   [param('std::ostream *', 'os'), param('uint32_t', 'size')],
                   is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::CopyData(uint8_t * buffer, uint32_t size) const [member function]
    cls.add_method('CopyData',
                   'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'size')],
                   is_const=True)
    ## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFragment(uint32_t start, uint32_t length) const [member function]
    cls.add_method('CreateFragment',
                   'ns3::Buffer',
                   [param('uint32_t', 'start'), param('uint32_t', 'length')],
                   is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Deserialize',
                   'uint32_t',
                   [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::End() const [member function]
    cls.add_method('End',
                   'ns3::Buffer::Iterator',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSize() const [member function]
    cls.add_method('GetSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): uint8_t const * ns3::Buffer::PeekData() const [member function]
    cls.add_method('PeekData',
                   'uint8_t const *',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::RemoveAtEnd(uint32_t end) [member function]
    cls.add_method('RemoveAtEnd',
                   'void',
                   [param('uint32_t', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::RemoveAtStart(uint32_t start) [member function]
    cls.add_method('RemoveAtStart',
                   'void',
                   [param('uint32_t', 'start')])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize',
                   'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
                   is_const=True)
    return
def register_Ns3BufferIterator_methods(root_module, cls):
    """Register bindings for ns3::Buffer::Iterator (buffer.h, module 'network').

    Generated-style (PyBindGen layout) registration code: each ``##``
    comment gives the exact C++ signature wrapped by the call below it.
    The behavior is carried entirely by the string literals, so the code
    is intentionally left byte-identical to the generator's output.
    """
    ## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator(ns3::Buffer::Iterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Buffer::Iterator const &', 'arg0')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator() [constructor]
    cls.add_constructor([])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size) [member function]
    cls.add_method('CalculateIpChecksum',
                   'uint16_t',
                   [param('uint16_t', 'size')])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size, uint32_t initialChecksum) [member function]
    cls.add_method('CalculateIpChecksum',
                   'uint16_t',
                   [param('uint16_t', 'size'), param('uint32_t', 'initialChecksum')])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetDistanceFrom(ns3::Buffer::Iterator const & o) const [member function]
    cls.add_method('GetDistanceFrom',
                   'uint32_t',
                   [param('ns3::Buffer::Iterator const &', 'o')],
                   is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetRemainingSize() const [member function]
    cls.add_method('GetRemainingSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetSize() const [member function]
    cls.add_method('GetSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsEnd() const [member function]
    cls.add_method('IsEnd',
                   'bool',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsStart() const [member function]
    cls.add_method('IsStart',
                   'bool',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next() [member function]
    cls.add_method('Next',
                   'void',
                   [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next(uint32_t delta) [member function]
    cls.add_method('Next',
                   'void',
                   [param('uint32_t', 'delta')])
    ## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::PeekU8() [member function]
    cls.add_method('PeekU8',
                   'uint8_t',
                   [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev() [member function]
    cls.add_method('Prev',
                   'void',
                   [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev(uint32_t delta) [member function]
    cls.add_method('Prev',
                   'void',
                   [param('uint32_t', 'delta')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(uint8_t * buffer, uint32_t size) [member function]
    cls.add_method('Read',
                   'void',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(ns3::Buffer::Iterator start, uint32_t size) [member function]
    cls.add_method('Read',
                   'void',
                   [param('ns3::Buffer::Iterator', 'start'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadLsbtohU16() [member function]
    cls.add_method('ReadLsbtohU16',
                   'uint16_t',
                   [])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadLsbtohU32() [member function]
    cls.add_method('ReadLsbtohU32',
                   'uint32_t',
                   [])
    ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadLsbtohU64() [member function]
    cls.add_method('ReadLsbtohU64',
                   'uint64_t',
                   [])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadNtohU16() [member function]
    cls.add_method('ReadNtohU16',
                   'uint16_t',
                   [])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadNtohU32() [member function]
    cls.add_method('ReadNtohU32',
                   'uint32_t',
                   [])
    ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadNtohU64() [member function]
    cls.add_method('ReadNtohU64',
                   'uint64_t',
                   [])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadU16() [member function]
    cls.add_method('ReadU16',
                   'uint16_t',
                   [])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadU32() [member function]
    cls.add_method('ReadU32',
                   'uint32_t',
                   [])
    ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadU64() [member function]
    cls.add_method('ReadU64',
                   'uint64_t',
                   [])
    ## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::ReadU8() [member function]
    cls.add_method('ReadU8',
                   'uint8_t',
                   [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Write',
                   'void',
                   [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function]
    cls.add_method('Write',
                   'void',
                   [param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU16(uint16_t data) [member function]
    cls.add_method('WriteHtolsbU16',
                   'void',
                   [param('uint16_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU32(uint32_t data) [member function]
    cls.add_method('WriteHtolsbU32',
                   'void',
                   [param('uint32_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU64(uint64_t data) [member function]
    cls.add_method('WriteHtolsbU64',
                   'void',
                   [param('uint64_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU16(uint16_t data) [member function]
    cls.add_method('WriteHtonU16',
                   'void',
                   [param('uint16_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU32(uint32_t data) [member function]
    cls.add_method('WriteHtonU32',
                   'void',
                   [param('uint32_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU64(uint64_t data) [member function]
    cls.add_method('WriteHtonU64',
                   'void',
                   [param('uint64_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU16(uint16_t data) [member function]
    cls.add_method('WriteU16',
                   'void',
                   [param('uint16_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU32(uint32_t data) [member function]
    cls.add_method('WriteU32',
                   'void',
                   [param('uint32_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU64(uint64_t data) [member function]
    cls.add_method('WriteU64',
                   'void',
                   [param('uint64_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data) [member function]
    cls.add_method('WriteU8',
                   'void',
                   [param('uint8_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data, uint32_t len) [member function]
    cls.add_method('WriteU8',
                   'void',
                   [param('uint8_t', 'data'), param('uint32_t', 'len')])
    return
def register_Ns3ByteTagIterator_methods(root_module, cls):
    """Register bindings for ns3::ByteTagIterator (packet.h, module 'network').

    Wraps the copy constructor plus the HasNext()/Next() iteration pair.
    """
    # Copy constructor.
    cls.add_constructor([param('ns3::ByteTagIterator const &', 'arg0')])
    # bool HasNext() const
    cls.add_method('HasNext', 'bool', [], is_const=True)
    # ns3::ByteTagIterator::Item Next()
    cls.add_method('Next', 'ns3::ByteTagIterator::Item', [])
    return
def register_Ns3ByteTagIteratorItem_methods(root_module, cls):
    """Register bindings for ns3::ByteTagIterator::Item (packet.h, module 'network').

    Wraps the copy constructor and the const accessors GetEnd, GetStart,
    GetTag and GetTypeId.
    """
    # Copy constructor.
    cls.add_constructor([param('ns3::ByteTagIterator::Item const &', 'arg0')])
    # uint32_t GetEnd() const / uint32_t GetStart() const.
    for accessor_name in ('GetEnd', 'GetStart'):
        cls.add_method(accessor_name, 'uint32_t', [], is_const=True)
    # void GetTag(ns3::Tag & tag) const
    cls.add_method('GetTag', 'void', [param('ns3::Tag &', 'tag')], is_const=True)
    # ns3::TypeId GetTypeId() const
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True)
    return
def register_Ns3ByteTagList_methods(root_module, cls):
    """Register bindings for ns3::ByteTagList (byte-tag-list.h, module 'network').

    Generated-style (PyBindGen layout) registration code: each ``##``
    comment gives the exact C++ signature wrapped by the call below it.
    Code is intentionally left byte-identical to the generator's output.
    """
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList() [constructor]
    cls.add_constructor([])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList(ns3::ByteTagList const & o) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagList const &', 'o')])
    ## byte-tag-list.h (module 'network'): ns3::TagBuffer ns3::ByteTagList::Add(ns3::TypeId tid, uint32_t bufferSize, int32_t start, int32_t end) [member function]
    cls.add_method('Add',
                   'ns3::TagBuffer',
                   [param('ns3::TypeId', 'tid'), param('uint32_t', 'bufferSize'), param('int32_t', 'start'), param('int32_t', 'end')])
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Add(ns3::ByteTagList const & o) [member function]
    cls.add_method('Add',
                   'void',
                   [param('ns3::ByteTagList const &', 'o')])
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtEnd(int32_t appendOffset) [member function]
    cls.add_method('AddAtEnd',
                   'void',
                   [param('int32_t', 'appendOffset')])
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtStart(int32_t prependOffset) [member function]
    cls.add_method('AddAtStart',
                   'void',
                   [param('int32_t', 'prependOffset')])
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Adjust(int32_t adjustment) [member function]
    cls.add_method('Adjust',
                   'void',
                   [param('int32_t', 'adjustment')])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator ns3::ByteTagList::Begin(int32_t offsetStart, int32_t offsetEnd) const [member function]
    cls.add_method('Begin',
                   'ns3::ByteTagList::Iterator',
                   [param('int32_t', 'offsetStart'), param('int32_t', 'offsetEnd')],
                   is_const=True)
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::RemoveAll() [member function]
    cls.add_method('RemoveAll',
                   'void',
                   [])
    return
def register_Ns3ByteTagListIterator_methods(root_module, cls):
    """Register bindings for ns3::ByteTagList::Iterator
    (byte-tag-list.h, module 'network').
    """
    # Copy constructor.
    cls.add_constructor([param('ns3::ByteTagList::Iterator const &', 'arg0')])
    # Const accessors: uint32_t GetOffsetStart() const, bool HasNext() const.
    for method_name, return_type in (('GetOffsetStart', 'uint32_t'),
                                     ('HasNext', 'bool')):
        cls.add_method(method_name, return_type, [], is_const=True)
    # ns3::ByteTagList::Iterator::Item Next()
    cls.add_method('Next', 'ns3::ByteTagList::Iterator::Item', [])
    return
def register_Ns3ByteTagListIteratorItem_methods(root_module, cls):
    """Register bindings for ns3::ByteTagList::Iterator::Item
    (byte-tag-list.h, module 'network').

    Wraps the copy constructor, construction from a TagBuffer, and the
    five public data members.
    """
    # Copy constructor and construction from a TagBuffer.
    cls.add_constructor([param('ns3::ByteTagList::Iterator::Item const &', 'arg0')])
    cls.add_constructor([param('ns3::TagBuffer', 'buf')])
    # Public data members, all exposed as mutable instance attributes.
    for member_name, cpp_type in (('buf', 'ns3::TagBuffer'),
                                  ('end', 'int32_t'),
                                  ('size', 'uint32_t'),
                                  ('start', 'int32_t'),
                                  ('tid', 'ns3::TypeId')):
        cls.add_instance_attribute(member_name, cpp_type, is_const=False)
    return
def register_Ns3CallbackBase_methods(root_module, cls):
    """Register bindings for ns3::CallbackBase (callback.h, module 'core').

    Wraps the copy and default constructors, the const GetImpl() accessor,
    and a protected constructor taking an implementation pointer.
    """
    # Copy constructor, then the default constructor.
    cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
    cls.add_constructor([])
    # ns3::Ptr<ns3::CallbackImplBase> GetImpl() const
    cls.add_method('GetImpl', 'ns3::Ptr< ns3::CallbackImplBase >', [], is_const=True)
    # Protected constructor from an implementation pointer.
    cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')],
                        visibility='protected')
    return
def register_Ns3DeviceEnergyModelContainer_methods(root_module, cls):
    """Register bindings for ns3::DeviceEnergyModelContainer
    (device-energy-model-container.h, module 'energy').

    Generated-style (PyBindGen layout) registration code: each ``##``
    comment gives the exact C++ signature wrapped by the call below it.
    The Begin()/End() return types spell out the libstdc++ vector
    iterator, so the code is intentionally left byte-identical.
    """
    ## device-energy-model-container.h (module 'energy'): ns3::DeviceEnergyModelContainer::DeviceEnergyModelContainer(ns3::DeviceEnergyModelContainer const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::DeviceEnergyModelContainer const &', 'arg0')])
    ## device-energy-model-container.h (module 'energy'): ns3::DeviceEnergyModelContainer::DeviceEnergyModelContainer() [constructor]
    cls.add_constructor([])
    ## device-energy-model-container.h (module 'energy'): ns3::DeviceEnergyModelContainer::DeviceEnergyModelContainer(ns3::Ptr<ns3::DeviceEnergyModel> model) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::DeviceEnergyModel >', 'model')])
    ## device-energy-model-container.h (module 'energy'): ns3::DeviceEnergyModelContainer::DeviceEnergyModelContainer(std::string modelName) [constructor]
    cls.add_constructor([param('std::string', 'modelName')])
    ## device-energy-model-container.h (module 'energy'): ns3::DeviceEnergyModelContainer::DeviceEnergyModelContainer(ns3::DeviceEnergyModelContainer const & a, ns3::DeviceEnergyModelContainer const & b) [constructor]
    cls.add_constructor([param('ns3::DeviceEnergyModelContainer const &', 'a'), param('ns3::DeviceEnergyModelContainer const &', 'b')])
    ## device-energy-model-container.h (module 'energy'): void ns3::DeviceEnergyModelContainer::Add(ns3::DeviceEnergyModelContainer container) [member function]
    cls.add_method('Add',
                   'void',
                   [param('ns3::DeviceEnergyModelContainer', 'container')])
    ## device-energy-model-container.h (module 'energy'): void ns3::DeviceEnergyModelContainer::Add(ns3::Ptr<ns3::DeviceEnergyModel> model) [member function]
    cls.add_method('Add',
                   'void',
                   [param('ns3::Ptr< ns3::DeviceEnergyModel >', 'model')])
    ## device-energy-model-container.h (module 'energy'): void ns3::DeviceEnergyModelContainer::Add(std::string modelName) [member function]
    cls.add_method('Add',
                   'void',
                   [param('std::string', 'modelName')])
    ## device-energy-model-container.h (module 'energy'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::DeviceEnergyModel>*,std::vector<ns3::Ptr<ns3::DeviceEnergyModel>, std::allocator<ns3::Ptr<ns3::DeviceEnergyModel> > > > ns3::DeviceEnergyModelContainer::Begin() const [member function]
    cls.add_method('Begin',
                   '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::DeviceEnergyModel > const, std::vector< ns3::Ptr< ns3::DeviceEnergyModel > > >',
                   [],
                   is_const=True)
    ## device-energy-model-container.h (module 'energy'): void ns3::DeviceEnergyModelContainer::Clear() [member function]
    cls.add_method('Clear',
                   'void',
                   [])
    ## device-energy-model-container.h (module 'energy'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::DeviceEnergyModel>*,std::vector<ns3::Ptr<ns3::DeviceEnergyModel>, std::allocator<ns3::Ptr<ns3::DeviceEnergyModel> > > > ns3::DeviceEnergyModelContainer::End() const [member function]
    cls.add_method('End',
                   '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::DeviceEnergyModel > const, std::vector< ns3::Ptr< ns3::DeviceEnergyModel > > >',
                   [],
                   is_const=True)
    ## device-energy-model-container.h (module 'energy'): ns3::Ptr<ns3::DeviceEnergyModel> ns3::DeviceEnergyModelContainer::Get(uint32_t i) const [member function]
    cls.add_method('Get',
                   'ns3::Ptr< ns3::DeviceEnergyModel >',
                   [param('uint32_t', 'i')],
                   is_const=True)
    ## device-energy-model-container.h (module 'energy'): uint32_t ns3::DeviceEnergyModelContainer::GetN() const [member function]
    cls.add_method('GetN',
                   'uint32_t',
                   [],
                   is_const=True)
    return
def register_Ns3DeviceEnergyModelHelper_methods(root_module, cls):
    """Register bindings for ns3::DeviceEnergyModelHelper
    (energy-model-helper.h, module 'energy').

    Generated-style (PyBindGen layout) registration code.  Note the
    abstract interface: Set() is pure virtual, and DoInstall() is a pure
    virtual private hook.  Code left byte-identical.
    """
    ## energy-model-helper.h (module 'energy'): ns3::DeviceEnergyModelHelper::DeviceEnergyModelHelper() [constructor]
    cls.add_constructor([])
    ## energy-model-helper.h (module 'energy'): ns3::DeviceEnergyModelHelper::DeviceEnergyModelHelper(ns3::DeviceEnergyModelHelper const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::DeviceEnergyModelHelper const &', 'arg0')])
    ## energy-model-helper.h (module 'energy'): ns3::DeviceEnergyModelContainer ns3::DeviceEnergyModelHelper::Install(ns3::Ptr<ns3::NetDevice> device, ns3::Ptr<ns3::EnergySource> source) const [member function]
    cls.add_method('Install',
                   'ns3::DeviceEnergyModelContainer',
                   [param('ns3::Ptr< ns3::NetDevice >', 'device'), param('ns3::Ptr< ns3::EnergySource >', 'source')],
                   is_const=True)
    ## energy-model-helper.h (module 'energy'): ns3::DeviceEnergyModelContainer ns3::DeviceEnergyModelHelper::Install(ns3::NetDeviceContainer deviceContainer, ns3::EnergySourceContainer sourceContainer) const [member function]
    cls.add_method('Install',
                   'ns3::DeviceEnergyModelContainer',
                   [param('ns3::NetDeviceContainer', 'deviceContainer'), param('ns3::EnergySourceContainer', 'sourceContainer')],
                   is_const=True)
    ## energy-model-helper.h (module 'energy'): void ns3::DeviceEnergyModelHelper::Set(std::string name, ns3::AttributeValue const & v) [member function]
    cls.add_method('Set',
                   'void',
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'v')],
                   is_pure_virtual=True, is_virtual=True)
    ## energy-model-helper.h (module 'energy'): ns3::Ptr<ns3::DeviceEnergyModel> ns3::DeviceEnergyModelHelper::DoInstall(ns3::Ptr<ns3::NetDevice> device, ns3::Ptr<ns3::EnergySource> source) const [member function]
    cls.add_method('DoInstall',
                   'ns3::Ptr< ns3::DeviceEnergyModel >',
                   [param('ns3::Ptr< ns3::NetDevice >', 'device'), param('ns3::Ptr< ns3::EnergySource >', 'source')],
                   is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
    return
def register_Ns3EnergySourceHelper_methods(root_module, cls):
    """Register bindings for ns3::EnergySourceHelper
    (energy-model-helper.h, module 'energy').

    Generated-style (PyBindGen layout) registration code.  Mirrors
    DeviceEnergyModelHelper: three Install() overloads plus InstallAll(),
    with pure-virtual Set() and a private pure-virtual DoInstall() hook.
    Code left byte-identical.
    """
    ## energy-model-helper.h (module 'energy'): ns3::EnergySourceHelper::EnergySourceHelper() [constructor]
    cls.add_constructor([])
    ## energy-model-helper.h (module 'energy'): ns3::EnergySourceHelper::EnergySourceHelper(ns3::EnergySourceHelper const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EnergySourceHelper const &', 'arg0')])
    ## energy-model-helper.h (module 'energy'): ns3::EnergySourceContainer ns3::EnergySourceHelper::Install(ns3::Ptr<ns3::Node> node) const [member function]
    cls.add_method('Install',
                   'ns3::EnergySourceContainer',
                   [param('ns3::Ptr< ns3::Node >', 'node')],
                   is_const=True)
    ## energy-model-helper.h (module 'energy'): ns3::EnergySourceContainer ns3::EnergySourceHelper::Install(ns3::NodeContainer c) const [member function]
    cls.add_method('Install',
                   'ns3::EnergySourceContainer',
                   [param('ns3::NodeContainer', 'c')],
                   is_const=True)
    ## energy-model-helper.h (module 'energy'): ns3::EnergySourceContainer ns3::EnergySourceHelper::Install(std::string nodeName) const [member function]
    cls.add_method('Install',
                   'ns3::EnergySourceContainer',
                   [param('std::string', 'nodeName')],
                   is_const=True)
    ## energy-model-helper.h (module 'energy'): ns3::EnergySourceContainer ns3::EnergySourceHelper::InstallAll() const [member function]
    cls.add_method('InstallAll',
                   'ns3::EnergySourceContainer',
                   [],
                   is_const=True)
    ## energy-model-helper.h (module 'energy'): void ns3::EnergySourceHelper::Set(std::string name, ns3::AttributeValue const & v) [member function]
    cls.add_method('Set',
                   'void',
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'v')],
                   is_pure_virtual=True, is_virtual=True)
    ## energy-model-helper.h (module 'energy'): ns3::Ptr<ns3::EnergySource> ns3::EnergySourceHelper::DoInstall(ns3::Ptr<ns3::Node> node) const [member function]
    cls.add_method('DoInstall',
                   'ns3::Ptr< ns3::EnergySource >',
                   [param('ns3::Ptr< ns3::Node >', 'node')],
                   is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
    return
def register_Ns3EventId_methods(root_module, cls):
    """Register bindings for ns3::EventId (event-id.h, module 'core').

    Generated-style (PyBindGen layout) registration code: the != and ==
    comparison operators, constructors, and const accessors for the
    scheduled event's context/timestamp/uid and state.  Code left
    byte-identical.
    """
    cls.add_binary_comparison_operator('!=')
    cls.add_binary_comparison_operator('==')
    ## event-id.h (module 'core'): ns3::EventId::EventId(ns3::EventId const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EventId const &', 'arg0')])
    ## event-id.h (module 'core'): ns3::EventId::EventId() [constructor]
    cls.add_constructor([])
    ## event-id.h (module 'core'): ns3::EventId::EventId(ns3::Ptr<ns3::EventImpl> const & impl, uint64_t ts, uint32_t context, uint32_t uid) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::EventImpl > const &', 'impl'), param('uint64_t', 'ts'), param('uint32_t', 'context'), param('uint32_t', 'uid')])
    ## event-id.h (module 'core'): void ns3::EventId::Cancel() [member function]
    cls.add_method('Cancel',
                   'void',
                   [])
    ## event-id.h (module 'core'): uint32_t ns3::EventId::GetContext() const [member function]
    cls.add_method('GetContext',
                   'uint32_t',
                   [],
                   is_const=True)
    ## event-id.h (module 'core'): uint64_t ns3::EventId::GetTs() const [member function]
    cls.add_method('GetTs',
                   'uint64_t',
                   [],
                   is_const=True)
    ## event-id.h (module 'core'): uint32_t ns3::EventId::GetUid() const [member function]
    cls.add_method('GetUid',
                   'uint32_t',
                   [],
                   is_const=True)
    ## event-id.h (module 'core'): bool ns3::EventId::IsExpired() const [member function]
    cls.add_method('IsExpired',
                   'bool',
                   [],
                   is_const=True)
    ## event-id.h (module 'core'): bool ns3::EventId::IsRunning() const [member function]
    cls.add_method('IsRunning',
                   'bool',
                   [],
                   is_const=True)
    ## event-id.h (module 'core'): ns3::EventImpl * ns3::EventId::PeekEventImpl() const [member function]
    cls.add_method('PeekEventImpl',
                   'ns3::EventImpl *',
                   [],
                   is_const=True)
    return
def register_Ns3Hasher_methods(root_module, cls):
    """Register bindings for ns3::Hasher (hash.h, module 'core').

    Generated-style (PyBindGen layout) registration code: constructors,
    the GetHash32/GetHash64 overloads (raw buffer and std::string), and
    clear(), which returns a Hasher reference.  Code left byte-identical.
    """
    ## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Hasher const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Hasher const &', 'arg0')])
    ## hash.h (module 'core'): ns3::Hasher::Hasher() [constructor]
    cls.add_constructor([])
    ## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Ptr<ns3::Hash::Implementation> hp) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::Hash::Implementation >', 'hp')])
    ## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash32',
                   'uint32_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')])
    ## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(std::string const s) [member function]
    cls.add_method('GetHash32',
                   'uint32_t',
                   [param('std::string const', 's')])
    ## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash64',
                   'uint64_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')])
    ## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(std::string const s) [member function]
    cls.add_method('GetHash64',
                   'uint64_t',
                   [param('std::string const', 's')])
    ## hash.h (module 'core'): ns3::Hasher & ns3::Hasher::clear() [member function]
    cls.add_method('clear',
                   'ns3::Hasher &',
                   [])
    return
def register_Ns3Ipv4Address_methods(root_module, cls):
    """Register bindings for ns3::Ipv4Address (ipv4-address.h, module 'network').

    Generated-style (PyBindGen layout) registration code: comparison and
    output-stream operators, constructors, static factory/query helpers,
    and const accessors/predicates.  Each ``##`` comment gives the exact
    C++ signature wrapped by the call below it.  Code left byte-identical.
    """
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(ns3::Ipv4Address const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4Address const &', 'arg0')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(uint32_t address) [constructor]
    cls.add_constructor([param('uint32_t', 'address')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(char const * address) [constructor]
    cls.add_constructor([param('char const *', 'address')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::CombineMask(ns3::Ipv4Mask const & mask) const [member function]
    cls.add_method('CombineMask',
                   'ns3::Ipv4Address',
                   [param('ns3::Ipv4Mask const &', 'mask')],
                   is_const=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::ConvertFrom(ns3::Address const & address) [member function]
    cls.add_method('ConvertFrom',
                   'ns3::Ipv4Address',
                   [param('ns3::Address const &', 'address')],
                   is_static=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::Deserialize(uint8_t const * buf) [member function]
    cls.add_method('Deserialize',
                   'ns3::Ipv4Address',
                   [param('uint8_t const *', 'buf')],
                   is_static=True)
    ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Address::Get() const [member function]
    cls.add_method('Get',
                   'uint32_t',
                   [],
                   is_const=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetAny() [member function]
    cls.add_method('GetAny',
                   'ns3::Ipv4Address',
                   [],
                   is_static=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetBroadcast() [member function]
    cls.add_method('GetBroadcast',
                   'ns3::Ipv4Address',
                   [],
                   is_static=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetLoopback() [member function]
    cls.add_method('GetLoopback',
                   'ns3::Ipv4Address',
                   [],
                   is_static=True)
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::GetSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function]
    cls.add_method('GetSubnetDirectedBroadcast',
                   'ns3::Ipv4Address',
                   [param('ns3::Ipv4Mask const &', 'mask')],
                   is_const=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetZero() [member function]
    cls.add_method('GetZero',
                   'ns3::Ipv4Address',
                   [],
                   is_static=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsAny() const [member function]
    cls.add_method('IsAny',
                   'bool',
                   [],
                   is_const=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsBroadcast() const [member function]
    cls.add_method('IsBroadcast',
                   'bool',
                   [],
                   is_const=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsEqual(ns3::Ipv4Address const & other) const [member function]
    cls.add_method('IsEqual',
                   'bool',
                   [param('ns3::Ipv4Address const &', 'other')],
                   is_const=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalMulticast() const [member function]
    cls.add_method('IsLocalMulticast',
                   'bool',
                   [],
                   is_const=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalhost() const [member function]
    cls.add_method('IsLocalhost',
                   'bool',
                   [],
                   is_const=True)
    ## ipv4-address.h (module 'network'): static bool ns3::Ipv4Address::IsMatchingType(ns3::Address const & address) [member function]
    cls.add_method('IsMatchingType',
                   'bool',
                   [param('ns3::Address const &', 'address')],
                   is_static=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsMulticast() const [member function]
    cls.add_method('IsMulticast',
                   'bool',
                   [],
                   is_const=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function]
    cls.add_method('IsSubnetDirectedBroadcast',
                   'bool',
                   [param('ns3::Ipv4Mask const &', 'mask')],
                   is_const=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Serialize(uint8_t * buf) const [member function]
    cls.add_method('Serialize',
                   'void',
                   [param('uint8_t *', 'buf')],
                   is_const=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(uint32_t address) [member function]
    cls.add_method('Set',
                   'void',
                   [param('uint32_t', 'address')])
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(char const * address) [member function]
    cls.add_method('Set',
                   'void',
                   [param('char const *', 'address')])
    return
def register_Ns3Ipv4Mask_methods(root_module, cls):
    """Register Python bindings for the ns3::Ipv4Mask class.

    Auto-generated PyBindGen registration code: adds the comparison and
    output-stream operators, the constructors, and wrappers for each
    member function declared in ipv4-address.h (module 'network').
    Call order mirrors the generated API scan; do not reorder by hand.

    root_module -- top-level PyBindGen module wrapper; unused in this
        function, kept for the uniform register_*_methods signature.
    cls -- PyBindGen class wrapper for ns3::Ipv4Mask that the
        registrations are attached to.
    """
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(ns3::Ipv4Mask const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4Mask const &', 'arg0')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(uint32_t mask) [constructor]
    cls.add_constructor([param('uint32_t', 'mask')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(char const * mask) [constructor]
    cls.add_constructor([param('char const *', 'mask')])
    ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::Get() const [member function]
    cls.add_method('Get',
                   'uint32_t',
                   [],
                   is_const=True)
    ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::GetInverse() const [member function]
    cls.add_method('GetInverse',
                   'uint32_t',
                   [],
                   is_const=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetLoopback() [member function]
    cls.add_method('GetLoopback',
                   'ns3::Ipv4Mask',
                   [],
                   is_static=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetOnes() [member function]
    cls.add_method('GetOnes',
                   'ns3::Ipv4Mask',
                   [],
                   is_static=True)
    ## ipv4-address.h (module 'network'): uint16_t ns3::Ipv4Mask::GetPrefixLength() const [member function]
    cls.add_method('GetPrefixLength',
                   'uint16_t',
                   [],
                   is_const=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetZero() [member function]
    cls.add_method('GetZero',
                   'ns3::Ipv4Mask',
                   [],
                   is_static=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsEqual(ns3::Ipv4Mask other) const [member function]
    cls.add_method('IsEqual',
                   'bool',
                   [param('ns3::Ipv4Mask', 'other')],
                   is_const=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsMatch(ns3::Ipv4Address a, ns3::Ipv4Address b) const [member function]
    cls.add_method('IsMatch',
                   'bool',
                   [param('ns3::Ipv4Address', 'a'), param('ns3::Ipv4Address', 'b')],
                   is_const=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Set(uint32_t mask) [member function]
    cls.add_method('Set',
                   'void',
                   [param('uint32_t', 'mask')])
    return
def register_Ns3Ipv6Address_methods(root_module, cls):
    """Register Python bindings for the ns3::Ipv6Address class.

    Auto-generated PyBindGen registration code: adds comparison and
    output-stream operators, constructors, and wrappers for the member
    functions declared in ipv6-address.h (module 'network'), including
    the static MakeAutoconfigured* factories for Mac16/48/64 addresses.
    Call order mirrors the generated API scan; do not reorder by hand.

    root_module -- top-level PyBindGen module wrapper; unused in this
        function, kept for the uniform register_*_methods signature.
    cls -- PyBindGen class wrapper for ns3::Ipv6Address.
    """
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(char const * address) [constructor]
    cls.add_constructor([param('char const *', 'address')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(uint8_t * address) [constructor]
    cls.add_constructor([param('uint8_t *', 'address')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const & addr) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6Address const &', 'addr')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const * addr) [constructor]
    cls.add_constructor([param('ns3::Ipv6Address const *', 'addr')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6Address::CombinePrefix(ns3::Ipv6Prefix const & prefix) [member function]
    cls.add_method('CombinePrefix',
                   'ns3::Ipv6Address',
                   [param('ns3::Ipv6Prefix const &', 'prefix')])
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::ConvertFrom(ns3::Address const & address) [member function]
    cls.add_method('ConvertFrom',
                   'ns3::Ipv6Address',
                   [param('ns3::Address const &', 'address')],
                   is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::Deserialize(uint8_t const * buf) [member function]
    cls.add_method('Deserialize',
                   'ns3::Ipv6Address',
                   [param('uint8_t const *', 'buf')],
                   is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllHostsMulticast() [member function]
    cls.add_method('GetAllHostsMulticast',
                   'ns3::Ipv6Address',
                   [],
                   is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllNodesMulticast() [member function]
    cls.add_method('GetAllNodesMulticast',
                   'ns3::Ipv6Address',
                   [],
                   is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllRoutersMulticast() [member function]
    cls.add_method('GetAllRoutersMulticast',
                   'ns3::Ipv6Address',
                   [],
                   is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAny() [member function]
    cls.add_method('GetAny',
                   'ns3::Ipv6Address',
                   [],
                   is_static=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::GetBytes(uint8_t * buf) const [member function]
    cls.add_method('GetBytes',
                   'void',
                   [param('uint8_t *', 'buf')],
                   is_const=True)
    ## ipv6-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv6Address::GetIpv4MappedAddress() const [member function]
    cls.add_method('GetIpv4MappedAddress',
                   'ns3::Ipv4Address',
                   [],
                   is_const=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetLoopback() [member function]
    cls.add_method('GetLoopback',
                   'ns3::Ipv6Address',
                   [],
                   is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetOnes() [member function]
    cls.add_method('GetOnes',
                   'ns3::Ipv6Address',
                   [],
                   is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetZero() [member function]
    cls.add_method('GetZero',
                   'ns3::Ipv6Address',
                   [],
                   is_static=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllHostsMulticast() const [member function]
    cls.add_method('IsAllHostsMulticast',
                   'bool',
                   [],
                   deprecated=True, is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllNodesMulticast() const [member function]
    cls.add_method('IsAllNodesMulticast',
                   'bool',
                   [],
                   is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllRoutersMulticast() const [member function]
    cls.add_method('IsAllRoutersMulticast',
                   'bool',
                   [],
                   is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAny() const [member function]
    cls.add_method('IsAny',
                   'bool',
                   [],
                   is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsDocumentation() const [member function]
    cls.add_method('IsDocumentation',
                   'bool',
                   [],
                   is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsEqual(ns3::Ipv6Address const & other) const [member function]
    cls.add_method('IsEqual',
                   'bool',
                   [param('ns3::Ipv6Address const &', 'other')],
                   is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsIpv4MappedAddress() const [member function]
    cls.add_method('IsIpv4MappedAddress',
                   'bool',
                   [],
                   is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocal() const [member function]
    cls.add_method('IsLinkLocal',
                   'bool',
                   [],
                   is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocalMulticast() const [member function]
    cls.add_method('IsLinkLocalMulticast',
                   'bool',
                   [],
                   is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLocalhost() const [member function]
    cls.add_method('IsLocalhost',
                   'bool',
                   [],
                   is_const=True)
    ## ipv6-address.h (module 'network'): static bool ns3::Ipv6Address::IsMatchingType(ns3::Address const & address) [member function]
    cls.add_method('IsMatchingType',
                   'bool',
                   [param('ns3::Address const &', 'address')],
                   is_static=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsMulticast() const [member function]
    cls.add_method('IsMulticast',
                   'bool',
                   [],
                   is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsSolicitedMulticast() const [member function]
    cls.add_method('IsSolicitedMulticast',
                   'bool',
                   [],
                   is_const=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac16Address addr, ns3::Ipv6Address prefix) [member function]
    cls.add_method('MakeAutoconfiguredAddress',
                   'ns3::Ipv6Address',
                   [param('ns3::Mac16Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
                   is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac48Address addr, ns3::Ipv6Address prefix) [member function]
    cls.add_method('MakeAutoconfiguredAddress',
                   'ns3::Ipv6Address',
                   [param('ns3::Mac48Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
                   is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac64Address addr, ns3::Ipv6Address prefix) [member function]
    cls.add_method('MakeAutoconfiguredAddress',
                   'ns3::Ipv6Address',
                   [param('ns3::Mac64Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
                   is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac16Address mac) [member function]
    cls.add_method('MakeAutoconfiguredLinkLocalAddress',
                   'ns3::Ipv6Address',
                   [param('ns3::Mac16Address', 'mac')],
                   is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac48Address mac) [member function]
    cls.add_method('MakeAutoconfiguredLinkLocalAddress',
                   'ns3::Ipv6Address',
                   [param('ns3::Mac48Address', 'mac')],
                   is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac64Address mac) [member function]
    cls.add_method('MakeAutoconfiguredLinkLocalAddress',
                   'ns3::Ipv6Address',
                   [param('ns3::Mac64Address', 'mac')],
                   is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeIpv4MappedAddress(ns3::Ipv4Address addr) [member function]
    cls.add_method('MakeIpv4MappedAddress',
                   'ns3::Ipv6Address',
                   [param('ns3::Ipv4Address', 'addr')],
                   is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeSolicitedAddress(ns3::Ipv6Address addr) [member function]
    cls.add_method('MakeSolicitedAddress',
                   'ns3::Ipv6Address',
                   [param('ns3::Ipv6Address', 'addr')],
                   is_static=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Serialize(uint8_t * buf) const [member function]
    cls.add_method('Serialize',
                   'void',
                   [param('uint8_t *', 'buf')],
                   is_const=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(char const * address) [member function]
    cls.add_method('Set',
                   'void',
                   [param('char const *', 'address')])
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(uint8_t * address) [member function]
    cls.add_method('Set',
                   'void',
                   [param('uint8_t *', 'address')])
    return
def register_Ns3Ipv6Prefix_methods(root_module, cls):
    """Register Python bindings for the ns3::Ipv6Prefix class.

    Auto-generated PyBindGen registration code: adds comparison and
    output-stream operators, constructors, and wrappers for the member
    functions declared in ipv6-address.h (module 'network').
    Call order mirrors the generated API scan; do not reorder by hand.

    root_module -- top-level PyBindGen module wrapper; unused in this
        function, kept for the uniform register_*_methods signature.
    cls -- PyBindGen class wrapper for ns3::Ipv6Prefix.
    """
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t * prefix) [constructor]
    cls.add_constructor([param('uint8_t *', 'prefix')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(char const * prefix) [constructor]
    cls.add_constructor([param('char const *', 'prefix')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t prefix) [constructor]
    cls.add_constructor([param('uint8_t', 'prefix')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const & prefix) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6Prefix const &', 'prefix')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const * prefix) [constructor]
    cls.add_constructor([param('ns3::Ipv6Prefix const *', 'prefix')])
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::GetBytes(uint8_t * buf) const [member function]
    cls.add_method('GetBytes',
                   'void',
                   [param('uint8_t *', 'buf')],
                   is_const=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetLoopback() [member function]
    cls.add_method('GetLoopback',
                   'ns3::Ipv6Prefix',
                   [],
                   is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetOnes() [member function]
    cls.add_method('GetOnes',
                   'ns3::Ipv6Prefix',
                   [],
                   is_static=True)
    ## ipv6-address.h (module 'network'): uint8_t ns3::Ipv6Prefix::GetPrefixLength() const [member function]
    cls.add_method('GetPrefixLength',
                   'uint8_t',
                   [],
                   is_const=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetZero() [member function]
    cls.add_method('GetZero',
                   'ns3::Ipv6Prefix',
                   [],
                   is_static=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsEqual(ns3::Ipv6Prefix const & other) const [member function]
    cls.add_method('IsEqual',
                   'bool',
                   [param('ns3::Ipv6Prefix const &', 'other')],
                   is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsMatch(ns3::Ipv6Address a, ns3::Ipv6Address b) const [member function]
    cls.add_method('IsMatch',
                   'bool',
                   [param('ns3::Ipv6Address', 'a'), param('ns3::Ipv6Address', 'b')],
                   is_const=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True)
    return
def register_Ns3Mac48Address_methods(root_module, cls):
    """Register Python bindings for the ns3::Mac48Address class.

    Auto-generated PyBindGen registration code: adds comparison and
    output-stream operators, constructors, and wrappers for the member
    functions declared in mac48-address.h (module 'network').
    Call order mirrors the generated API scan; do not reorder by hand.

    root_module -- top-level PyBindGen module wrapper; unused in this
        function, kept for the uniform register_*_methods signature.
    cls -- PyBindGen class wrapper for ns3::Mac48Address.
    """
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(ns3::Mac48Address const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Mac48Address const &', 'arg0')])
    ## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address() [constructor]
    cls.add_constructor([])
    ## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(char const * str) [constructor]
    cls.add_constructor([param('char const *', 'str')])
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::Allocate() [member function]
    cls.add_method('Allocate',
                   'ns3::Mac48Address',
                   [],
                   is_static=True)
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::ConvertFrom(ns3::Address const & address) [member function]
    cls.add_method('ConvertFrom',
                   'ns3::Mac48Address',
                   [param('ns3::Address const &', 'address')],
                   is_static=True)
    ## mac48-address.h (module 'network'): void ns3::Mac48Address::CopyFrom(uint8_t const * buffer) [member function]
    cls.add_method('CopyFrom',
                   'void',
                   [param('uint8_t const *', 'buffer')])
    ## mac48-address.h (module 'network'): void ns3::Mac48Address::CopyTo(uint8_t * buffer) const [member function]
    cls.add_method('CopyTo',
                   'void',
                   [param('uint8_t *', 'buffer')],
                   is_const=True)
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetBroadcast() [member function]
    cls.add_method('GetBroadcast',
                   'ns3::Mac48Address',
                   [],
                   is_static=True)
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv4Address address) [member function]
    cls.add_method('GetMulticast',
                   'ns3::Mac48Address',
                   [param('ns3::Ipv4Address', 'address')],
                   is_static=True)
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv6Address address) [member function]
    cls.add_method('GetMulticast',
                   'ns3::Mac48Address',
                   [param('ns3::Ipv6Address', 'address')],
                   is_static=True)
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast6Prefix() [member function]
    cls.add_method('GetMulticast6Prefix',
                   'ns3::Mac48Address',
                   [],
                   is_static=True)
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticastPrefix() [member function]
    cls.add_method('GetMulticastPrefix',
                   'ns3::Mac48Address',
                   [],
                   is_static=True)
    ## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsBroadcast() const [member function]
    cls.add_method('IsBroadcast',
                   'bool',
                   [],
                   is_const=True)
    ## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsGroup() const [member function]
    cls.add_method('IsGroup',
                   'bool',
                   [],
                   is_const=True)
    ## mac48-address.h (module 'network'): static bool ns3::Mac48Address::IsMatchingType(ns3::Address const & address) [member function]
    cls.add_method('IsMatchingType',
                   'bool',
                   [param('ns3::Address const &', 'address')],
                   is_static=True)
    return
def register_Ns3NetDeviceContainer_methods(root_module, cls):
    """Register Python bindings for the ns3::NetDeviceContainer class.

    Auto-generated PyBindGen registration code: adds constructors and
    wrappers for the member functions declared in
    net-device-container.h (module 'network'), including the
    iterator-returning Begin()/End() accessors.
    Call order mirrors the generated API scan; do not reorder by hand.

    root_module -- top-level PyBindGen module wrapper; unused in this
        function, kept for the uniform register_*_methods signature.
    cls -- PyBindGen class wrapper for ns3::NetDeviceContainer.
    """
    ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::NetDeviceContainer const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::NetDeviceContainer const &', 'arg0')])
    ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer() [constructor]
    cls.add_constructor([])
    ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::Ptr<ns3::NetDevice> dev) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::NetDevice >', 'dev')])
    ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(std::string devName) [constructor]
    cls.add_constructor([param('std::string', 'devName')])
    ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::NetDeviceContainer const & a, ns3::NetDeviceContainer const & b) [constructor]
    cls.add_constructor([param('ns3::NetDeviceContainer const &', 'a'), param('ns3::NetDeviceContainer const &', 'b')])
    ## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(ns3::NetDeviceContainer other) [member function]
    cls.add_method('Add',
                   'void',
                   [param('ns3::NetDeviceContainer', 'other')])
    ## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(ns3::Ptr<ns3::NetDevice> device) [member function]
    cls.add_method('Add',
                   'void',
                   [param('ns3::Ptr< ns3::NetDevice >', 'device')])
    ## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(std::string deviceName) [member function]
    cls.add_method('Add',
                   'void',
                   [param('std::string', 'deviceName')])
    ## net-device-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::NetDevice>*,std::vector<ns3::Ptr<ns3::NetDevice>, std::allocator<ns3::Ptr<ns3::NetDevice> > > > ns3::NetDeviceContainer::Begin() const [member function]
    cls.add_method('Begin',
                   '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::NetDevice > const, std::vector< ns3::Ptr< ns3::NetDevice > > >',
                   [],
                   is_const=True)
    ## net-device-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::NetDevice>*,std::vector<ns3::Ptr<ns3::NetDevice>, std::allocator<ns3::Ptr<ns3::NetDevice> > > > ns3::NetDeviceContainer::End() const [member function]
    cls.add_method('End',
                   '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::NetDevice > const, std::vector< ns3::Ptr< ns3::NetDevice > > >',
                   [],
                   is_const=True)
    ## net-device-container.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::NetDeviceContainer::Get(uint32_t i) const [member function]
    cls.add_method('Get',
                   'ns3::Ptr< ns3::NetDevice >',
                   [param('uint32_t', 'i')],
                   is_const=True)
    ## net-device-container.h (module 'network'): uint32_t ns3::NetDeviceContainer::GetN() const [member function]
    cls.add_method('GetN',
                   'uint32_t',
                   [],
                   is_const=True)
    return
def register_Ns3NodeContainer_methods(root_module, cls):
    """Register Python bindings for the ns3::NodeContainer class.

    Auto-generated PyBindGen registration code: adds constructors
    (including the 2- through 5-container merge constructors) and
    wrappers for the member functions declared in node-container.h
    (module 'network').
    Call order mirrors the generated API scan; do not reorder by hand.

    root_module -- top-level PyBindGen module wrapper; unused in this
        function, kept for the uniform register_*_methods signature.
    cls -- PyBindGen class wrapper for ns3::NodeContainer.
    """
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::NodeContainer const &', 'arg0')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer() [constructor]
    cls.add_constructor([])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::Ptr<ns3::Node> node) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::Node >', 'node')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(std::string nodeName) [constructor]
    cls.add_constructor([param('std::string', 'nodeName')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b) [constructor]
    cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c) [constructor]
    cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d) [constructor]
    cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d, ns3::NodeContainer const & e) [constructor]
    cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd'), param('ns3::NodeContainer const &', 'e')])
    ## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::NodeContainer other) [member function]
    cls.add_method('Add',
                   'void',
                   [param('ns3::NodeContainer', 'other')])
    ## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::Ptr<ns3::Node> node) [member function]
    cls.add_method('Add',
                   'void',
                   [param('ns3::Ptr< ns3::Node >', 'node')])
    ## node-container.h (module 'network'): void ns3::NodeContainer::Add(std::string nodeName) [member function]
    cls.add_method('Add',
                   'void',
                   [param('std::string', 'nodeName')])
    ## node-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Node>*,std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeContainer::Begin() const [member function]
    cls.add_method('Begin',
                   '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >',
                   [],
                   is_const=True)
    ## node-container.h (module 'network'): void ns3::NodeContainer::Create(uint32_t n) [member function]
    cls.add_method('Create',
                   'void',
                   [param('uint32_t', 'n')])
    ## node-container.h (module 'network'): void ns3::NodeContainer::Create(uint32_t n, uint32_t systemId) [member function]
    cls.add_method('Create',
                   'void',
                   [param('uint32_t', 'n'), param('uint32_t', 'systemId')])
    ## node-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Node>*,std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeContainer::End() const [member function]
    cls.add_method('End',
                   '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >',
                   [],
                   is_const=True)
    ## node-container.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NodeContainer::Get(uint32_t i) const [member function]
    cls.add_method('Get',
                   'ns3::Ptr< ns3::Node >',
                   [param('uint32_t', 'i')],
                   is_const=True)
    ## node-container.h (module 'network'): static ns3::NodeContainer ns3::NodeContainer::GetGlobal() [member function]
    cls.add_method('GetGlobal',
                   'ns3::NodeContainer',
                   [],
                   is_static=True)
    ## node-container.h (module 'network'): uint32_t ns3::NodeContainer::GetN() const [member function]
    cls.add_method('GetN',
                   'uint32_t',
                   [],
                   is_const=True)
    return
def register_Ns3ObjectBase_methods(root_module, cls):
    """Register Python bindings for the ns3::ObjectBase class.

    Auto-generated PyBindGen registration code: adds constructors and
    wrappers for the attribute/trace-source API declared in
    object-base.h (module 'core'). Note that GetInstanceTypeId is
    registered as pure-virtual and ConstructSelf /
    NotifyConstructionCompleted with protected visibility.
    Call order mirrors the generated API scan; do not reorder by hand.

    root_module -- top-level PyBindGen module wrapper; unused in this
        function, kept for the uniform register_*_methods signature.
    cls -- PyBindGen class wrapper for ns3::ObjectBase.
    """
    ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
    cls.add_constructor([])
    ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
    ## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
    cls.add_method('GetAttribute',
                   'void',
                   [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
                   is_const=True)
    ## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & value) const [member function]
    cls.add_method('GetAttributeFailSafe',
                   'bool',
                   [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
                   is_const=True)
    ## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId',
                   'ns3::TypeId',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('SetAttribute',
                   'void',
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('SetAttributeFailSafe',
                   'bool',
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceConnect',
                   'bool',
                   [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceConnectWithoutContext',
                   'bool',
                   [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceDisconnect',
                   'bool',
                   [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceDisconnectWithoutContext',
                   'bool',
                   [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function]
    cls.add_method('ConstructSelf',
                   'void',
                   [param('ns3::AttributeConstructionList const &', 'attributes')],
                   visibility='protected')
    ## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
    cls.add_method('NotifyConstructionCompleted',
                   'void',
                   [],
                   visibility='protected', is_virtual=True)
    return
def register_Ns3ObjectDeleter_methods(root_module, cls):
    """Register Python bindings for the ns3::ObjectDeleter class.

    Auto-generated PyBindGen registration code: adds the constructors
    and the static Delete() wrapper declared in object.h (module
    'core').

    root_module -- top-level PyBindGen module wrapper; unused in this
        function, kept for the uniform register_*_methods signature.
    cls -- PyBindGen class wrapper for ns3::ObjectDeleter.
    """
    ## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor]
    cls.add_constructor([])
    ## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
    ## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function]
    cls.add_method('Delete',
                   'void',
                   [param('ns3::Object *', 'object')],
                   is_static=True)
    return
def register_Ns3ObjectFactory_methods(root_module, cls):
    """Register Python bindings for the ns3::ObjectFactory class.

    Auto-generated PyBindGen registration code: adds the output-stream
    operator, constructors, and wrappers for the member functions
    declared in object-factory.h (module 'core'), including the three
    SetTypeId overloads.
    Call order mirrors the generated API scan; do not reorder by hand.

    root_module -- top-level PyBindGen module wrapper; unused in this
        function, kept for the uniform register_*_methods signature.
    cls -- PyBindGen class wrapper for ns3::ObjectFactory.
    """
    cls.add_output_stream_operator()
    ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(ns3::ObjectFactory const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectFactory const &', 'arg0')])
    ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory() [constructor]
    cls.add_constructor([])
    ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(std::string typeId) [constructor]
    cls.add_constructor([param('std::string', 'typeId')])
    ## object-factory.h (module 'core'): ns3::Ptr<ns3::Object> ns3::ObjectFactory::Create() const [member function]
    cls.add_method('Create',
                   'ns3::Ptr< ns3::Object >',
                   [],
                   is_const=True)
    ## object-factory.h (module 'core'): ns3::TypeId ns3::ObjectFactory::GetTypeId() const [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_const=True)
    ## object-factory.h (module 'core'): void ns3::ObjectFactory::Set(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(ns3::TypeId tid) [member function]
    cls.add_method('SetTypeId',
                   'void',
                   [param('ns3::TypeId', 'tid')])
    ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(char const * tid) [member function]
    cls.add_method('SetTypeId',
                   'void',
                   [param('char const *', 'tid')])
    ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(std::string tid) [member function]
    cls.add_method('SetTypeId',
                   'void',
                   [param('std::string', 'tid')])
    return
def register_Ns3PacketMetadata_methods(root_module, cls):
    """Register Python bindings for ns3::PacketMetadata (PyBindGen auto-generated; do not hand-edit)."""
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(uint64_t uid, uint32_t size) [constructor]
    cls.add_constructor([param('uint64_t', 'uid'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(ns3::PacketMetadata const & o) [copy constructor]
    cls.add_constructor([param('ns3::PacketMetadata const &', 'o')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddAtEnd(ns3::PacketMetadata const & o) [member function]
    cls.add_method('AddAtEnd',
                   'void',
                   [param('ns3::PacketMetadata const &', 'o')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddHeader(ns3::Header const & header, uint32_t size) [member function]
    cls.add_method('AddHeader',
                   'void',
                   [param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddPaddingAtEnd(uint32_t end) [member function]
    cls.add_method('AddPaddingAtEnd',
                   'void',
                   [param('uint32_t', 'end')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
    cls.add_method('AddTrailer',
                   'void',
                   [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::PacketMetadata::BeginItem(ns3::Buffer buffer) const [member function]
    cls.add_method('BeginItem',
                   'ns3::PacketMetadata::ItemIterator',
                   [param('ns3::Buffer', 'buffer')],
                   is_const=True)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata ns3::PacketMetadata::CreateFragment(uint32_t start, uint32_t end) const [member function]
    cls.add_method('CreateFragment',
                   'ns3::PacketMetadata',
                   [param('uint32_t', 'start'), param('uint32_t', 'end')],
                   is_const=True)
    ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Deserialize',
                   'uint32_t',
                   [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::Enable() [member function]
    cls.add_method('Enable',
                   'void',
                   [],
                   is_static=True)
    ## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::EnableChecking() [member function]
    cls.add_method('EnableChecking',
                   'void',
                   [],
                   is_static=True)
    ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## packet-metadata.h (module 'network'): uint64_t ns3::PacketMetadata::GetUid() const [member function]
    cls.add_method('GetUid',
                   'uint64_t',
                   [],
                   is_const=True)
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtEnd(uint32_t end) [member function]
    cls.add_method('RemoveAtEnd',
                   'void',
                   [param('uint32_t', 'end')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtStart(uint32_t start) [member function]
    cls.add_method('RemoveAtStart',
                   'void',
                   [param('uint32_t', 'start')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveHeader(ns3::Header const & header, uint32_t size) [member function]
    cls.add_method('RemoveHeader',
                   'void',
                   [param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
    cls.add_method('RemoveTrailer',
                   'void',
                   [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize',
                   'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
                   is_const=True)
    return
def register_Ns3PacketMetadataItem_methods(root_module, cls):
    """Register Python bindings for ns3::PacketMetadata::Item (PyBindGen auto-generated; do not hand-edit)."""
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item() [constructor]
    cls.add_constructor([])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item(ns3::PacketMetadata::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketMetadata::Item const &', 'arg0')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::current [variable]
    cls.add_instance_attribute('current', 'ns3::Buffer::Iterator', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentSize [variable]
    cls.add_instance_attribute('currentSize', 'uint32_t', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromEnd [variable]
    cls.add_instance_attribute('currentTrimedFromEnd', 'uint32_t', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromStart [variable]
    cls.add_instance_attribute('currentTrimedFromStart', 'uint32_t', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::isFragment [variable]
    cls.add_instance_attribute('isFragment', 'bool', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::tid [variable]
    cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
    return
def register_Ns3PacketMetadataItemIterator_methods(root_module, cls):
    """Register Python bindings for ns3::PacketMetadata::ItemIterator (PyBindGen auto-generated; do not hand-edit)."""
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata::ItemIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketMetadata::ItemIterator const &', 'arg0')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata const * metadata, ns3::Buffer buffer) [constructor]
    cls.add_constructor([param('ns3::PacketMetadata const *', 'metadata'), param('ns3::Buffer', 'buffer')])
    ## packet-metadata.h (module 'network'): bool ns3::PacketMetadata::ItemIterator::HasNext() const [member function]
    cls.add_method('HasNext',
                   'bool',
                   [],
                   is_const=True)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item ns3::PacketMetadata::ItemIterator::Next() [member function]
    cls.add_method('Next',
                   'ns3::PacketMetadata::Item',
                   [])
    return
def register_Ns3PacketTagIterator_methods(root_module, cls):
    """Register Python bindings for ns3::PacketTagIterator (PyBindGen auto-generated; do not hand-edit)."""
    ## packet.h (module 'network'): ns3::PacketTagIterator::PacketTagIterator(ns3::PacketTagIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagIterator const &', 'arg0')])
    ## packet.h (module 'network'): bool ns3::PacketTagIterator::HasNext() const [member function]
    cls.add_method('HasNext',
                   'bool',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): ns3::PacketTagIterator::Item ns3::PacketTagIterator::Next() [member function]
    cls.add_method('Next',
                   'ns3::PacketTagIterator::Item',
                   [])
    return
def register_Ns3PacketTagIteratorItem_methods(root_module, cls):
    """Register Python bindings for ns3::PacketTagIterator::Item (PyBindGen auto-generated; do not hand-edit)."""
    ## packet.h (module 'network'): ns3::PacketTagIterator::Item::Item(ns3::PacketTagIterator::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagIterator::Item const &', 'arg0')])
    ## packet.h (module 'network'): void ns3::PacketTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
    cls.add_method('GetTag',
                   'void',
                   [param('ns3::Tag &', 'tag')],
                   is_const=True)
    ## packet.h (module 'network'): ns3::TypeId ns3::PacketTagIterator::Item::GetTypeId() const [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_const=True)
    return
def register_Ns3PacketTagList_methods(root_module, cls):
    """Register Python bindings for ns3::PacketTagList (PyBindGen auto-generated; do not hand-edit)."""
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList() [constructor]
    cls.add_constructor([])
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList(ns3::PacketTagList const & o) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagList const &', 'o')])
    ## packet-tag-list.h (module 'network'): void ns3::PacketTagList::Add(ns3::Tag const & tag) const [member function]
    cls.add_method('Add',
                   'void',
                   [param('ns3::Tag const &', 'tag')],
                   is_const=True)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData const * ns3::PacketTagList::Head() const [member function]
    cls.add_method('Head',
                   'ns3::PacketTagList::TagData const *',
                   [],
                   is_const=True)
    ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Peek(ns3::Tag & tag) const [member function]
    cls.add_method('Peek',
                   'bool',
                   [param('ns3::Tag &', 'tag')],
                   is_const=True)
    ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Remove(ns3::Tag & tag) [member function]
    cls.add_method('Remove',
                   'bool',
                   [param('ns3::Tag &', 'tag')])
    ## packet-tag-list.h (module 'network'): void ns3::PacketTagList::RemoveAll() [member function]
    cls.add_method('RemoveAll',
                   'void',
                   [])
    ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Replace(ns3::Tag & tag) [member function]
    cls.add_method('Replace',
                   'bool',
                   [param('ns3::Tag &', 'tag')])
    return
def register_Ns3PacketTagListTagData_methods(root_module, cls):
    """Register Python bindings for ns3::PacketTagList::TagData (PyBindGen auto-generated; do not hand-edit)."""
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData() [constructor]
    cls.add_constructor([])
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData(ns3::PacketTagList::TagData const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagList::TagData const &', 'arg0')])
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::count [variable]
    cls.add_instance_attribute('count', 'uint32_t', is_const=False)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::data [variable]
    cls.add_instance_attribute('data', 'uint8_t [ 1 ]', is_const=False)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::next [variable]
    cls.add_instance_attribute('next', 'ns3::PacketTagList::TagData *', is_const=False)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::size [variable]
    cls.add_instance_attribute('size', 'uint32_t', is_const=False)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::tid [variable]
    cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
    return
def register_Ns3Reservation_methods(root_module, cls):
    """Register Python bindings for ns3::Reservation (uan module; PyBindGen auto-generated; do not hand-edit)."""
    ## uan-mac-rc.h (module 'uan'): ns3::Reservation::Reservation(ns3::Reservation const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Reservation const &', 'arg0')])
    ## uan-mac-rc.h (module 'uan'): ns3::Reservation::Reservation() [constructor]
    cls.add_constructor([])
    ## uan-mac-rc.h (module 'uan'): ns3::Reservation::Reservation(std::list<std::pair<ns3::Ptr<ns3::Packet>, ns3::UanAddress>, std::allocator<std::pair<ns3::Ptr<ns3::Packet>, ns3::UanAddress> > > & list, uint8_t frameNo, uint32_t maxPkts=0) [constructor]
    cls.add_constructor([param('std::list< std::pair< ns3::Ptr< ns3::Packet >, ns3::UanAddress > > &', 'list'), param('uint8_t', 'frameNo'), param('uint32_t', 'maxPkts', default_value='0')])
    ## uan-mac-rc.h (module 'uan'): void ns3::Reservation::AddTimestamp(ns3::Time t) [member function]
    cls.add_method('AddTimestamp',
                   'void',
                   [param('ns3::Time', 't')])
    ## uan-mac-rc.h (module 'uan'): uint8_t ns3::Reservation::GetFrameNo() const [member function]
    cls.add_method('GetFrameNo',
                   'uint8_t',
                   [],
                   is_const=True)
    ## uan-mac-rc.h (module 'uan'): uint32_t ns3::Reservation::GetLength() const [member function]
    cls.add_method('GetLength',
                   'uint32_t',
                   [],
                   is_const=True)
    ## uan-mac-rc.h (module 'uan'): uint32_t ns3::Reservation::GetNoFrames() const [member function]
    cls.add_method('GetNoFrames',
                   'uint32_t',
                   [],
                   is_const=True)
    ## uan-mac-rc.h (module 'uan'): std::list<std::pair<ns3::Ptr<ns3::Packet>, ns3::UanAddress>, std::allocator<std::pair<ns3::Ptr<ns3::Packet>, ns3::UanAddress> > > const & ns3::Reservation::GetPktList() const [member function]
    cls.add_method('GetPktList',
                   'std::list< std::pair< ns3::Ptr< ns3::Packet >, ns3::UanAddress > > const &',
                   [],
                   is_const=True)
    ## uan-mac-rc.h (module 'uan'): uint8_t ns3::Reservation::GetRetryNo() const [member function]
    cls.add_method('GetRetryNo',
                   'uint8_t',
                   [],
                   is_const=True)
    ## uan-mac-rc.h (module 'uan'): ns3::Time ns3::Reservation::GetTimestamp(uint8_t n) const [member function]
    cls.add_method('GetTimestamp',
                   'ns3::Time',
                   [param('uint8_t', 'n')],
                   is_const=True)
    ## uan-mac-rc.h (module 'uan'): void ns3::Reservation::IncrementRetry() [member function]
    cls.add_method('IncrementRetry',
                   'void',
                   [])
    ## uan-mac-rc.h (module 'uan'): bool ns3::Reservation::IsTransmitted() const [member function]
    cls.add_method('IsTransmitted',
                   'bool',
                   [],
                   is_const=True)
    ## uan-mac-rc.h (module 'uan'): void ns3::Reservation::SetFrameNo(uint8_t fn) [member function]
    cls.add_method('SetFrameNo',
                   'void',
                   [param('uint8_t', 'fn')])
    ## uan-mac-rc.h (module 'uan'): void ns3::Reservation::SetTransmitted(bool t=true) [member function]
    cls.add_method('SetTransmitted',
                   'void',
                   [param('bool', 't', default_value='true')])
    return
def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
    """Register Python bindings for the SimpleRefCount<Object, ObjectBase, ObjectDeleter> template instantiation (PyBindGen auto-generated; do not hand-edit)."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::Cleanup() [member function]
    cls.add_method('Cleanup',
                   'void',
                   [],
                   is_static=True)
    return
def register_Ns3Simulator_methods(root_module, cls):
    """Register Python bindings for ns3::Simulator's static API (PyBindGen auto-generated; do not hand-edit)."""
    ## simulator.h (module 'core'): ns3::Simulator::Simulator(ns3::Simulator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Simulator const &', 'arg0')])
    ## simulator.h (module 'core'): static void ns3::Simulator::Cancel(ns3::EventId const & id) [member function]
    cls.add_method('Cancel',
                   'void',
                   [param('ns3::EventId const &', 'id')],
                   is_static=True)
    ## simulator.h (module 'core'): static void ns3::Simulator::Destroy() [member function]
    cls.add_method('Destroy',
                   'void',
                   [],
                   is_static=True)
    ## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetContext() [member function]
    cls.add_method('GetContext',
                   'uint32_t',
                   [],
                   is_static=True)
    ## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetDelayLeft(ns3::EventId const & id) [member function]
    cls.add_method('GetDelayLeft',
                   'ns3::Time',
                   [param('ns3::EventId const &', 'id')],
                   is_static=True)
    ## simulator.h (module 'core'): static ns3::Ptr<ns3::SimulatorImpl> ns3::Simulator::GetImplementation() [member function]
    cls.add_method('GetImplementation',
                   'ns3::Ptr< ns3::SimulatorImpl >',
                   [],
                   is_static=True)
    ## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetMaximumSimulationTime() [member function]
    cls.add_method('GetMaximumSimulationTime',
                   'ns3::Time',
                   [],
                   is_static=True)
    ## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetSystemId() [member function]
    cls.add_method('GetSystemId',
                   'uint32_t',
                   [],
                   is_static=True)
    ## simulator.h (module 'core'): static bool ns3::Simulator::IsExpired(ns3::EventId const & id) [member function]
    cls.add_method('IsExpired',
                   'bool',
                   [param('ns3::EventId const &', 'id')],
                   is_static=True)
    ## simulator.h (module 'core'): static bool ns3::Simulator::IsFinished() [member function]
    cls.add_method('IsFinished',
                   'bool',
                   [],
                   is_static=True)
    ## simulator.h (module 'core'): static ns3::Time ns3::Simulator::Now() [member function]
    cls.add_method('Now',
                   'ns3::Time',
                   [],
                   is_static=True)
    ## simulator.h (module 'core'): static void ns3::Simulator::Remove(ns3::EventId const & id) [member function]
    cls.add_method('Remove',
                   'void',
                   [param('ns3::EventId const &', 'id')],
                   is_static=True)
    ## simulator.h (module 'core'): static void ns3::Simulator::SetImplementation(ns3::Ptr<ns3::SimulatorImpl> impl) [member function]
    cls.add_method('SetImplementation',
                   'void',
                   [param('ns3::Ptr< ns3::SimulatorImpl >', 'impl')],
                   is_static=True)
    ## simulator.h (module 'core'): static void ns3::Simulator::SetScheduler(ns3::ObjectFactory schedulerFactory) [member function]
    cls.add_method('SetScheduler',
                   'void',
                   [param('ns3::ObjectFactory', 'schedulerFactory')],
                   is_static=True)
    ## simulator.h (module 'core'): static void ns3::Simulator::Stop() [member function]
    cls.add_method('Stop',
                   'void',
                   [],
                   is_static=True)
    ## simulator.h (module 'core'): static void ns3::Simulator::Stop(ns3::Time const & delay) [member function]
    cls.add_method('Stop',
                   'void',
                   [param('ns3::Time const &', 'delay')],
                   is_static=True)
    return
def register_Ns3Tag_methods(root_module, cls):
    """Register Python bindings for the abstract ns3::Tag base class (PyBindGen auto-generated; do not hand-edit)."""
    ## tag.h (module 'network'): ns3::Tag::Tag() [constructor]
    cls.add_constructor([])
    ## tag.h (module 'network'): ns3::Tag::Tag(ns3::Tag const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Tag const &', 'arg0')])
    ## tag.h (module 'network'): void ns3::Tag::Deserialize(ns3::TagBuffer i) [member function]
    cls.add_method('Deserialize',
                   'void',
                   [param('ns3::TagBuffer', 'i')],
                   is_pure_virtual=True, is_virtual=True)
    ## tag.h (module 'network'): uint32_t ns3::Tag::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## tag.h (module 'network'): static ns3::TypeId ns3::Tag::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## tag.h (module 'network'): void ns3::Tag::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## tag.h (module 'network'): void ns3::Tag::Serialize(ns3::TagBuffer i) const [member function]
    cls.add_method('Serialize',
                   'void',
                   [param('ns3::TagBuffer', 'i')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3TagBuffer_methods(root_module, cls):
    """Register Python bindings for ns3::TagBuffer (PyBindGen auto-generated; do not hand-edit)."""
    ## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(ns3::TagBuffer const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')])
    ## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(uint8_t * start, uint8_t * end) [constructor]
    cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::CopyFrom(ns3::TagBuffer o) [member function]
    cls.add_method('CopyFrom',
                   'void',
                   [param('ns3::TagBuffer', 'o')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::Read(uint8_t * buffer, uint32_t size) [member function]
    cls.add_method('Read',
                   'void',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
    ## tag-buffer.h (module 'network'): double ns3::TagBuffer::ReadDouble() [member function]
    cls.add_method('ReadDouble',
                   'double',
                   [])
    ## tag-buffer.h (module 'network'): uint16_t ns3::TagBuffer::ReadU16() [member function]
    cls.add_method('ReadU16',
                   'uint16_t',
                   [])
    ## tag-buffer.h (module 'network'): uint32_t ns3::TagBuffer::ReadU32() [member function]
    cls.add_method('ReadU32',
                   'uint32_t',
                   [])
    ## tag-buffer.h (module 'network'): uint64_t ns3::TagBuffer::ReadU64() [member function]
    cls.add_method('ReadU64',
                   'uint64_t',
                   [])
    ## tag-buffer.h (module 'network'): uint8_t ns3::TagBuffer::ReadU8() [member function]
    cls.add_method('ReadU8',
                   'uint8_t',
                   [])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::TrimAtEnd(uint32_t trim) [member function]
    cls.add_method('TrimAtEnd',
                   'void',
                   [param('uint32_t', 'trim')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::Write(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Write',
                   'void',
                   [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteDouble(double v) [member function]
    cls.add_method('WriteDouble',
                   'void',
                   [param('double', 'v')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU16(uint16_t data) [member function]
    cls.add_method('WriteU16',
                   'void',
                   [param('uint16_t', 'data')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU32(uint32_t data) [member function]
    cls.add_method('WriteU32',
                   'void',
                   [param('uint32_t', 'data')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU64(uint64_t v) [member function]
    cls.add_method('WriteU64',
                   'void',
                   [param('uint64_t', 'v')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU8(uint8_t v) [member function]
    cls.add_method('WriteU8',
                   'void',
                   [param('uint8_t', 'v')])
    return
def register_Ns3Tap_methods(root_module, cls):
    """Register Python bindings for ns3::Tap (uan propagation model; PyBindGen auto-generated; do not hand-edit)."""
    ## uan-prop-model.h (module 'uan'): ns3::Tap::Tap(ns3::Tap const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Tap const &', 'arg0')])
    ## uan-prop-model.h (module 'uan'): ns3::Tap::Tap() [constructor]
    cls.add_constructor([])
    ## uan-prop-model.h (module 'uan'): ns3::Tap::Tap(ns3::Time delay, std::complex<double> amp) [constructor]
    cls.add_constructor([param('ns3::Time', 'delay'), param('std::complex< double >', 'amp')])
    ## uan-prop-model.h (module 'uan'): std::complex<double> ns3::Tap::GetAmp() const [member function]
    cls.add_method('GetAmp',
                   'std::complex< double >',
                   [],
                   is_const=True)
    ## uan-prop-model.h (module 'uan'): ns3::Time ns3::Tap::GetDelay() const [member function]
    cls.add_method('GetDelay',
                   'ns3::Time',
                   [],
                   is_const=True)
    return
def register_Ns3TimeWithUnit_methods(root_module, cls):
    """Register Python bindings for ns3::TimeWithUnit (PyBindGen auto-generated; do not hand-edit)."""
    cls.add_output_stream_operator()
    ## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::TimeWithUnit const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TimeWithUnit const &', 'arg0')])
    ## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::Time const time, ns3::Time::Unit const unit) [constructor]
    cls.add_constructor([param('ns3::Time const', 'time'), param('ns3::Time::Unit const', 'unit')])
    return
def register_Ns3TracedValue__Double_methods(root_module, cls):
    """Register Python bindings for the TracedValue<double> template instantiation (PyBindGen auto-generated; do not hand-edit)."""
    ## traced-value.h (module 'core'): ns3::TracedValue<double>::TracedValue() [constructor]
    cls.add_constructor([])
    ## traced-value.h (module 'core'): ns3::TracedValue<double>::TracedValue(ns3::TracedValue<double> const & o) [copy constructor]
    cls.add_constructor([param('ns3::TracedValue< double > const &', 'o')])
    ## traced-value.h (module 'core'): ns3::TracedValue<double>::TracedValue(double const & v) [constructor]
    cls.add_constructor([param('double const &', 'v')])
    ## traced-value.h (module 'core'): void ns3::TracedValue<double>::Connect(ns3::CallbackBase const & cb, std::basic_string<char,std::char_traits<char>,std::allocator<char> > path) [member function]
    cls.add_method('Connect',
                   'void',
                   [param('ns3::CallbackBase const &', 'cb'), param('std::string', 'path')])
    ## traced-value.h (module 'core'): void ns3::TracedValue<double>::ConnectWithoutContext(ns3::CallbackBase const & cb) [member function]
    cls.add_method('ConnectWithoutContext',
                   'void',
                   [param('ns3::CallbackBase const &', 'cb')])
    ## traced-value.h (module 'core'): void ns3::TracedValue<double>::Disconnect(ns3::CallbackBase const & cb, std::basic_string<char,std::char_traits<char>,std::allocator<char> > path) [member function]
    cls.add_method('Disconnect',
                   'void',
                   [param('ns3::CallbackBase const &', 'cb'), param('std::string', 'path')])
    ## traced-value.h (module 'core'): void ns3::TracedValue<double>::DisconnectWithoutContext(ns3::CallbackBase const & cb) [member function]
    cls.add_method('DisconnectWithoutContext',
                   'void',
                   [param('ns3::CallbackBase const &', 'cb')])
    ## traced-value.h (module 'core'): double ns3::TracedValue<double>::Get() const [member function]
    cls.add_method('Get',
                   'double',
                   [],
                   is_const=True)
    ## traced-value.h (module 'core'): void ns3::TracedValue<double>::Set(double const & v) [member function]
    cls.add_method('Set',
                   'void',
                   [param('double const &', 'v')])
    return
def register_Ns3TypeId_methods(root_module, cls):
    """Register Python bindings for ns3::TypeId, including comparison and stream operators (PyBindGen auto-generated; do not hand-edit)."""
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
    cls.add_constructor([param('char const *', 'name')])
    ## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor]
    cls.add_constructor([param('ns3::TypeId const &', 'o')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SUPPORTED, std::string const & supportMsg="") [member function]
    cls.add_method('AddAttribute',
                   'ns3::TypeId',
                   [param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SUPPORTED, std::string const & supportMsg="") [member function]
    cls.add_method('AddAttribute',
                   'ns3::TypeId',
                   [param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function]
    cls.add_method('AddTraceSource',
                   'ns3::TypeId',
                   [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')],
                   deprecated=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor, std::string callback, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SUPPORTED, std::string const & supportMsg="") [member function]
    cls.add_method('AddTraceSource',
                   'ns3::TypeId',
                   [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor'), param('std::string', 'callback'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function]
    cls.add_method('GetAttribute',
                   'ns3::TypeId::AttributeInformation',
                   [param('uint32_t', 'i')],
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function]
    cls.add_method('GetAttributeFullName',
                   'std::string',
                   [param('uint32_t', 'i')],
                   is_const=True)
    ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function]
    cls.add_method('GetAttributeN',
                   'uint32_t',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function]
    cls.add_method('GetConstructor',
                   'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
    cls.add_method('GetGroupName',
                   'std::string',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetHash() const [member function]
    cls.add_method('GetHash',
                   'uint32_t',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
    cls.add_method('GetName',
                   'std::string',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
    cls.add_method('GetParent',
                   'ns3::TypeId',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function]
    cls.add_method('GetRegistered',
                   'ns3::TypeId',
                   [param('uint32_t', 'i')],
                   is_static=True)
    ## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function]
    cls.add_method('GetRegisteredN',
                   'uint32_t',
                   [],
                   is_static=True)
    ## type-id.h (module 'core'): std::size_t ns3::TypeId::GetSize() const [member function]
    cls.add_method('GetSize',
                   'std::size_t',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function]
    cls.add_method('GetTraceSource',
                   'ns3::TypeId::TraceSourceInformation',
                   [param('uint32_t', 'i')],
                   is_const=True)
    ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function]
    cls.add_method('GetTraceSourceN',
                   'uint32_t',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
    cls.add_method('GetUid',
                   'uint16_t',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
    cls.add_method('HasConstructor',
                   'bool',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
    cls.add_method('HasParent',
                   'bool',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
    cls.add_method('HideFromDocumentation',
                   'ns3::TypeId',
                   [])
    ## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
    cls.add_method('IsChildOf',
                   'bool',
                   [param('ns3::TypeId', 'other')],
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]
    cls.add_method('LookupAttributeByName',
                   'bool',
                   [param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)],
                   is_const=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByHash(uint32_t hash) [member function]
    cls.add_method('LookupByHash',
                   'ns3::TypeId',
                   [param('uint32_t', 'hash')],
                   is_static=True)
    ## type-id.h (module 'core'): static bool ns3::TypeId::LookupByHashFailSafe(uint32_t hash, ns3::TypeId * tid) [member function]
    cls.add_method('LookupByHashFailSafe',
                   'bool',
                   [param('uint32_t', 'hash'), param('ns3::TypeId *', 'tid')],
                   is_static=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
    cls.add_method('LookupByName',
                   'ns3::TypeId',
                   [param('std::string', 'name')],
                   is_static=True)
    ## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
    cls.add_method('LookupTraceSourceByName',
                   'ns3::Ptr< ns3::TraceSourceAccessor const >',
                   [param('std::string', 'name')],
                   is_const=True)
    ## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name, ns3::TypeId::TraceSourceInformation * info) const [member function]
    cls.add_method('LookupTraceSourceByName',
                   'ns3::Ptr< ns3::TraceSourceAccessor const >',
                   [param('std::string', 'name'), param('ns3::TypeId::TraceSourceInformation *', 'info')],
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
    cls.add_method('MustHideFromDocumentation',
                   'bool',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function]
    cls.add_method('SetAttributeInitialValue',
                   'bool',
                   [param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
    cls.add_method('SetGroupName',
                   'ns3::TypeId',
                   [param('std::string', 'groupName')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
    cls.add_method('SetParent',
                   'ns3::TypeId',
                   [param('ns3::TypeId', 'tid')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetSize(std::size_t size) [member function]
    cls.add_method('SetSize',
                   'ns3::TypeId',
                   [param('std::size_t', 'size')])
    ## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t uid) [member function]
    cls.add_method('SetUid',
                   'void',
                   [param('uint16_t', 'uid')])
    return
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
    """Bind ns3::TypeId::AttributeInformation (type-id.h, module 'core')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
    # Mutable public data members of the struct.
    cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
    cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
    cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
    cls.add_instance_attribute('help', 'std::string', is_const=False)
    cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
    cls.add_instance_attribute('supportLevel', 'ns3::TypeId::SupportLevel', is_const=False)
    cls.add_instance_attribute('supportMsg', 'std::string', is_const=False)
    return
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
    """Bind ns3::TypeId::TraceSourceInformation (type-id.h, module 'core')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
    # Mutable public data members of the struct.
    cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False)
    cls.add_instance_attribute('callback', 'std::string', is_const=False)
    cls.add_instance_attribute('help', 'std::string', is_const=False)
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    cls.add_instance_attribute('supportLevel', 'ns3::TypeId::SupportLevel', is_const=False)
    cls.add_instance_attribute('supportMsg', 'std::string', is_const=False)
    return
def register_Ns3UanAddress_methods(root_module, cls):
    """Bind ns3::UanAddress (uan-address.h, module 'uan')."""
    # Comparison and stream-output operator overloads.
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # Constructors: copy, default, and from a raw uint8_t address value.
    cls.add_constructor([param('ns3::UanAddress const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('uint8_t', 'addr')])
    # Static factory / conversion helpers.
    cls.add_method('Allocate', 'ns3::UanAddress', [], is_static=True)
    cls.add_method('ConvertFrom', 'ns3::UanAddress', [param('ns3::Address const &', 'address')], is_static=True)
    # Raw-buffer copy in/out and integer view of the address.
    cls.add_method('CopyFrom', 'void', [param('uint8_t const *', 'pBuffer')])
    cls.add_method('CopyTo', 'void', [param('uint8_t *', 'pBuffer')])
    cls.add_method('GetAsInt', 'uint8_t', [], is_const=True)
    # More static helpers: broadcast address and type matching.
    cls.add_method('GetBroadcast', 'ns3::UanAddress', [], is_static=True)
    cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True)
    return
def register_Ns3UanHelper_methods(root_module, cls):
    """Bind ns3::UanHelper (uan-helper.h, module 'uan').

    Registers constructors, stream assignment, the static ASCII tracing
    helpers, three Install overloads, and the SetMac/SetPhy/SetTransducer
    configuration methods.  Each Set* method takes a type name followed by
    up to eight optional attribute name/value pairs, all defaulting to
    ""/ns3::EmptyAttributeValue().
    """
    ## uan-helper.h (module 'uan'): ns3::UanHelper::UanHelper(ns3::UanHelper const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::UanHelper const &', 'arg0')])
    ## uan-helper.h (module 'uan'): ns3::UanHelper::UanHelper() [constructor]
    cls.add_constructor([])
    ## uan-helper.h (module 'uan'): int64_t ns3::UanHelper::AssignStreams(ns3::NetDeviceContainer c, int64_t stream) [member function]
    cls.add_method('AssignStreams',
                   'int64_t',
                   [param('ns3::NetDeviceContainer', 'c'), param('int64_t', 'stream')])
    ## uan-helper.h (module 'uan'): static void ns3::UanHelper::EnableAscii(std::ostream & os, uint32_t nodeid, uint32_t deviceid) [member function]
    cls.add_method('EnableAscii',
                   'void',
                   [param('std::ostream &', 'os'), param('uint32_t', 'nodeid'), param('uint32_t', 'deviceid')],
                   is_static=True)
    ## uan-helper.h (module 'uan'): static void ns3::UanHelper::EnableAscii(std::ostream & os, ns3::NetDeviceContainer d) [member function]
    cls.add_method('EnableAscii',
                   'void',
                   [param('std::ostream &', 'os'), param('ns3::NetDeviceContainer', 'd')],
                   is_static=True)
    ## uan-helper.h (module 'uan'): static void ns3::UanHelper::EnableAscii(std::ostream & os, ns3::NodeContainer n) [member function]
    cls.add_method('EnableAscii',
                   'void',
                   [param('std::ostream &', 'os'), param('ns3::NodeContainer', 'n')],
                   is_static=True)
    ## uan-helper.h (module 'uan'): static void ns3::UanHelper::EnableAsciiAll(std::ostream & os) [member function]
    cls.add_method('EnableAsciiAll',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_static=True)
    ## uan-helper.h (module 'uan'): ns3::NetDeviceContainer ns3::UanHelper::Install(ns3::NodeContainer c) const [member function]
    cls.add_method('Install',
                   'ns3::NetDeviceContainer',
                   [param('ns3::NodeContainer', 'c')],
                   is_const=True)
    ## uan-helper.h (module 'uan'): ns3::NetDeviceContainer ns3::UanHelper::Install(ns3::NodeContainer c, ns3::Ptr<ns3::UanChannel> channel) const [member function]
    cls.add_method('Install',
                   'ns3::NetDeviceContainer',
                   [param('ns3::NodeContainer', 'c'), param('ns3::Ptr< ns3::UanChannel >', 'channel')],
                   is_const=True)
    ## uan-helper.h (module 'uan'): ns3::Ptr<ns3::UanNetDevice> ns3::UanHelper::Install(ns3::Ptr<ns3::Node> node, ns3::Ptr<ns3::UanChannel> channel) const [member function]
    cls.add_method('Install',
                   'ns3::Ptr< ns3::UanNetDevice >',
                   [param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::Ptr< ns3::UanChannel >', 'channel')],
                   is_const=True)
    # NOTE: the three Set* bindings below carry the full 17-parameter default
    # signatures generated by the scanner; the long literal strings are the
    # C++ default values and must match the header exactly.
    ## uan-helper.h (module 'uan'): void ns3::UanHelper::SetMac(std::string type, std::string n0="", ns3::AttributeValue const & v0=ns3::EmptyAttributeValue(), std::string n1="", ns3::AttributeValue const & v1=ns3::EmptyAttributeValue(), std::string n2="", ns3::AttributeValue const & v2=ns3::EmptyAttributeValue(), std::string n3="", ns3::AttributeValue const & v3=ns3::EmptyAttributeValue(), std::string n4="", ns3::AttributeValue const & v4=ns3::EmptyAttributeValue(), std::string n5="", ns3::AttributeValue const & v5=ns3::EmptyAttributeValue(), std::string n6="", ns3::AttributeValue const & v6=ns3::EmptyAttributeValue(), std::string n7="", ns3::AttributeValue const & v7=ns3::EmptyAttributeValue()) [member function]
    cls.add_method('SetMac',
                   'void',
                   [param('std::string', 'type'), param('std::string', 'n0', default_value='""'), param('ns3::AttributeValue const &', 'v0', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n1', default_value='""'), param('ns3::AttributeValue const &', 'v1', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n2', default_value='""'), param('ns3::AttributeValue const &', 'v2', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n3', default_value='""'), param('ns3::AttributeValue const &', 'v3', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n4', default_value='""'), param('ns3::AttributeValue const &', 'v4', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n5', default_value='""'), param('ns3::AttributeValue const &', 'v5', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n6', default_value='""'), param('ns3::AttributeValue const &', 'v6', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n7', default_value='""'), param('ns3::AttributeValue const &', 'v7', default_value='ns3::EmptyAttributeValue()')])
    ## uan-helper.h (module 'uan'): void ns3::UanHelper::SetPhy(std::string phyType, std::string n0="", ns3::AttributeValue const & v0=ns3::EmptyAttributeValue(), std::string n1="", ns3::AttributeValue const & v1=ns3::EmptyAttributeValue(), std::string n2="", ns3::AttributeValue const & v2=ns3::EmptyAttributeValue(), std::string n3="", ns3::AttributeValue const & v3=ns3::EmptyAttributeValue(), std::string n4="", ns3::AttributeValue const & v4=ns3::EmptyAttributeValue(), std::string n5="", ns3::AttributeValue const & v5=ns3::EmptyAttributeValue(), std::string n6="", ns3::AttributeValue const & v6=ns3::EmptyAttributeValue(), std::string n7="", ns3::AttributeValue const & v7=ns3::EmptyAttributeValue()) [member function]
    cls.add_method('SetPhy',
                   'void',
                   [param('std::string', 'phyType'), param('std::string', 'n0', default_value='""'), param('ns3::AttributeValue const &', 'v0', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n1', default_value='""'), param('ns3::AttributeValue const &', 'v1', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n2', default_value='""'), param('ns3::AttributeValue const &', 'v2', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n3', default_value='""'), param('ns3::AttributeValue const &', 'v3', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n4', default_value='""'), param('ns3::AttributeValue const &', 'v4', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n5', default_value='""'), param('ns3::AttributeValue const &', 'v5', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n6', default_value='""'), param('ns3::AttributeValue const &', 'v6', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n7', default_value='""'), param('ns3::AttributeValue const &', 'v7', default_value='ns3::EmptyAttributeValue()')])
    ## uan-helper.h (module 'uan'): void ns3::UanHelper::SetTransducer(std::string type, std::string n0="", ns3::AttributeValue const & v0=ns3::EmptyAttributeValue(), std::string n1="", ns3::AttributeValue const & v1=ns3::EmptyAttributeValue(), std::string n2="", ns3::AttributeValue const & v2=ns3::EmptyAttributeValue(), std::string n3="", ns3::AttributeValue const & v3=ns3::EmptyAttributeValue(), std::string n4="", ns3::AttributeValue const & v4=ns3::EmptyAttributeValue(), std::string n5="", ns3::AttributeValue const & v5=ns3::EmptyAttributeValue(), std::string n6="", ns3::AttributeValue const & v6=ns3::EmptyAttributeValue(), std::string n7="", ns3::AttributeValue const & v7=ns3::EmptyAttributeValue()) [member function]
    cls.add_method('SetTransducer',
                   'void',
                   [param('std::string', 'type'), param('std::string', 'n0', default_value='""'), param('ns3::AttributeValue const &', 'v0', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n1', default_value='""'), param('ns3::AttributeValue const &', 'v1', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n2', default_value='""'), param('ns3::AttributeValue const &', 'v2', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n3', default_value='""'), param('ns3::AttributeValue const &', 'v3', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n4', default_value='""'), param('ns3::AttributeValue const &', 'v4', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n5', default_value='""'), param('ns3::AttributeValue const &', 'v5', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n6', default_value='""'), param('ns3::AttributeValue const &', 'v6', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n7', default_value='""'), param('ns3::AttributeValue const &', 'v7', default_value='ns3::EmptyAttributeValue()')])
    return
def register_Ns3UanModesList_methods(root_module, cls):
    """Bind ns3::UanModesList (uan-tx-mode.h, module 'uan')."""
    cls.add_output_stream_operator()
    # Copy and default constructors.
    cls.add_constructor([param('ns3::UanModesList const &', 'arg0')])
    cls.add_constructor([])
    # List manipulation and inspection.
    cls.add_method('AppendMode', 'void', [param('ns3::UanTxMode', 'mode')])
    cls.add_method('DeleteMode', 'void', [param('uint32_t', 'num')])
    cls.add_method('GetNModes', 'uint32_t', [], is_const=True)
    return
def register_Ns3UanPacketArrival_methods(root_module, cls):
    """Bind ns3::UanPacketArrival (uan-transducer.h, module 'uan')."""
    # Constructors: copy, default, and the fully-specified arrival record
    # (packet, rx power, tx mode, delay profile, arrival time).
    cls.add_constructor([param('ns3::UanPacketArrival const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ptr< ns3::Packet >', 'packet'), param('double', 'rxPowerDb'), param('ns3::UanTxMode', 'txMode'), param('ns3::UanPdp', 'pdp'), param('ns3::Time', 'arrTime')])
    # Read-only accessors for the stored arrival data.
    cls.add_method('GetArrivalTime', 'ns3::Time', [], is_const=True)
    cls.add_method('GetPacket', 'ns3::Ptr< ns3::Packet >', [], is_const=True)
    cls.add_method('GetPdp', 'ns3::UanPdp', [], is_const=True)
    cls.add_method('GetRxPowerDb', 'double', [], is_const=True)
    cls.add_method('GetTxMode', 'ns3::UanTxMode const &', [], is_const=True)
    return
def register_Ns3UanPdp_methods(root_module, cls):
    """Bind ns3::UanPdp (uan-prop-model.h, module 'uan')."""
    cls.add_output_stream_operator()
    # Constructors: copy, default, and from a vector of taps / complex
    # arrivals / real arrivals together with a time resolution.
    cls.add_constructor([param('ns3::UanPdp const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('std::vector< ns3::Tap >', 'taps'), param('ns3::Time', 'resolution')])
    cls.add_constructor([param('std::vector< std::complex< double > >', 'arrivals'), param('ns3::Time', 'resolution')])
    cls.add_constructor([param('std::vector< double >', 'arrivals'), param('ns3::Time', 'resolution')])
    # Static factory for a single-impulse profile.
    cls.add_method('CreateImpulsePdp', 'ns3::UanPdp', [], is_static=True)
    # Const iterators over the underlying std::vector<ns3::Tap> (the libstdc++
    # iterator type is spelled out by the binding generator).
    cls.add_method('GetBegin', '__gnu_cxx::__normal_iterator< ns3::Tap const *, std::vector< ns3::Tap > >', [], is_const=True)
    cls.add_method('GetEnd', '__gnu_cxx::__normal_iterator< ns3::Tap const *, std::vector< ns3::Tap > >', [], is_const=True)
    # Tap and resolution accessors / mutators.
    cls.add_method('GetNTaps', 'uint32_t', [], is_const=True)
    cls.add_method('GetResolution', 'ns3::Time', [], is_const=True)
    cls.add_method('GetTap', 'ns3::Tap const &', [param('uint32_t', 'i')], is_const=True)
    cls.add_method('SetNTaps', 'void', [param('uint32_t', 'nTaps')])
    cls.add_method('SetResolution', 'void', [param('ns3::Time', 'resolution')])
    cls.add_method('SetTap', 'void', [param('std::complex< double >', 'arrival'), param('uint32_t', 'index')])
    # Tap sums over time windows: complex-valued (…C) and real-valued (…Nc)
    # variants, anchored either at an absolute window or at the maximum tap.
    cls.add_method('SumTapsC', 'std::complex< double >', [param('ns3::Time', 'begin'), param('ns3::Time', 'end')], is_const=True)
    cls.add_method('SumTapsFromMaxC', 'std::complex< double >', [param('ns3::Time', 'delay'), param('ns3::Time', 'duration')], is_const=True)
    cls.add_method('SumTapsFromMaxNc', 'double', [param('ns3::Time', 'delay'), param('ns3::Time', 'duration')], is_const=True)
    cls.add_method('SumTapsNc', 'double', [param('ns3::Time', 'begin'), param('ns3::Time', 'end')], is_const=True)
    return
def register_Ns3UanPhyListener_methods(root_module, cls):
    """Bind ns3::UanPhyListener, an abstract listener interface (uan-phy.h, module 'uan')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::UanPhyListener const &', 'arg0')])
    # Pure-virtual notification hooks for CCA, RX, and TX events.
    cls.add_method('NotifyCcaEnd', 'void', [], is_pure_virtual=True, is_virtual=True)
    cls.add_method('NotifyCcaStart', 'void', [], is_pure_virtual=True, is_virtual=True)
    cls.add_method('NotifyRxEndError', 'void', [], is_pure_virtual=True, is_virtual=True)
    cls.add_method('NotifyRxEndOk', 'void', [], is_pure_virtual=True, is_virtual=True)
    cls.add_method('NotifyRxStart', 'void', [], is_pure_virtual=True, is_virtual=True)
    cls.add_method('NotifyTxStart', 'void', [param('ns3::Time', 'duration')], is_pure_virtual=True, is_virtual=True)
    return
def register_Ns3UanTxMode_methods(root_module, cls):
    """Bind ns3::UanTxMode (uan-tx-mode.h, module 'uan')."""
    cls.add_output_stream_operator()
    # Copy and default constructors.
    cls.add_constructor([param('ns3::UanTxMode const &', 'arg0')])
    cls.add_constructor([])
    # Read-only accessors for the transmission-mode parameters.
    cls.add_method('GetBandwidthHz', 'uint32_t', [], is_const=True)
    cls.add_method('GetCenterFreqHz', 'uint32_t', [], is_const=True)
    cls.add_method('GetConstellationSize', 'uint32_t', [], is_const=True)
    cls.add_method('GetDataRateBps', 'uint32_t', [], is_const=True)
    cls.add_method('GetModType', 'ns3::UanTxMode::ModulationType', [], is_const=True)
    cls.add_method('GetName', 'std::string', [], is_const=True)
    cls.add_method('GetPhyRateSps', 'uint32_t', [], is_const=True)
    cls.add_method('GetUid', 'uint32_t', [], is_const=True)
    return
def register_Ns3UanTxModeFactory_methods(root_module, cls):
    """Bind ns3::UanTxModeFactory (uan-tx-mode.h, module 'uan')."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::UanTxModeFactory const &', 'arg0')])
    cls.add_constructor([])
    # Static factory that builds a mode from its full parameter set.
    cls.add_method('CreateMode', 'ns3::UanTxMode', [param('ns3::UanTxMode::ModulationType', 'type'), param('uint32_t', 'dataRateBps'), param('uint32_t', 'phyRateSps'), param('uint32_t', 'cfHz'), param('uint32_t', 'bwHz'), param('uint32_t', 'constSize'), param('std::string', 'name')], is_static=True)
    # Static lookups, overloaded on name and on numeric uid.
    cls.add_method('GetMode', 'ns3::UanTxMode', [param('std::string', 'name')], is_static=True)
    cls.add_method('GetMode', 'ns3::UanTxMode', [param('uint32_t', 'uid')], is_static=True)
    return
def register_Ns3Vector2D_methods(root_module, cls):
    """Bind ns3::Vector2D (vector.h, module 'core')."""
    cls.add_binary_comparison_operator('<')
    cls.add_output_stream_operator()
    # Constructors: copy, component-wise, and default.
    cls.add_constructor([param('ns3::Vector2D const &', 'arg0')])
    cls.add_constructor([param('double', '_x'), param('double', '_y')])
    cls.add_constructor([])
    # Mutable coordinate members.
    cls.add_instance_attribute('x', 'double', is_const=False)
    cls.add_instance_attribute('y', 'double', is_const=False)
    return
def register_Ns3Vector3D_methods(root_module, cls):
    """Bind ns3::Vector3D (vector.h, module 'core')."""
    cls.add_binary_comparison_operator('<')
    cls.add_output_stream_operator()
    # Constructors: copy, component-wise, and default.
    cls.add_constructor([param('ns3::Vector3D const &', 'arg0')])
    cls.add_constructor([param('double', '_x'), param('double', '_y'), param('double', '_z')])
    cls.add_constructor([])
    # Mutable coordinate members.
    cls.add_instance_attribute('x', 'double', is_const=False)
    cls.add_instance_attribute('y', 'double', is_const=False)
    cls.add_instance_attribute('z', 'double', is_const=False)
    return
def register_Ns3Empty_methods(root_module, cls):
    """Bind ns3::empty (empty.h, module 'core'), the placeholder type that
    appears as filler in ns3::Callback signatures elsewhere in this file."""
    # Default and copy constructors are all the type exposes.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::empty const &', 'arg0')])
    return
def register_Ns3Int64x64_t_methods(root_module, cls):
    """Bind ns3::int64x64_t, the 64.64 fixed-point type (int64x64-double.h, module 'core')."""
    # Binary arithmetic operators returning a new int64x64_t.
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
    cls.add_unary_numeric_operator('-')
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
    # Comparison operators (first group).
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('>')
    cls.add_binary_comparison_operator('!=')
    # In-place arithmetic operators.
    cls.add_inplace_numeric_operator('*=', param('ns3::int64x64_t const &', u'right'))
    cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', u'right'))
    cls.add_inplace_numeric_operator('-=', param('ns3::int64x64_t const &', u'right'))
    cls.add_inplace_numeric_operator('/=', param('ns3::int64x64_t const &', u'right'))
    cls.add_output_stream_operator()
    # Comparison operators (second group).
    cls.add_binary_comparison_operator('<=')
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('>=')
    # Constructors: default, one per supported numeric type, the raw
    # hi/lo 64-bit word pair, and the copy constructor.
    cls.add_constructor([])
    cls.add_constructor([param('double', 'v')])
    cls.add_constructor([param('long double', 'v')])
    cls.add_constructor([param('int', 'v')])
    cls.add_constructor([param('long int', 'v')])
    cls.add_constructor([param('long long int', 'v')])
    cls.add_constructor([param('unsigned int', 'v')])
    cls.add_constructor([param('long unsigned int', 'v')])
    cls.add_constructor([param('long long unsigned int', 'v')])
    cls.add_constructor([param('int64_t', 'hi'), param('uint64_t', 'lo')])
    cls.add_constructor([param('ns3::int64x64_t const &', 'o')])
    # Accessors for the double approximation and the raw halves.
    cls.add_method('GetDouble', 'double', [], is_const=True)
    cls.add_method('GetHigh', 'int64_t', [], is_const=True)
    cls.add_method('GetLow', 'uint64_t', [], is_const=True)
    # Reciprocal helpers.
    cls.add_method('Invert', 'ns3::int64x64_t', [param('uint64_t', 'v')], is_static=True)
    cls.add_method('MulByInvert', 'void', [param('ns3::int64x64_t const &', 'o')])
    # Static constant identifying the backing implementation.
    cls.add_static_attribute('implementation', 'ns3::int64x64_t::impl_type const', is_const=True)
    return
def register_Ns3AcousticModemEnergyModelHelper_methods(root_module, cls):
    """Bind ns3::AcousticModemEnergyModelHelper (acoustic-modem-energy-model-helper.h, module 'uan')."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::AcousticModemEnergyModelHelper const &', 'arg0')])
    cls.add_constructor([])
    # Virtual attribute setter.
    cls.add_method('Set', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'v')], is_virtual=True)
    # Registers the energy-depletion callback (a zero-argument ns3::Callback,
    # padded with ns3::empty placeholders).
    cls.add_method('SetDepletionCallback', 'void', [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')])
    # Private virtual hook that installs the model on a device/source pair.
    cls.add_method('DoInstall', 'ns3::Ptr< ns3::DeviceEnergyModel >', [param('ns3::Ptr< ns3::NetDevice >', 'device'), param('ns3::Ptr< ns3::EnergySource >', 'source')], is_const=True, visibility='private', is_virtual=True)
    return
def register_Ns3Chunk_methods(root_module, cls):
    """Register Python bindings for the abstract base class ns3::Chunk (module 'network')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Chunk const &', 'arg0')])
    # pure virtual uint32_t Deserialize(ns3::Buffer::Iterator start)
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_virtual=True)
    # static ns3::TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # pure virtual void Print(std::ostream & os) const
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3Header_methods(root_module, cls):
    """Register Python bindings for the abstract base class ns3::Header (module 'network')."""
    # operator<< support for printing headers.
    cls.add_output_stream_operator()
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Header const &', 'arg0')])
    # pure virtual uint32_t Deserialize(ns3::Buffer::Iterator start)
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_virtual=True)
    # pure virtual uint32_t GetSerializedSize() const
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    # static ns3::TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # pure virtual void Print(std::ostream & os) const
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True)
    # pure virtual void Serialize(ns3::Buffer::Iterator start) const
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3Object_methods(root_module, cls):
    """Register Python bindings for ns3::Object (module 'core')."""
    # Public default constructor.
    cls.add_constructor([])
    # Aggregation / lifecycle API.
    cls.add_method('AggregateObject', 'void', [param('ns3::Ptr< ns3::Object >', 'other')])
    cls.add_method('Dispose', 'void', [])
    cls.add_method('GetAggregateIterator', 'ns3::Object::AggregateIterator', [], is_const=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Initialize', 'void', [])
    cls.add_method('IsInitialized', 'bool', [], is_const=True)
    # Protected copy constructor.
    cls.add_constructor([param('ns3::Object const &', 'o')], visibility='protected')
    # Protected virtual hooks overridden by subclasses.
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    cls.add_method('DoInitialize', 'void', [], visibility='protected', is_virtual=True)
    cls.add_method('NotifyNewAggregate', 'void', [], visibility='protected', is_virtual=True)
    return
def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
    """Register Python bindings for ns3::Object::AggregateIterator (module 'core')."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
    cls.add_constructor([])
    # bool HasNext() const
    cls.add_method('HasNext', 'bool', [], is_const=True)
    # ns3::Ptr<ns3::Object const> Next()
    cls.add_method('Next', 'ns3::Ptr< ns3::Object const >', [])
    return
def register_Ns3RandomVariableStream_methods(root_module, cls):
    """Register Python bindings for ns3::RandomVariableStream (module 'core')."""
    # static ns3::TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # Default constructor.
    cls.add_constructor([])
    # Stream-number accessors.
    cls.add_method('SetStream', 'void', [param('int64_t', 'stream')])
    cls.add_method('GetStream', 'int64_t', [], is_const=True)
    # Antithetic-variate flag accessors.
    cls.add_method('SetAntithetic', 'void', [param('bool', 'isAntithetic')])
    cls.add_method('IsAntithetic', 'bool', [], is_const=True)
    # Pure virtual draw methods implemented by concrete distributions.
    cls.add_method('GetValue', 'double', [], is_pure_virtual=True, is_virtual=True)
    cls.add_method('GetInteger', 'uint32_t', [], is_pure_virtual=True, is_virtual=True)
    # protected ns3::RngStream * Peek() const
    cls.add_method('Peek', 'ns3::RngStream *', [], is_const=True, visibility='protected')
    return
def register_Ns3SequentialRandomVariable_methods(root_module, cls):
    """Register Python bindings for ns3::SequentialRandomVariable (module 'core')."""
    # static ns3::TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # Default constructor.
    cls.add_constructor([])
    # Const accessors for the sequence parameters.
    cls.add_method('GetMin', 'double', [], is_const=True)
    cls.add_method('GetMax', 'double', [], is_const=True)
    cls.add_method('GetIncrement', 'ns3::Ptr< ns3::RandomVariableStream >', [], is_const=True)
    cls.add_method('GetConsecutive', 'uint32_t', [], is_const=True)
    # Virtual draw methods (overrides of RandomVariableStream).
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
    return
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
    """Register Python bindings for SimpleRefCount<ns3::AttributeAccessor> (module 'core')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
    """Register Python bindings for SimpleRefCount<ns3::AttributeChecker> (module 'core')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
    """Register Python bindings for SimpleRefCount<ns3::AttributeValue> (module 'core')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
    """Register Python bindings for SimpleRefCount<ns3::CallbackImplBase> (module 'core')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, cls):
    """Register Python bindings for SimpleRefCount<ns3::EventImpl> (module 'core')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter< ns3::EventImpl > > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, cls):
    """Register Python bindings for SimpleRefCount<ns3::Hash::Implementation> (module 'core')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter< ns3::Hash::Implementation > > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, cls):
    """Register Python bindings for SimpleRefCount<ns3::NixVector> (module 'core')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter< ns3::NixVector > > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, cls):
    """Register Python bindings for SimpleRefCount<ns3::Packet> (module 'core')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter< ns3::Packet > > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
    """Register Python bindings for SimpleRefCount<ns3::TraceSourceAccessor> (module 'core')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3Time_methods(root_module, cls):
    """Register Python bindings for ns3::Time (module 'core')."""
    # Arithmetic operators: Time * int, Time + Time, Time - Time, Time / int.
    cls.add_binary_numeric_operator('*', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', u'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', u'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', u'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', u'right'))
    # Comparison operators.
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('>')
    cls.add_binary_comparison_operator('!=')
    # In-place arithmetic.
    cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', u'right'))
    cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', u'right'))
    # operator<< and the remaining comparisons.
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('<=')
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('>=')
    # Constructors: default, copy, one per C++ numeric type, int64x64_t, string.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Time const &', 'o')])
    cls.add_constructor([param('double', 'v')])
    cls.add_constructor([param('int', 'v')])
    cls.add_constructor([param('long int', 'v')])
    cls.add_constructor([param('long long int', 'v')])
    cls.add_constructor([param('unsigned int', 'v')])
    cls.add_constructor([param('long unsigned int', 'v')])
    cls.add_constructor([param('long long unsigned int', 'v')])
    cls.add_constructor([param('ns3::int64x64_t const &', 'v')])
    cls.add_constructor([param('std::string const &', 's')])
    # Unit-aware conversion and comparison helpers.
    cls.add_method('As', 'ns3::TimeWithUnit', [param('ns3::Time::Unit const', 'unit')], is_const=True)
    cls.add_method('Compare', 'int', [param('ns3::Time const &', 'o')], is_const=True)
    # Static factory methods.
    cls.add_method('From', 'ns3::Time', [param('ns3::int64x64_t const &', 'value')], is_static=True)
    cls.add_method('From', 'ns3::Time', [param('ns3::int64x64_t const &', 'value'), param('ns3::Time::Unit', 'unit')], is_static=True)
    cls.add_method('FromDouble', 'ns3::Time', [param('double', 'value'), param('ns3::Time::Unit', 'unit')], is_static=True)
    cls.add_method('FromInteger', 'ns3::Time', [param('uint64_t', 'value'), param('ns3::Time::Unit', 'unit')], is_static=True)
    # Const getters in the various time units.
    cls.add_method('GetDays', 'double', [], is_const=True)
    cls.add_method('GetDouble', 'double', [], is_const=True)
    cls.add_method('GetFemtoSeconds', 'int64_t', [], is_const=True)
    cls.add_method('GetHours', 'double', [], is_const=True)
    cls.add_method('GetInteger', 'int64_t', [], is_const=True)
    cls.add_method('GetMicroSeconds', 'int64_t', [], is_const=True)
    cls.add_method('GetMilliSeconds', 'int64_t', [], is_const=True)
    cls.add_method('GetMinutes', 'double', [], is_const=True)
    cls.add_method('GetNanoSeconds', 'int64_t', [], is_const=True)
    cls.add_method('GetPicoSeconds', 'int64_t', [], is_const=True)
    cls.add_method('GetResolution', 'ns3::Time::Unit', [], is_static=True)
    cls.add_method('GetSeconds', 'double', [], is_const=True)
    cls.add_method('GetTimeStep', 'int64_t', [], is_const=True)
    cls.add_method('GetYears', 'double', [], is_const=True)
    # Sign / zero predicates.
    cls.add_method('IsNegative', 'bool', [], is_const=True)
    cls.add_method('IsPositive', 'bool', [], is_const=True)
    cls.add_method('IsStrictlyNegative', 'bool', [], is_const=True)
    cls.add_method('IsStrictlyPositive', 'bool', [], is_const=True)
    cls.add_method('IsZero', 'bool', [], is_const=True)
    # Static extrema, resolution control and initialization.
    cls.add_method('Max', 'ns3::Time', [], is_static=True)
    cls.add_method('Min', 'ns3::Time', [], is_static=True)
    cls.add_method('SetResolution', 'void', [param('ns3::Time::Unit', 'resolution')], is_static=True)
    cls.add_method('StaticInit', 'bool', [], is_static=True)
    # Conversions to other representations in a given unit.
    cls.add_method('To', 'ns3::int64x64_t', [param('ns3::Time::Unit', 'unit')], is_const=True)
    cls.add_method('ToDouble', 'double', [param('ns3::Time::Unit', 'unit')], is_const=True)
    cls.add_method('ToInteger', 'int64_t', [param('ns3::Time::Unit', 'unit')], is_const=True)
    return
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
    """Register Python bindings for ns3::TraceSourceAccessor (module 'core')."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
    cls.add_constructor([])
    # Pure virtual connect/disconnect hooks; 'obj' is borrowed, never owned.
    cls.add_method('Connect', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('ConnectWithoutContext', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('Disconnect', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('DisconnectWithoutContext', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3Trailer_methods(root_module, cls):
    """Register Python bindings for the abstract base class ns3::Trailer (module 'network')."""
    # operator<< support for printing trailers.
    cls.add_output_stream_operator()
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Trailer const &', 'arg0')])
    # pure virtual uint32_t Deserialize(ns3::Buffer::Iterator end) — note: iterator points at the END.
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'end')], is_pure_virtual=True, is_virtual=True)
    # pure virtual uint32_t GetSerializedSize() const
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    # static ns3::TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # pure virtual void Print(std::ostream & os) const
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True)
    # pure virtual void Serialize(ns3::Buffer::Iterator start) const
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3TriangularRandomVariable_methods(root_module, cls):
    """Register Python bindings for ns3::TriangularRandomVariable (module 'core')."""
    # static ns3::TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # Default constructor.
    cls.add_constructor([])
    # Const accessors for the distribution parameters.
    cls.add_method('GetMean', 'double', [], is_const=True)
    cls.add_method('GetMin', 'double', [], is_const=True)
    cls.add_method('GetMax', 'double', [], is_const=True)
    # Parameterized draws with explicit mean/min/max.
    cls.add_method('GetValue', 'double', [param('double', 'mean'), param('double', 'min'), param('double', 'max')])
    cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'mean'), param('uint32_t', 'min'), param('uint32_t', 'max')])
    # Virtual draws using the configured attributes (overrides of RandomVariableStream).
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
    return
def register_Ns3UanHeaderCommon_methods(root_module, cls):
    """Register Python bindings for ns3::UanHeaderCommon (module 'uan')."""
    # Copy, default, and (src, dest, type) constructors.
    cls.add_constructor([param('ns3::UanHeaderCommon const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::UanAddress const', 'src'), param('ns3::UanAddress const', 'dest'), param('uint8_t', 'type')])
    # virtual uint32_t Deserialize(ns3::Buffer::Iterator start)
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True)
    # Const getters for the header fields and type identification.
    cls.add_method('GetDest', 'ns3::UanAddress', [], is_const=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetSrc', 'ns3::UanAddress', [], is_const=True)
    cls.add_method('GetType', 'uint8_t', [], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # virtual void Print(std::ostream & os) const
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    # virtual void Serialize(ns3::Buffer::Iterator start) const
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True)
    # Setters for the header fields.
    cls.add_method('SetDest', 'void', [param('ns3::UanAddress', 'dest')])
    cls.add_method('SetSrc', 'void', [param('ns3::UanAddress', 'src')])
    cls.add_method('SetType', 'void', [param('uint8_t', 'type')])
    return
def register_Ns3UanHeaderRcAck_methods(root_module, cls):
    """Register bindings for ns3::UanHeaderRcAck (uan-header-rc.h, module 'uan')."""
    # Copy constructor, then the default constructor.
    cls.add_constructor([param('ns3::UanHeaderRcAck const &', 'arg0')])
    cls.add_constructor([])
    # Table of (method name, return type, parameters, add_method keyword flags).
    method_specs = [
        ('AddNackedFrame', 'void', [param('uint8_t', 'frame')], {}),
        ('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')],
         {'is_virtual': True}),
        ('GetFrameNo', 'uint8_t', [], {'is_const': True}),
        ('GetInstanceTypeId', 'ns3::TypeId', [], {'is_const': True, 'is_virtual': True}),
        ('GetNackedFrames', 'std::set< unsigned char > const &', [], {'is_const': True}),
        ('GetNoNacks', 'uint8_t', [], {'is_const': True}),
        ('GetSerializedSize', 'uint32_t', [], {'is_const': True, 'is_virtual': True}),
        ('GetTypeId', 'ns3::TypeId', [], {'is_static': True}),
        ('Print', 'void', [param('std::ostream &', 'os')],
         {'is_const': True, 'is_virtual': True}),
        ('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')],
         {'is_const': True, 'is_virtual': True}),
        ('SetFrameNo', 'void', [param('uint8_t', 'frameNo')], {}),
    ]
    for method_name, return_type, parameters, flags in method_specs:
        cls.add_method(method_name, return_type, parameters, **flags)
    return
def register_Ns3UanHeaderRcCts_methods(root_module, cls):
    """Register bindings for ns3::UanHeaderRcCts (uan-header-rc.h, module 'uan')."""
    # Copy constructor, default constructor, and the full-field constructor.
    cls.add_constructor([param('ns3::UanHeaderRcCts const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('uint8_t', 'frameNo'), param('uint8_t', 'retryNo'), param('ns3::Time', 'rtsTs'), param('ns3::Time', 'delay'), param('ns3::UanAddress', 'addr')])
    # Table of (method name, return type, parameters, add_method keyword flags).
    method_specs = [
        ('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')],
         {'is_virtual': True}),
        ('GetAddress', 'ns3::UanAddress', [], {'is_const': True}),
        ('GetDelayToTx', 'ns3::Time', [], {'is_const': True}),
        ('GetFrameNo', 'uint8_t', [], {'is_const': True}),
        ('GetInstanceTypeId', 'ns3::TypeId', [], {'is_const': True, 'is_virtual': True}),
        ('GetRetryNo', 'uint8_t', [], {'is_const': True}),
        ('GetRtsTimeStamp', 'ns3::Time', [], {'is_const': True}),
        ('GetSerializedSize', 'uint32_t', [], {'is_const': True, 'is_virtual': True}),
        ('GetTypeId', 'ns3::TypeId', [], {'is_static': True}),
        ('Print', 'void', [param('std::ostream &', 'os')],
         {'is_const': True, 'is_virtual': True}),
        ('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')],
         {'is_const': True, 'is_virtual': True}),
        ('SetAddress', 'void', [param('ns3::UanAddress', 'addr')], {}),
        ('SetDelayToTx', 'void', [param('ns3::Time', 'delay')], {}),
        ('SetFrameNo', 'void', [param('uint8_t', 'frameNo')], {}),
        ('SetRetryNo', 'void', [param('uint8_t', 'no')], {}),
        ('SetRtsTimeStamp', 'void', [param('ns3::Time', 'timeStamp')], {}),
    ]
    for method_name, return_type, parameters, flags in method_specs:
        cls.add_method(method_name, return_type, parameters, **flags)
    return
def register_Ns3UanHeaderRcCtsGlobal_methods(root_module, cls):
    """Register bindings for ns3::UanHeaderRcCtsGlobal (uan-header-rc.h, module 'uan')."""
    # Copy constructor, default constructor, and the full-field constructor.
    cls.add_constructor([param('ns3::UanHeaderRcCtsGlobal const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Time', 'wt'), param('ns3::Time', 'ts'), param('uint16_t', 'rate'), param('uint16_t', 'retryRate')])
    # Table of (method name, return type, parameters, add_method keyword flags).
    method_specs = [
        ('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')],
         {'is_virtual': True}),
        ('GetInstanceTypeId', 'ns3::TypeId', [], {'is_const': True, 'is_virtual': True}),
        ('GetRateNum', 'uint16_t', [], {'is_const': True}),
        ('GetRetryRate', 'uint16_t', [], {'is_const': True}),
        ('GetSerializedSize', 'uint32_t', [], {'is_const': True, 'is_virtual': True}),
        ('GetTxTimeStamp', 'ns3::Time', [], {'is_const': True}),
        ('GetTypeId', 'ns3::TypeId', [], {'is_static': True}),
        ('GetWindowTime', 'ns3::Time', [], {'is_const': True}),
        ('Print', 'void', [param('std::ostream &', 'os')],
         {'is_const': True, 'is_virtual': True}),
        ('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')],
         {'is_const': True, 'is_virtual': True}),
        ('SetRateNum', 'void', [param('uint16_t', 'rate')], {}),
        ('SetRetryRate', 'void', [param('uint16_t', 'rate')], {}),
        ('SetTxTimeStamp', 'void', [param('ns3::Time', 'timeStamp')], {}),
        ('SetWindowTime', 'void', [param('ns3::Time', 't')], {}),
    ]
    for method_name, return_type, parameters, flags in method_specs:
        cls.add_method(method_name, return_type, parameters, **flags)
    return
def register_Ns3UanHeaderRcData_methods(root_module, cls):
    """Register bindings for ns3::UanHeaderRcData (uan-header-rc.h, module 'uan')."""
    # Copy constructor, default constructor, and the full-field constructor.
    cls.add_constructor([param('ns3::UanHeaderRcData const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('uint8_t', 'frameNum'), param('ns3::Time', 'propDelay')])
    # Table of (method name, return type, parameters, add_method keyword flags).
    method_specs = [
        ('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')],
         {'is_virtual': True}),
        ('GetFrameNo', 'uint8_t', [], {'is_const': True}),
        ('GetInstanceTypeId', 'ns3::TypeId', [], {'is_const': True, 'is_virtual': True}),
        ('GetPropDelay', 'ns3::Time', [], {'is_const': True}),
        ('GetSerializedSize', 'uint32_t', [], {'is_const': True, 'is_virtual': True}),
        ('GetTypeId', 'ns3::TypeId', [], {'is_static': True}),
        ('Print', 'void', [param('std::ostream &', 'os')],
         {'is_const': True, 'is_virtual': True}),
        ('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')],
         {'is_const': True, 'is_virtual': True}),
        ('SetFrameNo', 'void', [param('uint8_t', 'frameNum')], {}),
        ('SetPropDelay', 'void', [param('ns3::Time', 'propDelay')], {}),
    ]
    for method_name, return_type, parameters, flags in method_specs:
        cls.add_method(method_name, return_type, parameters, **flags)
    return
def register_Ns3UanHeaderRcRts_methods(root_module, cls):
    """Register bindings for ns3::UanHeaderRcRts (uan-header-rc.h, module 'uan')."""
    # Copy constructor, default constructor, and the full-field constructor.
    cls.add_constructor([param('ns3::UanHeaderRcRts const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('uint8_t', 'frameNo'), param('uint8_t', 'retryNo'), param('uint8_t', 'noFrames'), param('uint16_t', 'length'), param('ns3::Time', 'ts')])
    # Table of (method name, return type, parameters, add_method keyword flags).
    method_specs = [
        ('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')],
         {'is_virtual': True}),
        ('GetFrameNo', 'uint8_t', [], {'is_const': True}),
        ('GetInstanceTypeId', 'ns3::TypeId', [], {'is_const': True, 'is_virtual': True}),
        ('GetLength', 'uint16_t', [], {'is_const': True}),
        ('GetNoFrames', 'uint8_t', [], {'is_const': True}),
        ('GetRetryNo', 'uint8_t', [], {'is_const': True}),
        ('GetSerializedSize', 'uint32_t', [], {'is_const': True, 'is_virtual': True}),
        ('GetTimeStamp', 'ns3::Time', [], {'is_const': True}),
        ('GetTypeId', 'ns3::TypeId', [], {'is_static': True}),
        ('Print', 'void', [param('std::ostream &', 'os')],
         {'is_const': True, 'is_virtual': True}),
        ('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')],
         {'is_const': True, 'is_virtual': True}),
        ('SetFrameNo', 'void', [param('uint8_t', 'fno')], {}),
        ('SetLength', 'void', [param('uint16_t', 'length')], {}),
        ('SetNoFrames', 'void', [param('uint8_t', 'no')], {}),
        ('SetRetryNo', 'void', [param('uint8_t', 'no')], {}),
        ('SetTimeStamp', 'void', [param('ns3::Time', 'timeStamp')], {}),
    ]
    for method_name, return_type, parameters, flags in method_specs:
        cls.add_method(method_name, return_type, parameters, **flags)
    return
def register_Ns3UanMac_methods(root_module, cls):
    """Register bindings for the abstract base class ns3::UanMac (uan-mac.h, module 'uan')."""
    # Default constructor, then the copy constructor.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::UanMac const &', 'arg0')])
    # Table of (method name, return type, parameters, add_method keyword flags).
    # Most entries are pure virtual: UanMac is an interface implemented by the
    # concrete MAC classes below.
    method_specs = [
        ('AssignStreams', 'int64_t', [param('int64_t', 'stream')],
         {'is_pure_virtual': True, 'is_virtual': True}),
        ('AttachPhy', 'void', [param('ns3::Ptr< ns3::UanPhy >', 'phy')],
         {'is_pure_virtual': True, 'is_virtual': True}),
        ('Clear', 'void', [], {'is_pure_virtual': True, 'is_virtual': True}),
        ('Enqueue', 'bool',
         [param('ns3::Ptr< ns3::Packet >', 'pkt'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
         {'is_pure_virtual': True, 'is_virtual': True}),
        ('GetAddress', 'ns3::Address', [], {'is_pure_virtual': True, 'is_virtual': True}),
        ('GetBroadcast', 'ns3::Address', [],
         {'is_pure_virtual': True, 'is_const': True, 'is_virtual': True}),
        ('GetTypeId', 'ns3::TypeId', [], {'is_static': True}),
        ('SetAddress', 'void', [param('ns3::UanAddress', 'addr')],
         {'is_pure_virtual': True, 'is_virtual': True}),
        ('SetForwardUpCb', 'void',
         [param('ns3::Callback< void, ns3::Ptr< ns3::Packet >, ns3::UanAddress const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
         {'is_pure_virtual': True, 'is_virtual': True}),
    ]
    for method_name, return_type, parameters, flags in method_specs:
        cls.add_method(method_name, return_type, parameters, **flags)
    return
def register_Ns3UanMacAloha_methods(root_module, cls):
    """Register bindings for ns3::UanMacAloha (uan-mac-aloha.h, module 'uan')."""
    # Copy constructor, then the default constructor.
    cls.add_constructor([param('ns3::UanMacAloha const &', 'arg0')])
    cls.add_constructor([])
    # Table of (method name, return type, parameters, add_method keyword flags).
    method_specs = [
        ('AssignStreams', 'int64_t', [param('int64_t', 'stream')], {'is_virtual': True}),
        ('AttachPhy', 'void', [param('ns3::Ptr< ns3::UanPhy >', 'phy')],
         {'is_virtual': True}),
        ('Clear', 'void', [], {'is_virtual': True}),
        ('Enqueue', 'bool',
         [param('ns3::Ptr< ns3::Packet >', 'pkt'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
         {'is_virtual': True}),
        ('GetAddress', 'ns3::Address', [], {'is_virtual': True}),
        ('GetBroadcast', 'ns3::Address', [], {'is_const': True, 'is_virtual': True}),
        ('GetTypeId', 'ns3::TypeId', [], {'is_static': True}),
        ('SetAddress', 'void', [param('ns3::UanAddress', 'addr')], {'is_virtual': True}),
        ('SetForwardUpCb', 'void',
         [param('ns3::Callback< void, ns3::Ptr< ns3::Packet >, ns3::UanAddress const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
         {'is_virtual': True}),
        # DoDispose is protected in C++, hence the explicit visibility flag.
        ('DoDispose', 'void', [], {'visibility': 'protected', 'is_virtual': True}),
    ]
    for method_name, return_type, parameters, flags in method_specs:
        cls.add_method(method_name, return_type, parameters, **flags)
    return
def register_Ns3UanMacCw_methods(root_module, cls):
    """Register bindings for ns3::UanMacCw (uan-mac-cw.h, module 'uan')."""
    # Copy constructor, then the default constructor.
    cls.add_constructor([param('ns3::UanMacCw const &', 'arg0')])
    cls.add_constructor([])
    # Table of (method name, return type, parameters, add_method keyword flags).
    method_specs = [
        ('AssignStreams', 'int64_t', [param('int64_t', 'stream')], {'is_virtual': True}),
        ('AttachPhy', 'void', [param('ns3::Ptr< ns3::UanPhy >', 'phy')],
         {'is_virtual': True}),
        ('Clear', 'void', [], {'is_virtual': True}),
        ('Enqueue', 'bool',
         [param('ns3::Ptr< ns3::Packet >', 'pkt'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
         {'is_virtual': True}),
        ('GetAddress', 'ns3::Address', [], {'is_virtual': True}),
        ('GetBroadcast', 'ns3::Address', [], {'is_const': True, 'is_virtual': True}),
        ('GetCw', 'uint32_t', [], {'is_virtual': True}),
        ('GetSlotTime', 'ns3::Time', [], {'is_virtual': True}),
        ('GetTypeId', 'ns3::TypeId', [], {'is_static': True}),
        # PHY listener notifications.
        ('NotifyCcaEnd', 'void', [], {'is_virtual': True}),
        ('NotifyCcaStart', 'void', [], {'is_virtual': True}),
        ('NotifyRxEndError', 'void', [], {'is_virtual': True}),
        ('NotifyRxEndOk', 'void', [], {'is_virtual': True}),
        ('NotifyRxStart', 'void', [], {'is_virtual': True}),
        ('NotifyTxStart', 'void', [param('ns3::Time', 'duration')], {'is_virtual': True}),
        ('SetAddress', 'void', [param('ns3::UanAddress', 'addr')], {'is_virtual': True}),
        ('SetCw', 'void', [param('uint32_t', 'cw')], {'is_virtual': True}),
        ('SetForwardUpCb', 'void',
         [param('ns3::Callback< void, ns3::Ptr< ns3::Packet >, ns3::UanAddress const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
         {'is_virtual': True}),
        ('SetSlotTime', 'void', [param('ns3::Time', 'duration')], {'is_virtual': True}),
        # DoDispose is protected in C++, hence the explicit visibility flag.
        ('DoDispose', 'void', [], {'visibility': 'protected', 'is_virtual': True}),
    ]
    for method_name, return_type, parameters, flags in method_specs:
        cls.add_method(method_name, return_type, parameters, **flags)
    return
def register_Ns3UanMacRc_methods(root_module, cls):
    """Register bindings for ns3::UanMacRc (uan-mac-rc.h, module 'uan')."""
    # Copy constructor, then the default constructor.
    cls.add_constructor([param('ns3::UanMacRc const &', 'arg0')])
    cls.add_constructor([])
    # Table of (method name, return type, parameters, add_method keyword flags).
    method_specs = [
        ('AssignStreams', 'int64_t', [param('int64_t', 'stream')], {'is_virtual': True}),
        ('AttachPhy', 'void', [param('ns3::Ptr< ns3::UanPhy >', 'phy')],
         {'is_virtual': True}),
        ('Clear', 'void', [], {'is_virtual': True}),
        ('Enqueue', 'bool',
         [param('ns3::Ptr< ns3::Packet >', 'pkt'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
         {'is_virtual': True}),
        ('GetAddress', 'ns3::Address', [], {'is_virtual': True}),
        ('GetBroadcast', 'ns3::Address', [], {'is_const': True, 'is_virtual': True}),
        ('GetTypeId', 'ns3::TypeId', [], {'is_static': True}),
        ('SetAddress', 'void', [param('ns3::UanAddress', 'addr')], {'is_virtual': True}),
        ('SetForwardUpCb', 'void',
         [param('ns3::Callback< void, ns3::Ptr< ns3::Packet >, ns3::UanAddress const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
         {'is_virtual': True}),
        # DoDispose is protected in C++, hence the explicit visibility flag.
        ('DoDispose', 'void', [], {'visibility': 'protected', 'is_virtual': True}),
    ]
    for method_name, return_type, parameters, flags in method_specs:
        cls.add_method(method_name, return_type, parameters, **flags)
    return
def register_Ns3UanMacRcGw_methods(root_module, cls):
    """Register bindings for ns3::UanMacRcGw (uan-mac-rc-gw.h, module 'uan')."""
    # Copy constructor, then the default constructor.
    cls.add_constructor([param('ns3::UanMacRcGw const &', 'arg0')])
    cls.add_constructor([])
    # Table of (method name, return type, parameters, add_method keyword flags).
    method_specs = [
        ('AssignStreams', 'int64_t', [param('int64_t', 'stream')], {'is_virtual': True}),
        ('AttachPhy', 'void', [param('ns3::Ptr< ns3::UanPhy >', 'phy')],
         {'is_virtual': True}),
        ('Clear', 'void', [], {'is_virtual': True}),
        ('Enqueue', 'bool',
         [param('ns3::Ptr< ns3::Packet >', 'pkt'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
         {'is_virtual': True}),
        ('GetAddress', 'ns3::Address', [], {'is_virtual': True}),
        ('GetBroadcast', 'ns3::Address', [], {'is_const': True, 'is_virtual': True}),
        ('GetTypeId', 'ns3::TypeId', [], {'is_static': True}),
        ('SetAddress', 'void', [param('ns3::UanAddress', 'addr')], {'is_virtual': True}),
        ('SetForwardUpCb', 'void',
         [param('ns3::Callback< void, ns3::Ptr< ns3::Packet >, ns3::UanAddress const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
         {'is_virtual': True}),
        # DoDispose is protected in C++, hence the explicit visibility flag.
        ('DoDispose', 'void', [], {'visibility': 'protected', 'is_virtual': True}),
    ]
    for method_name, return_type, parameters, flags in method_specs:
        cls.add_method(method_name, return_type, parameters, **flags)
    return
def register_Ns3UanNoiseModel_methods(root_module, cls):
    """Register bindings for the abstract base class ns3::UanNoiseModel (uan-noise-model.h, module 'uan')."""
    # Default constructor, then the copy constructor.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::UanNoiseModel const &', 'arg0')])
    # Table of (method name, return type, parameters, add_method keyword flags).
    method_specs = [
        ('Clear', 'void', [], {'is_virtual': True}),
        ('DoDispose', 'void', [], {'is_virtual': True}),
        # GetNoiseDbHz is the one pure-virtual hook subclasses must implement.
        ('GetNoiseDbHz', 'double', [param('double', 'fKhz')],
         {'is_pure_virtual': True, 'is_const': True, 'is_virtual': True}),
        ('GetTypeId', 'ns3::TypeId', [], {'is_static': True}),
    ]
    for method_name, return_type, parameters, flags in method_specs:
        cls.add_method(method_name, return_type, parameters, **flags)
    return
def register_Ns3UanNoiseModelDefault_methods(root_module, cls):
    """Register bindings for ns3::UanNoiseModelDefault (uan-noise-model-default.h, module 'uan')."""
    # Copy constructor, then the default constructor.
    cls.add_constructor([param('ns3::UanNoiseModelDefault const &', 'arg0')])
    cls.add_constructor([])
    # Concrete override of the base-class noise query, plus the TypeId accessor.
    cls.add_method('GetNoiseDbHz', 'double', [param('double', 'fKhz')],
                   is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    return
def register_Ns3UanPhy_methods(root_module, cls):
    """Register the ns3::UanPhy bindings (uan-phy.h) on *cls*.

    The member-function signatures are kept in a declarative table; each
    row is (name, return type, [(param type, param name), ...], extra
    keyword arguments for pybindgen's add_method).  Rows are registered
    in the original generated order.
    """
    # UanPhy() and UanPhy(UanPhy const &)
    cls.add_constructor([])
    cls.add_constructor([param('ns3::UanPhy const &', 'arg0')])
    # Frequently repeated type spellings and kwarg combinations.
    pkt = 'ns3::Ptr< ns3::Packet >'
    pkt_c = 'ns3::Ptr< ns3::Packet const >'
    pure = dict(is_pure_virtual=True, is_virtual=True)
    pure_const = dict(is_pure_virtual=True, is_const=True, is_virtual=True)
    plain = dict()
    method_table = [
        ('AssignStreams', 'int64_t', [('int64_t', 'stream')], pure),
        ('Clear', 'void', [], pure),
        ('EnergyDepletionHandler', 'void', [], pure),
        ('EnergyRechargeHandler', 'void', [], pure),
        ('GetCcaThresholdDb', 'double', [], pure),
        ('GetChannel', 'ns3::Ptr< ns3::UanChannel >', [], pure_const),
        ('GetDevice', 'ns3::Ptr< ns3::UanNetDevice >', [], pure_const),
        ('GetMode', 'ns3::UanTxMode', [('uint32_t', 'n')], pure),
        ('GetNModes', 'uint32_t', [], pure),
        ('GetPacketRx', pkt, [], pure_const),
        ('GetRxGainDb', 'double', [], pure),
        ('GetRxThresholdDb', 'double', [], pure),
        ('GetTransducer', 'ns3::Ptr< ns3::UanTransducer >', [], pure),
        ('GetTxPowerDb', 'double', [], pure),
        ('GetTypeId', 'ns3::TypeId', [], dict(is_static=True)),
        ('IsStateBusy', 'bool', [], pure),
        ('IsStateCcaBusy', 'bool', [], pure),
        ('IsStateIdle', 'bool', [], pure),
        ('IsStateRx', 'bool', [], pure),
        ('IsStateSleep', 'bool', [], pure),
        ('IsStateTx', 'bool', [], pure),
        ('NotifyIntChange', 'void', [], pure),
        ('NotifyRxBegin', 'void', [(pkt_c, 'packet')], plain),
        ('NotifyRxDrop', 'void', [(pkt_c, 'packet')], plain),
        ('NotifyRxEnd', 'void', [(pkt_c, 'packet')], plain),
        ('NotifyTransStartTx', 'void',
         [(pkt, 'packet'), ('double', 'txPowerDb'), ('ns3::UanTxMode', 'txMode')], pure),
        ('NotifyTxBegin', 'void', [(pkt_c, 'packet')], plain),
        ('NotifyTxDrop', 'void', [(pkt_c, 'packet')], plain),
        ('NotifyTxEnd', 'void', [(pkt_c, 'packet')], plain),
        ('RegisterListener', 'void', [('ns3::UanPhyListener *', 'listener')], pure),
        ('SendPacket', 'void', [(pkt, 'pkt'), ('uint32_t', 'modeNum')], pure),
        ('SetCcaThresholdDb', 'void', [('double', 'thresh')], pure),
        ('SetChannel', 'void', [('ns3::Ptr< ns3::UanChannel >', 'channel')], pure),
        ('SetDevice', 'void', [('ns3::Ptr< ns3::UanNetDevice >', 'device')], pure),
        ('SetEnergyModelCallback', 'void',
         [('ns3::Callback< void, int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')], pure),
        ('SetMac', 'void', [('ns3::Ptr< ns3::UanMac >', 'mac')], pure),
        ('SetReceiveErrorCallback', 'void',
         [('ns3::Callback< void, ns3::Ptr< ns3::Packet >, double, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], pure),
        ('SetReceiveOkCallback', 'void',
         [('ns3::Callback< void, ns3::Ptr< ns3::Packet >, double, ns3::UanTxMode, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], pure),
        ('SetRxGainDb', 'void', [('double', 'gain')], pure),
        ('SetRxThresholdDb', 'void', [('double', 'thresh')], pure),
        ('SetSleepMode', 'void', [('bool', 'sleep')], pure),
        ('SetTransducer', 'void', [('ns3::Ptr< ns3::UanTransducer >', 'trans')], pure),
        ('SetTxPowerDb', 'void', [('double', 'txpwr')], pure),
        ('StartRxPacket', 'void',
         [(pkt, 'pkt'), ('double', 'rxPowerDb'), ('ns3::UanTxMode', 'txMode'), ('ns3::UanPdp', 'pdp')], pure),
    ]
    for name, retval, args, extra in method_table:
        cls.add_method(name, retval, [param(t, n) for t, n in args], **extra)
    return
def register_Ns3UanPhyCalcSinr_methods(root_module, cls):
    """Register the ns3::UanPhyCalcSinr bindings (uan-phy.h) on *cls*."""
    # UanPhyCalcSinr() and UanPhyCalcSinr(UanPhyCalcSinr const &)
    cls.add_constructor([])
    cls.add_constructor([param('ns3::UanPhyCalcSinr const &', 'arg0')])
    # pure virtual double CalcSinrDb(pkt, arrTime, rxPowerDb, ambNoiseDb,
    #                                mode, pdp, arrivalList) const
    cls.add_method('CalcSinrDb', 'double',
                   [param('ns3::Ptr< ns3::Packet >', 'pkt'),
                    param('ns3::Time', 'arrTime'),
                    param('double', 'rxPowerDb'),
                    param('double', 'ambNoiseDb'),
                    param('ns3::UanTxMode', 'mode'),
                    param('ns3::UanPdp', 'pdp'),
                    param('std::list< ns3::UanPacketArrival > const &', 'arrivalList')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    # virtual void Clear()
    cls.add_method('Clear', 'void', [], is_virtual=True)
    # double DbToKp(double db) const
    cls.add_method('DbToKp', 'double', [param('double', 'db')], is_const=True)
    # static ns3::TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # double KpToDb(double kp) const
    cls.add_method('KpToDb', 'double', [param('double', 'kp')], is_const=True)
    # protected virtual void DoDispose()
    cls.add_method('DoDispose', 'void', [],
                   visibility='protected', is_virtual=True)
    return
def register_Ns3UanPhyCalcSinrDefault_methods(root_module, cls):
    """Register the ns3::UanPhyCalcSinrDefault bindings (uan-phy-gen.h) on *cls*."""
    # Copy constructor first, then the default constructor (original scan order).
    cls.add_constructor([param('ns3::UanPhyCalcSinrDefault const &', 'arg0')])
    cls.add_constructor([])
    # virtual double CalcSinrDb(...) const
    sinr_params = [param('ns3::Ptr< ns3::Packet >', 'pkt'),
                   param('ns3::Time', 'arrTime'),
                   param('double', 'rxPowerDb'),
                   param('double', 'ambNoiseDb'),
                   param('ns3::UanTxMode', 'mode'),
                   param('ns3::UanPdp', 'pdp'),
                   param('std::list< ns3::UanPacketArrival > const &', 'arrivalList')]
    cls.add_method('CalcSinrDb', 'double', sinr_params,
                   is_const=True, is_virtual=True)
    # static ns3::TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    return
def register_Ns3UanPhyCalcSinrDual_methods(root_module, cls):
    """Register the ns3::UanPhyCalcSinrDual bindings (uan-phy-dual.h) on *cls*."""
    # Copy constructor first, then the default constructor (original scan order).
    cls.add_constructor([param('ns3::UanPhyCalcSinrDual const &', 'arg0')])
    cls.add_constructor([])
    # virtual double CalcSinrDb(...) const
    sinr_params = [param('ns3::Ptr< ns3::Packet >', 'pkt'),
                   param('ns3::Time', 'arrTime'),
                   param('double', 'rxPowerDb'),
                   param('double', 'ambNoiseDb'),
                   param('ns3::UanTxMode', 'mode'),
                   param('ns3::UanPdp', 'pdp'),
                   param('std::list< ns3::UanPacketArrival > const &', 'arrivalList')]
    cls.add_method('CalcSinrDb', 'double', sinr_params,
                   is_const=True, is_virtual=True)
    # static ns3::TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    return
def register_Ns3UanPhyCalcSinrFhFsk_methods(root_module, cls):
    """Register the ns3::UanPhyCalcSinrFhFsk bindings (uan-phy-gen.h) on *cls*."""
    # Copy constructor first, then the default constructor (original scan order).
    cls.add_constructor([param('ns3::UanPhyCalcSinrFhFsk const &', 'arg0')])
    cls.add_constructor([])
    # virtual double CalcSinrDb(...) const
    sinr_params = [param('ns3::Ptr< ns3::Packet >', 'pkt'),
                   param('ns3::Time', 'arrTime'),
                   param('double', 'rxPowerDb'),
                   param('double', 'ambNoiseDb'),
                   param('ns3::UanTxMode', 'mode'),
                   param('ns3::UanPdp', 'pdp'),
                   param('std::list< ns3::UanPacketArrival > const &', 'arrivalList')]
    cls.add_method('CalcSinrDb', 'double', sinr_params,
                   is_const=True, is_virtual=True)
    # static ns3::TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    return
def register_Ns3UanPhyDual_methods(root_module, cls):
    """Register the ns3::UanPhyDual bindings (uan-phy-dual.h) on *cls*.

    The member-function signatures are kept in a declarative table; each
    row is (name, return type, [(param type, param name), ...], extra
    keyword arguments for pybindgen's add_method).  Rows are registered
    in the original generated order.
    """
    # UanPhyDual(UanPhyDual const &) and UanPhyDual()
    cls.add_constructor([param('ns3::UanPhyDual const &', 'arg0')])
    cls.add_constructor([])
    # Frequently repeated type spellings and kwarg combinations.
    pkt = 'ns3::Ptr< ns3::Packet >'
    virt = dict(is_virtual=True)
    const = dict(is_const=True)
    const_virt = dict(is_const=True, is_virtual=True)
    plain = dict()
    method_table = [
        ('AssignStreams', 'int64_t', [('int64_t', 'stream')], virt),
        ('Clear', 'void', [], virt),
        ('EnergyDepletionHandler', 'void', [], virt),
        ('EnergyRechargeHandler', 'void', [], virt),
        ('GetCcaThresholdDb', 'double', [], virt),
        ('GetCcaThresholdPhy1', 'double', [], const),
        ('GetCcaThresholdPhy2', 'double', [], const),
        ('GetChannel', 'ns3::Ptr< ns3::UanChannel >', [], const_virt),
        ('GetDevice', 'ns3::Ptr< ns3::UanNetDevice >', [], const_virt),
        ('GetMode', 'ns3::UanTxMode', [('uint32_t', 'n')], virt),
        ('GetModesPhy1', 'ns3::UanModesList', [], const),
        ('GetModesPhy2', 'ns3::UanModesList', [], const),
        ('GetNModes', 'uint32_t', [], virt),
        ('GetPacketRx', pkt, [], const_virt),
        ('GetPerModelPhy1', 'ns3::Ptr< ns3::UanPhyPer >', [], const),
        ('GetPerModelPhy2', 'ns3::Ptr< ns3::UanPhyPer >', [], const),
        ('GetPhy1PacketRx', pkt, [], const),
        ('GetPhy2PacketRx', pkt, [], const),
        ('GetRxGainDb', 'double', [], virt),
        ('GetRxGainDbPhy1', 'double', [], const),
        ('GetRxGainDbPhy2', 'double', [], const),
        ('GetRxThresholdDb', 'double', [], virt),
        ('GetSinrModelPhy1', 'ns3::Ptr< ns3::UanPhyCalcSinr >', [], const),
        ('GetSinrModelPhy2', 'ns3::Ptr< ns3::UanPhyCalcSinr >', [], const),
        ('GetTransducer', 'ns3::Ptr< ns3::UanTransducer >', [], virt),
        ('GetTxPowerDb', 'double', [], virt),
        ('GetTxPowerDbPhy1', 'double', [], const),
        ('GetTxPowerDbPhy2', 'double', [], const),
        ('GetTypeId', 'ns3::TypeId', [], dict(is_static=True)),
        ('IsPhy1Idle', 'bool', [], plain),
        ('IsPhy1Rx', 'bool', [], plain),
        ('IsPhy1Tx', 'bool', [], plain),
        ('IsPhy2Idle', 'bool', [], plain),
        ('IsPhy2Rx', 'bool', [], plain),
        ('IsPhy2Tx', 'bool', [], plain),
        ('IsStateBusy', 'bool', [], virt),
        ('IsStateCcaBusy', 'bool', [], virt),
        ('IsStateIdle', 'bool', [], virt),
        ('IsStateRx', 'bool', [], virt),
        ('IsStateSleep', 'bool', [], virt),
        ('IsStateTx', 'bool', [], virt),
        ('NotifyIntChange', 'void', [], virt),
        ('NotifyTransStartTx', 'void',
         [(pkt, 'packet'), ('double', 'txPowerDb'), ('ns3::UanTxMode', 'txMode')], virt),
        ('RegisterListener', 'void', [('ns3::UanPhyListener *', 'listener')], virt),
        ('SendPacket', 'void', [(pkt, 'pkt'), ('uint32_t', 'modeNum')], virt),
        ('SetCcaThresholdDb', 'void', [('double', 'thresh')], virt),
        ('SetCcaThresholdPhy1', 'void', [('double', 'thresh')], plain),
        ('SetCcaThresholdPhy2', 'void', [('double', 'thresh')], plain),
        ('SetChannel', 'void', [('ns3::Ptr< ns3::UanChannel >', 'channel')], virt),
        ('SetDevice', 'void', [('ns3::Ptr< ns3::UanNetDevice >', 'device')], virt),
        ('SetEnergyModelCallback', 'void',
         [('ns3::Callback< void, int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')], virt),
        ('SetMac', 'void', [('ns3::Ptr< ns3::UanMac >', 'mac')], virt),
        ('SetModesPhy1', 'void', [('ns3::UanModesList', 'modes')], plain),
        ('SetModesPhy2', 'void', [('ns3::UanModesList', 'modes')], plain),
        ('SetPerModelPhy1', 'void', [('ns3::Ptr< ns3::UanPhyPer >', 'per')], plain),
        ('SetPerModelPhy2', 'void', [('ns3::Ptr< ns3::UanPhyPer >', 'per')], plain),
        ('SetReceiveErrorCallback', 'void',
         [('ns3::Callback< void, ns3::Ptr< ns3::Packet >, double, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], virt),
        ('SetReceiveOkCallback', 'void',
         [('ns3::Callback< void, ns3::Ptr< ns3::Packet >, double, ns3::UanTxMode, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], virt),
        ('SetRxGainDb', 'void', [('double', 'gain')], virt),
        ('SetRxGainDbPhy1', 'void', [('double', 'gain')], plain),
        ('SetRxGainDbPhy2', 'void', [('double', 'gain')], plain),
        ('SetRxThresholdDb', 'void', [('double', 'thresh')], virt),
        ('SetSinrModelPhy1', 'void', [('ns3::Ptr< ns3::UanPhyCalcSinr >', 'calcSinr')], plain),
        ('SetSinrModelPhy2', 'void', [('ns3::Ptr< ns3::UanPhyCalcSinr >', 'calcSinr')], plain),
        ('SetSleepMode', 'void', [('bool', 'sleep')], virt),
        ('SetTransducer', 'void', [('ns3::Ptr< ns3::UanTransducer >', 'trans')], virt),
        ('SetTxPowerDb', 'void', [('double', 'txpwr')], virt),
        ('SetTxPowerDbPhy1', 'void', [('double', 'txpwr')], plain),
        ('SetTxPowerDbPhy2', 'void', [('double', 'txpwr')], plain),
        ('StartRxPacket', 'void',
         [(pkt, 'pkt'), ('double', 'rxPowerDb'), ('ns3::UanTxMode', 'txMode'), ('ns3::UanPdp', 'pdp')], virt),
        ('DoDispose', 'void', [], dict(visibility='protected', is_virtual=True)),
    ]
    for name, retval, args, extra in method_table:
        cls.add_method(name, retval, [param(t, n) for t, n in args], **extra)
    return
def register_Ns3UanPhyGen_methods(root_module, cls):
    """Register the ns3::UanPhyGen bindings (uan-phy-gen.h) on *cls*.

    Mirrors the C++ class declaration: constructors first, then member
    functions in alphabetical order, with DoDispose exposed as protected.
    """
    # Copy and default constructors.
    cls.add_constructor([param('ns3::UanPhyGen const &', 'arg0')])
    cls.add_constructor([])
    # Virtual member functions inherited from ns3::UanPhy.
    cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')], is_virtual=True)
    cls.add_method('Clear', 'void', [], is_virtual=True)
    cls.add_method('EnergyDepletionHandler', 'void', [], is_virtual=True)
    cls.add_method('EnergyRechargeHandler', 'void', [], is_virtual=True)
    cls.add_method('GetCcaThresholdDb', 'double', [], is_virtual=True)
    cls.add_method('GetChannel', 'ns3::Ptr< ns3::UanChannel >', [], is_const=True, is_virtual=True)
    cls.add_method('GetDefaultModes', 'ns3::UanModesList', [], is_static=True)
    cls.add_method('GetDevice', 'ns3::Ptr< ns3::UanNetDevice >', [], is_const=True, is_virtual=True)
    cls.add_method('GetMode', 'ns3::UanTxMode', [param('uint32_t', 'n')], is_virtual=True)
    cls.add_method('GetNModes', 'uint32_t', [], is_virtual=True)
    cls.add_method('GetPacketRx', 'ns3::Ptr< ns3::Packet >', [], is_const=True, is_virtual=True)
    cls.add_method('GetRxGainDb', 'double', [], is_virtual=True)
    cls.add_method('GetRxThresholdDb', 'double', [], is_virtual=True)
    cls.add_method('GetTransducer', 'ns3::Ptr< ns3::UanTransducer >', [], is_virtual=True)
    cls.add_method('GetTxPowerDb', 'double', [], is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # PHY state queries.
    cls.add_method('IsStateBusy', 'bool', [], is_virtual=True)
    cls.add_method('IsStateCcaBusy', 'bool', [], is_virtual=True)
    cls.add_method('IsStateIdle', 'bool', [], is_virtual=True)
    cls.add_method('IsStateRx', 'bool', [], is_virtual=True)
    cls.add_method('IsStateSleep', 'bool', [], is_virtual=True)
    cls.add_method('IsStateTx', 'bool', [], is_virtual=True)
    cls.add_method('NotifyIntChange', 'void', [], is_virtual=True)
    cls.add_method('NotifyTransStartTx', 'void', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('double', 'txPowerDb'), param('ns3::UanTxMode', 'txMode')], is_virtual=True)
    cls.add_method('RegisterListener', 'void', [param('ns3::UanPhyListener *', 'listener')], is_virtual=True)
    cls.add_method('SendPacket', 'void', [param('ns3::Ptr< ns3::Packet >', 'pkt'), param('uint32_t', 'modeNum')], is_virtual=True)
    cls.add_method('SetCcaThresholdDb', 'void', [param('double', 'thresh')], is_virtual=True)
    cls.add_method('SetChannel', 'void', [param('ns3::Ptr< ns3::UanChannel >', 'channel')], is_virtual=True)
    cls.add_method('SetDevice', 'void', [param('ns3::Ptr< ns3::UanNetDevice >', 'device')], is_virtual=True)
    cls.add_method('SetEnergyModelCallback', 'void', [param('ns3::Callback< void, int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_virtual=True)
    cls.add_method('SetMac', 'void', [param('ns3::Ptr< ns3::UanMac >', 'mac')], is_virtual=True)
    cls.add_method('SetReceiveErrorCallback', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Packet >, double, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_virtual=True)
    cls.add_method('SetReceiveOkCallback', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Packet >, double, ns3::UanTxMode, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_virtual=True)
    cls.add_method('SetRxGainDb', 'void', [param('double', 'gain')], is_virtual=True)
    cls.add_method('SetRxThresholdDb', 'void', [param('double', 'thresh')], is_virtual=True)
    cls.add_method('SetSleepMode', 'void', [param('bool', 'sleep')], is_virtual=True)
    cls.add_method('SetTransducer', 'void', [param('ns3::Ptr< ns3::UanTransducer >', 'trans')], is_virtual=True)
    cls.add_method('SetTxPowerDb', 'void', [param('double', 'txpwr')], is_virtual=True)
    cls.add_method('StartRxPacket', 'void', [param('ns3::Ptr< ns3::Packet >', 'pkt'), param('double', 'rxPowerDb'), param('ns3::UanTxMode', 'txMode'), param('ns3::UanPdp', 'pdp')], is_virtual=True)
    # Protected override from ns3::Object.
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    return
def register_Ns3UanPhyPer_methods(root_module, cls):
    """Register the ns3::UanPhyPer bindings (uan-phy.h) on *cls*.

    UanPhyPer is an abstract base: CalcPer is pure virtual.
    """
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::UanPhyPer const &', 'arg0')])
    # Pure-virtual packet-error-rate hook implemented by subclasses.
    cls.add_method('CalcPer', 'double', [param('ns3::Ptr< ns3::Packet >', 'pkt'), param('double', 'sinrDb'), param('ns3::UanTxMode', 'mode')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('Clear', 'void', [], is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    return
def register_Ns3UanPhyPerGenDefault_methods(root_module, cls):
    """Register the ns3::UanPhyPerGenDefault bindings (uan-phy-gen.h) on *cls*."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::UanPhyPerGenDefault const &', 'arg0')])
    cls.add_constructor([])
    # Concrete override of UanPhyPer::CalcPer.
    cls.add_method('CalcPer', 'double', [param('ns3::Ptr< ns3::Packet >', 'pkt'), param('double', 'sinrDb'), param('ns3::UanTxMode', 'mode')], is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    return
def register_Ns3UanPhyPerUmodem_methods(root_module, cls):
    """Register the ns3::UanPhyPerUmodem bindings (uan-phy-gen.h) on *cls*."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::UanPhyPerUmodem const &', 'arg0')])
    cls.add_constructor([])
    # Concrete override of UanPhyPer::CalcPer.
    cls.add_method('CalcPer', 'double', [param('ns3::Ptr< ns3::Packet >', 'pkt'), param('double', 'sinrDb'), param('ns3::UanTxMode', 'mode')], is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    return
def register_Ns3UanPropModel_methods(root_module, cls):
    """Register the ns3::UanPropModel bindings (uan-prop-model.h) on *cls*.

    Abstract propagation-model interface: delay, path loss and power-delay
    profile are pure virtual.
    """
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::UanPropModel const &', 'arg0')])
    cls.add_method('Clear', 'void', [], is_virtual=True)
    # NOTE: DoDispose is registered with public visibility here (no
    # visibility='protected'), matching the generated declaration.
    cls.add_method('DoDispose', 'void', [], is_virtual=True)
    cls.add_method('GetDelay', 'ns3::Time', [param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b'), param('ns3::UanTxMode', 'mode')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('GetPathLossDb', 'double', [param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b'), param('ns3::UanTxMode', 'txMode')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('GetPdp', 'ns3::UanPdp', [param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b'), param('ns3::UanTxMode', 'mode')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    return
def register_Ns3UanPropModelIdeal_methods(root_module, cls):
    """Register the ns3::UanPropModelIdeal bindings (uan-prop-model-ideal.h) on *cls*."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::UanPropModelIdeal const &', 'arg0')])
    cls.add_constructor([])
    # Concrete overrides of the UanPropModel interface.
    cls.add_method('GetDelay', 'ns3::Time', [param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b'), param('ns3::UanTxMode', 'mode')], is_virtual=True)
    cls.add_method('GetPathLossDb', 'double', [param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b'), param('ns3::UanTxMode', 'mode')], is_virtual=True)
    cls.add_method('GetPdp', 'ns3::UanPdp', [param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b'), param('ns3::UanTxMode', 'mode')], is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    return
def register_Ns3UanPropModelThorp_methods(root_module, cls):
    """Register the ns3::UanPropModelThorp bindings (uan-prop-model-thorp.h) on *cls*."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::UanPropModelThorp const &', 'arg0')])
    cls.add_constructor([])
    # Concrete overrides of the UanPropModel interface.
    cls.add_method('GetDelay', 'ns3::Time', [param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b'), param('ns3::UanTxMode', 'mode')], is_virtual=True)
    cls.add_method('GetPathLossDb', 'double', [param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b'), param('ns3::UanTxMode', 'mode')], is_virtual=True)
    cls.add_method('GetPdp', 'ns3::UanPdp', [param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b'), param('ns3::UanTxMode', 'mode')], is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    return
def register_Ns3UanTransducer_methods(root_module, cls):
    """Register the ns3::UanTransducer bindings (uan-transducer.h) on *cls*.

    Abstract transducer interface: every member function except GetTypeId
    is pure virtual.
    """
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::UanTransducer const &', 'arg0')])
    cls.add_method('AddPhy', 'void', [param('ns3::Ptr< ns3::UanPhy >', 'phy')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('Clear', 'void', [], is_pure_virtual=True, is_virtual=True)
    cls.add_method('GetArrivalList', 'std::list< ns3::UanPacketArrival > const &', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetChannel', 'ns3::Ptr< ns3::UanChannel >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetPhyList', 'std::list< ns3::Ptr< ns3::UanPhy > > const &', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetState', 'ns3::UanTransducer::State', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('IsRx', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('IsTx', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('Receive', 'void', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('double', 'rxPowerDb'), param('ns3::UanTxMode', 'txMode'), param('ns3::UanPdp', 'pdp')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetChannel', 'void', [param('ns3::Ptr< ns3::UanChannel >', 'chan')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('Transmit', 'void', [param('ns3::Ptr< ns3::UanPhy >', 'src'), param('ns3::Ptr< ns3::Packet >', 'packet'), param('double', 'txPowerDb'), param('ns3::UanTxMode', 'txMode')], is_pure_virtual=True, is_virtual=True)
    return
def register_Ns3UanTransducerHd_methods(root_module, cls):
    """Register the ns3::UanTransducerHd bindings (uan-transducer-hd.h) on *cls*.

    Half-duplex implementation of the UanTransducer interface; all overrides
    are plain virtuals, plus a protected DoDispose.
    """
    # Copy and default constructors.
    cls.add_constructor([param('ns3::UanTransducerHd const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('AddPhy', 'void', [param('ns3::Ptr< ns3::UanPhy >', 'arg0')], is_virtual=True)
    cls.add_method('Clear', 'void', [], is_virtual=True)
    cls.add_method('GetArrivalList', 'std::list< ns3::UanPacketArrival > const &', [], is_const=True, is_virtual=True)
    cls.add_method('GetChannel', 'ns3::Ptr< ns3::UanChannel >', [], is_const=True, is_virtual=True)
    cls.add_method('GetPhyList', 'std::list< ns3::Ptr< ns3::UanPhy > > const &', [], is_const=True, is_virtual=True)
    cls.add_method('GetState', 'ns3::UanTransducer::State', [], is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('IsRx', 'bool', [], is_const=True, is_virtual=True)
    cls.add_method('IsTx', 'bool', [], is_const=True, is_virtual=True)
    cls.add_method('Receive', 'void', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('double', 'rxPowerDb'), param('ns3::UanTxMode', 'txMode'), param('ns3::UanPdp', 'pdp')], is_virtual=True)
    cls.add_method('SetChannel', 'void', [param('ns3::Ptr< ns3::UanChannel >', 'chan')], is_virtual=True)
    cls.add_method('Transmit', 'void', [param('ns3::Ptr< ns3::UanPhy >', 'src'), param('ns3::Ptr< ns3::Packet >', 'packet'), param('double', 'txPowerDb'), param('ns3::UanTxMode', 'txMode')], is_virtual=True)
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    return
def register_Ns3UniformRandomVariable_methods(root_module, cls):
    """Register the ns3::UniformRandomVariable bindings (random-variable-stream.h) on *cls*."""
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_constructor([])
    # Attribute accessors for the configured [min, max] range.
    cls.add_method('GetMin', 'double', [], is_const=True)
    cls.add_method('GetMax', 'double', [], is_const=True)
    # Parameterized draws (explicit bounds) ...
    cls.add_method('GetValue', 'double', [param('double', 'min'), param('double', 'max')])
    cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'min'), param('uint32_t', 'max')])
    # ... and the virtual no-argument draws using the attribute-set bounds.
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
    return
def register_Ns3WeibullRandomVariable_methods(root_module, cls):
    """Register the ns3::WeibullRandomVariable bindings (random-variable-stream.h) on *cls*."""
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_constructor([])
    # Attribute accessors for the configured distribution parameters.
    cls.add_method('GetScale', 'double', [], is_const=True)
    cls.add_method('GetShape', 'double', [], is_const=True)
    cls.add_method('GetBound', 'double', [], is_const=True)
    # Parameterized draws ...
    cls.add_method('GetValue', 'double', [param('double', 'scale'), param('double', 'shape'), param('double', 'bound')])
    cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'scale'), param('uint32_t', 'shape'), param('uint32_t', 'bound')])
    # ... and the virtual no-argument draws using attribute-set parameters.
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
    return
def register_Ns3ZetaRandomVariable_methods(root_module, cls):
    """Register the ns3::ZetaRandomVariable bindings (random-variable-stream.h) on *cls*."""
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_constructor([])
    # Attribute accessor for the configured alpha parameter.
    cls.add_method('GetAlpha', 'double', [], is_const=True)
    # Parameterized draws ...
    cls.add_method('GetValue', 'double', [param('double', 'alpha')])
    cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'alpha')])
    # ... and the virtual no-argument draws using the attribute-set alpha.
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
    return
def register_Ns3ZipfRandomVariable_methods(root_module, cls):
    """Register the ns3::ZipfRandomVariable bindings (random-variable-stream.h) on *cls*."""
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_constructor([])
    # Attribute accessors for the configured n and alpha parameters.
    cls.add_method('GetN', 'uint32_t', [], is_const=True)
    cls.add_method('GetAlpha', 'double', [], is_const=True)
    # Parameterized draws ...
    cls.add_method('GetValue', 'double', [param('uint32_t', 'n'), param('double', 'alpha')])
    cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'n'), param('uint32_t', 'alpha')])
    # ... and the virtual no-argument draws using attribute-set parameters.
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
    return
def register_Ns3AttributeAccessor_methods(root_module, cls):
    """Register the ns3::AttributeAccessor bindings (attribute.h) on *cls*.

    Abstract accessor interface: Get/Set and the Has* queries are all
    pure virtual.
    """
    # Copy and default constructors.
    cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('Get', 'bool', [param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('HasGetter', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('HasSetter', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    # Set keeps ownership of the target object with the caller.
    cls.add_method('Set', 'bool', [param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3AttributeChecker_methods(root_module, cls):
    """Register ns3::AttributeChecker bindings (attribute.h, module 'core').

    PyBindGen auto-generated; do not edit by hand.
    """
    ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
    cls.add_method('Check',
                   'bool',
                   [param('ns3::AttributeValue const &', 'value')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
    cls.add_method('Copy',
                   'bool',
                   [param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function]
    cls.add_method('Create',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function]
    cls.add_method('CreateValidValue',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [param('ns3::AttributeValue const &', 'value')],
                   is_const=True)
    ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function]
    cls.add_method('GetUnderlyingTypeInformation',
                   'std::string',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function]
    cls.add_method('GetValueTypeName',
                   'std::string',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function]
    cls.add_method('HasUnderlyingTypeInformation',
                   'bool',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3AttributeValue_methods(root_module, cls):
    """Register ns3::AttributeValue bindings (attribute.h, module 'core').

    PyBindGen auto-generated; do not edit by hand.
    """
    ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_pure_virtual=True, is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3BooleanChecker_methods(root_module, cls):
    """Register ns3::BooleanChecker bindings (boolean.h, module 'core').

    PyBindGen auto-generated; do not edit by hand.  Only the default and
    copy constructors are exposed.
    """
    ## boolean.h (module 'core'): ns3::BooleanChecker::BooleanChecker() [constructor]
    cls.add_constructor([])
    ## boolean.h (module 'core'): ns3::BooleanChecker::BooleanChecker(ns3::BooleanChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::BooleanChecker const &', 'arg0')])
    return
def register_Ns3BooleanValue_methods(root_module, cls):
    """Register ns3::BooleanValue bindings (boolean.h, module 'core').

    PyBindGen auto-generated; do not edit by hand.  Also wires up the C++
    operator<< so the value is printable from Python.
    """
    cls.add_output_stream_operator()
    ## boolean.h (module 'core'): ns3::BooleanValue::BooleanValue(ns3::BooleanValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::BooleanValue const &', 'arg0')])
    ## boolean.h (module 'core'): ns3::BooleanValue::BooleanValue() [constructor]
    cls.add_constructor([])
    ## boolean.h (module 'core'): ns3::BooleanValue::BooleanValue(bool value) [constructor]
    cls.add_constructor([param('bool', 'value')])
    ## boolean.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::BooleanValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## boolean.h (module 'core'): bool ns3::BooleanValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## boolean.h (module 'core'): bool ns3::BooleanValue::Get() const [member function]
    cls.add_method('Get',
                   'bool',
                   [],
                   is_const=True)
    ## boolean.h (module 'core'): std::string ns3::BooleanValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## boolean.h (module 'core'): void ns3::BooleanValue::Set(bool value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('bool', 'value')])
    return
def register_Ns3CallbackChecker_methods(root_module, cls):
    """Register ns3::CallbackChecker bindings (callback.h, module 'core').

    PyBindGen auto-generated; do not edit by hand.  Only the default and
    copy constructors are exposed.
    """
    ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
    return
def register_Ns3CallbackImplBase_methods(root_module, cls):
    """Register ns3::CallbackImplBase bindings (callback.h, module 'core').

    PyBindGen auto-generated; do not edit by hand.  Demangle is static and
    registered with protected visibility, matching the C++ declaration.
    """
    ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
    ## callback.h (module 'core'): std::string ns3::CallbackImplBase::GetTypeid() const [member function]
    cls.add_method('GetTypeid',
                   'std::string',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function]
    cls.add_method('IsEqual',
                   'bool',
                   [param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## callback.h (module 'core'): static std::string ns3::CallbackImplBase::Demangle(std::string const & mangled) [member function]
    cls.add_method('Demangle',
                   'std::string',
                   [param('std::string const &', 'mangled')],
                   is_static=True, visibility='protected')
    return
def register_Ns3CallbackValue_methods(root_module, cls):
    """Register ns3::CallbackValue bindings (callback.h, module 'core').

    PyBindGen auto-generated; do not edit by hand.
    """
    ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
    ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor]
    cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
    ## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::CallbackBase', 'base')])
    return
def register_Ns3Channel_methods(root_module, cls):
    """Register ns3::Channel bindings (channel.h, module 'network').

    PyBindGen auto-generated; do not edit by hand.
    """
    ## channel.h (module 'network'): ns3::Channel::Channel(ns3::Channel const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Channel const &', 'arg0')])
    ## channel.h (module 'network'): ns3::Channel::Channel() [constructor]
    cls.add_constructor([])
    ## channel.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Channel::GetDevice(uint32_t i) const [member function]
    cls.add_method('GetDevice',
                   'ns3::Ptr< ns3::NetDevice >',
                   [param('uint32_t', 'i')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## channel.h (module 'network'): uint32_t ns3::Channel::GetId() const [member function]
    cls.add_method('GetId',
                   'uint32_t',
                   [],
                   is_const=True)
    ## channel.h (module 'network'): uint32_t ns3::Channel::GetNDevices() const [member function]
    cls.add_method('GetNDevices',
                   'uint32_t',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## channel.h (module 'network'): static ns3::TypeId ns3::Channel::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    return
def register_Ns3ConstantRandomVariable_methods(root_module, cls):
    """Register ns3::ConstantRandomVariable bindings (random-variable-stream.h, module 'core').

    PyBindGen auto-generated; do not edit by hand.  Both the parameterized
    and no-argument overloads of GetValue/GetInteger are registered.
    """
    ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ConstantRandomVariable::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## random-variable-stream.h (module 'core'): ns3::ConstantRandomVariable::ConstantRandomVariable() [constructor]
    cls.add_constructor([])
    ## random-variable-stream.h (module 'core'): double ns3::ConstantRandomVariable::GetConstant() const [member function]
    cls.add_method('GetConstant',
                   'double',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::ConstantRandomVariable::GetValue(double constant) [member function]
    cls.add_method('GetValue',
                   'double',
                   [param('double', 'constant')])
    ## random-variable-stream.h (module 'core'): uint32_t ns3::ConstantRandomVariable::GetInteger(uint32_t constant) [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [param('uint32_t', 'constant')])
    ## random-variable-stream.h (module 'core'): double ns3::ConstantRandomVariable::GetValue() [member function]
    cls.add_method('GetValue',
                   'double',
                   [],
                   is_virtual=True)
    ## random-variable-stream.h (module 'core'): uint32_t ns3::ConstantRandomVariable::GetInteger() [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [],
                   is_virtual=True)
    return
def register_Ns3DeterministicRandomVariable_methods(root_module, cls):
    """Register ns3::DeterministicRandomVariable bindings (random-variable-stream.h, module 'core').

    PyBindGen auto-generated; do not edit by hand.
    """
    ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::DeterministicRandomVariable::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## random-variable-stream.h (module 'core'): ns3::DeterministicRandomVariable::DeterministicRandomVariable() [constructor]
    cls.add_constructor([])
    ## random-variable-stream.h (module 'core'): void ns3::DeterministicRandomVariable::SetValueArray(double * values, uint64_t length) [member function]
    cls.add_method('SetValueArray',
                   'void',
                   [param('double *', 'values'), param('uint64_t', 'length')])
    ## random-variable-stream.h (module 'core'): double ns3::DeterministicRandomVariable::GetValue() [member function]
    cls.add_method('GetValue',
                   'double',
                   [],
                   is_virtual=True)
    ## random-variable-stream.h (module 'core'): uint32_t ns3::DeterministicRandomVariable::GetInteger() [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [],
                   is_virtual=True)
    return
def register_Ns3DeviceEnergyModel_methods(root_module, cls):
    """Register ns3::DeviceEnergyModel bindings (device-energy-model.h, module 'energy').

    PyBindGen auto-generated; do not edit by hand.  DoGetCurrentA is the
    private virtual hook backing the public GetCurrentA.
    """
    ## device-energy-model.h (module 'energy'): ns3::DeviceEnergyModel::DeviceEnergyModel(ns3::DeviceEnergyModel const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::DeviceEnergyModel const &', 'arg0')])
    ## device-energy-model.h (module 'energy'): ns3::DeviceEnergyModel::DeviceEnergyModel() [constructor]
    cls.add_constructor([])
    ## device-energy-model.h (module 'energy'): void ns3::DeviceEnergyModel::ChangeState(int newState) [member function]
    cls.add_method('ChangeState',
                   'void',
                   [param('int', 'newState')],
                   is_pure_virtual=True, is_virtual=True)
    ## device-energy-model.h (module 'energy'): double ns3::DeviceEnergyModel::GetCurrentA() const [member function]
    cls.add_method('GetCurrentA',
                   'double',
                   [],
                   is_const=True)
    ## device-energy-model.h (module 'energy'): double ns3::DeviceEnergyModel::GetTotalEnergyConsumption() const [member function]
    cls.add_method('GetTotalEnergyConsumption',
                   'double',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## device-energy-model.h (module 'energy'): static ns3::TypeId ns3::DeviceEnergyModel::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## device-energy-model.h (module 'energy'): void ns3::DeviceEnergyModel::HandleEnergyDepletion() [member function]
    cls.add_method('HandleEnergyDepletion',
                   'void',
                   [],
                   is_pure_virtual=True, is_virtual=True)
    ## device-energy-model.h (module 'energy'): void ns3::DeviceEnergyModel::HandleEnergyRecharged() [member function]
    cls.add_method('HandleEnergyRecharged',
                   'void',
                   [],
                   is_pure_virtual=True, is_virtual=True)
    ## device-energy-model.h (module 'energy'): void ns3::DeviceEnergyModel::SetEnergySource(ns3::Ptr<ns3::EnergySource> source) [member function]
    cls.add_method('SetEnergySource',
                   'void',
                   [param('ns3::Ptr< ns3::EnergySource >', 'source')],
                   is_pure_virtual=True, is_virtual=True)
    ## device-energy-model.h (module 'energy'): double ns3::DeviceEnergyModel::DoGetCurrentA() const [member function]
    cls.add_method('DoGetCurrentA',
                   'double',
                   [],
                   is_const=True, visibility='private', is_virtual=True)
    return
def register_Ns3DoubleValue_methods(root_module, cls):
    """Register ns3::DoubleValue bindings (double.h, module 'core').

    PyBindGen auto-generated; do not edit by hand.
    """
    ## double.h (module 'core'): ns3::DoubleValue::DoubleValue() [constructor]
    cls.add_constructor([])
    ## double.h (module 'core'): ns3::DoubleValue::DoubleValue(ns3::DoubleValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::DoubleValue const &', 'arg0')])
    ## double.h (module 'core'): ns3::DoubleValue::DoubleValue(double const & value) [constructor]
    cls.add_constructor([param('double const &', 'value')])
    ## double.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::DoubleValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## double.h (module 'core'): bool ns3::DoubleValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## double.h (module 'core'): double ns3::DoubleValue::Get() const [member function]
    cls.add_method('Get',
                   'double',
                   [],
                   is_const=True)
    ## double.h (module 'core'): std::string ns3::DoubleValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## double.h (module 'core'): void ns3::DoubleValue::Set(double const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('double const &', 'value')])
    return
def register_Ns3EmpiricalRandomVariable_methods(root_module, cls):
    """Register ns3::EmpiricalRandomVariable bindings (random-variable-stream.h, module 'core').

    PyBindGen auto-generated; do not edit by hand.  Interpolate and Validate
    are private virtuals, registered with visibility='private'.
    """
    ## random-variable-stream.h (module 'core'): ns3::EmpiricalRandomVariable::EmpiricalRandomVariable() [constructor]
    cls.add_constructor([])
    ## random-variable-stream.h (module 'core'): void ns3::EmpiricalRandomVariable::CDF(double v, double c) [member function]
    cls.add_method('CDF',
                   'void',
                   [param('double', 'v'), param('double', 'c')])
    ## random-variable-stream.h (module 'core'): uint32_t ns3::EmpiricalRandomVariable::GetInteger() [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [],
                   is_virtual=True)
    ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::EmpiricalRandomVariable::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## random-variable-stream.h (module 'core'): double ns3::EmpiricalRandomVariable::GetValue() [member function]
    cls.add_method('GetValue',
                   'double',
                   [],
                   is_virtual=True)
    ## random-variable-stream.h (module 'core'): double ns3::EmpiricalRandomVariable::Interpolate(double c1, double c2, double v1, double v2, double r) [member function]
    cls.add_method('Interpolate',
                   'double',
                   [param('double', 'c1'), param('double', 'c2'), param('double', 'v1'), param('double', 'v2'), param('double', 'r')],
                   visibility='private', is_virtual=True)
    ## random-variable-stream.h (module 'core'): void ns3::EmpiricalRandomVariable::Validate() [member function]
    cls.add_method('Validate',
                   'void',
                   [],
                   visibility='private', is_virtual=True)
    return
def register_Ns3EmptyAttributeAccessor_methods(root_module, cls):
    """Register ns3::EmptyAttributeAccessor bindings (attribute.h, module 'core').

    PyBindGen auto-generated; do not edit by hand.  Concrete overrides of
    the AttributeAccessor interface (no longer pure virtual here).
    """
    ## attribute.h (module 'core'): ns3::EmptyAttributeAccessor::EmptyAttributeAccessor(ns3::EmptyAttributeAccessor const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EmptyAttributeAccessor const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::EmptyAttributeAccessor::EmptyAttributeAccessor() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
    cls.add_method('Get',
                   'bool',
                   [param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
                   is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::HasGetter() const [member function]
    cls.add_method('HasGetter',
                   'bool',
                   [],
                   is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::HasSetter() const [member function]
    cls.add_method('HasSetter',
                   'bool',
                   [],
                   is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
    cls.add_method('Set',
                   'bool',
                   [param('ns3::ObjectBase *', 'object'), param('ns3::AttributeValue const &', 'value')],
                   is_const=True, is_virtual=True)
    return
def register_Ns3EmptyAttributeChecker_methods(root_module, cls):
    """Register ns3::EmptyAttributeChecker bindings (attribute.h, module 'core').

    PyBindGen auto-generated; do not edit by hand.
    """
    ## attribute.h (module 'core'): ns3::EmptyAttributeChecker::EmptyAttributeChecker(ns3::EmptyAttributeChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EmptyAttributeChecker const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::EmptyAttributeChecker::EmptyAttributeChecker() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
    cls.add_method('Check',
                   'bool',
                   [param('ns3::AttributeValue const &', 'value')],
                   is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
    cls.add_method('Copy',
                   'bool',
                   [param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
                   is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeChecker::Create() const [member function]
    cls.add_method('Create',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::EmptyAttributeChecker::GetUnderlyingTypeInformation() const [member function]
    cls.add_method('GetUnderlyingTypeInformation',
                   'std::string',
                   [],
                   is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::EmptyAttributeChecker::GetValueTypeName() const [member function]
    cls.add_method('GetValueTypeName',
                   'std::string',
                   [],
                   is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::HasUnderlyingTypeInformation() const [member function]
    cls.add_method('HasUnderlyingTypeInformation',
                   'bool',
                   [],
                   is_const=True, is_virtual=True)
    return
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
    """Register ns3::EmptyAttributeValue bindings (attribute.h, module 'core').

    PyBindGen auto-generated; do not edit by hand.  The overrides are
    private virtuals in the C++ class, hence visibility='private'.
    """
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, visibility='private', is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   visibility='private', is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, visibility='private', is_virtual=True)
    return
def register_Ns3EnergyHarvester_methods(root_module, cls):
    """Register ns3::EnergyHarvester bindings (energy-harvester.h, module 'energy').

    PyBindGen auto-generated; do not edit by hand.  DoDispose and DoGetPower
    are private virtual hooks of the ns-3 Object lifecycle / power query.
    """
    ## energy-harvester.h (module 'energy'): ns3::EnergyHarvester::EnergyHarvester(ns3::EnergyHarvester const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EnergyHarvester const &', 'arg0')])
    ## energy-harvester.h (module 'energy'): ns3::EnergyHarvester::EnergyHarvester() [constructor]
    cls.add_constructor([])
    ## energy-harvester.h (module 'energy'): ns3::Ptr<ns3::EnergySource> ns3::EnergyHarvester::GetEnergySource() const [member function]
    cls.add_method('GetEnergySource',
                   'ns3::Ptr< ns3::EnergySource >',
                   [],
                   is_const=True)
    ## energy-harvester.h (module 'energy'): ns3::Ptr<ns3::Node> ns3::EnergyHarvester::GetNode() const [member function]
    cls.add_method('GetNode',
                   'ns3::Ptr< ns3::Node >',
                   [],
                   is_const=True)
    ## energy-harvester.h (module 'energy'): double ns3::EnergyHarvester::GetPower() const [member function]
    cls.add_method('GetPower',
                   'double',
                   [],
                   is_const=True)
    ## energy-harvester.h (module 'energy'): static ns3::TypeId ns3::EnergyHarvester::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## energy-harvester.h (module 'energy'): void ns3::EnergyHarvester::SetEnergySource(ns3::Ptr<ns3::EnergySource> source) [member function]
    cls.add_method('SetEnergySource',
                   'void',
                   [param('ns3::Ptr< ns3::EnergySource >', 'source')])
    ## energy-harvester.h (module 'energy'): void ns3::EnergyHarvester::SetNode(ns3::Ptr<ns3::Node> node) [member function]
    cls.add_method('SetNode',
                   'void',
                   [param('ns3::Ptr< ns3::Node >', 'node')])
    ## energy-harvester.h (module 'energy'): void ns3::EnergyHarvester::DoDispose() [member function]
    cls.add_method('DoDispose',
                   'void',
                   [],
                   visibility='private', is_virtual=True)
    ## energy-harvester.h (module 'energy'): double ns3::EnergyHarvester::DoGetPower() const [member function]
    cls.add_method('DoGetPower',
                   'double',
                   [],
                   is_const=True, visibility='private', is_virtual=True)
    return
def register_Ns3EnergySource_methods(root_module, cls):
    """Register ns3::EnergySource bindings (energy-source.h, module 'energy').

    PyBindGen auto-generated; do not edit by hand.  Includes the protected
    notification helpers (BreakDeviceEnergyModelRefCycle, NotifyEnergy*) and
    the private DoDispose override.
    """
    ## energy-source.h (module 'energy'): ns3::EnergySource::EnergySource(ns3::EnergySource const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EnergySource const &', 'arg0')])
    ## energy-source.h (module 'energy'): ns3::EnergySource::EnergySource() [constructor]
    cls.add_constructor([])
    ## energy-source.h (module 'energy'): void ns3::EnergySource::AppendDeviceEnergyModel(ns3::Ptr<ns3::DeviceEnergyModel> deviceEnergyModelPtr) [member function]
    cls.add_method('AppendDeviceEnergyModel',
                   'void',
                   [param('ns3::Ptr< ns3::DeviceEnergyModel >', 'deviceEnergyModelPtr')])
    ## energy-source.h (module 'energy'): void ns3::EnergySource::ConnectEnergyHarvester(ns3::Ptr<ns3::EnergyHarvester> energyHarvesterPtr) [member function]
    cls.add_method('ConnectEnergyHarvester',
                   'void',
                   [param('ns3::Ptr< ns3::EnergyHarvester >', 'energyHarvesterPtr')])
    ## energy-source.h (module 'energy'): void ns3::EnergySource::DisposeDeviceModels() [member function]
    cls.add_method('DisposeDeviceModels',
                   'void',
                   [])
    ## energy-source.h (module 'energy'): ns3::DeviceEnergyModelContainer ns3::EnergySource::FindDeviceEnergyModels(ns3::TypeId tid) [member function]
    cls.add_method('FindDeviceEnergyModels',
                   'ns3::DeviceEnergyModelContainer',
                   [param('ns3::TypeId', 'tid')])
    ## energy-source.h (module 'energy'): ns3::DeviceEnergyModelContainer ns3::EnergySource::FindDeviceEnergyModels(std::string name) [member function]
    cls.add_method('FindDeviceEnergyModels',
                   'ns3::DeviceEnergyModelContainer',
                   [param('std::string', 'name')])
    ## energy-source.h (module 'energy'): double ns3::EnergySource::GetEnergyFraction() [member function]
    cls.add_method('GetEnergyFraction',
                   'double',
                   [],
                   is_pure_virtual=True, is_virtual=True)
    ## energy-source.h (module 'energy'): double ns3::EnergySource::GetInitialEnergy() const [member function]
    cls.add_method('GetInitialEnergy',
                   'double',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## energy-source.h (module 'energy'): ns3::Ptr<ns3::Node> ns3::EnergySource::GetNode() const [member function]
    cls.add_method('GetNode',
                   'ns3::Ptr< ns3::Node >',
                   [],
                   is_const=True)
    ## energy-source.h (module 'energy'): double ns3::EnergySource::GetRemainingEnergy() [member function]
    cls.add_method('GetRemainingEnergy',
                   'double',
                   [],
                   is_pure_virtual=True, is_virtual=True)
    ## energy-source.h (module 'energy'): double ns3::EnergySource::GetSupplyVoltage() const [member function]
    cls.add_method('GetSupplyVoltage',
                   'double',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## energy-source.h (module 'energy'): static ns3::TypeId ns3::EnergySource::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## energy-source.h (module 'energy'): void ns3::EnergySource::InitializeDeviceModels() [member function]
    cls.add_method('InitializeDeviceModels',
                   'void',
                   [])
    ## energy-source.h (module 'energy'): void ns3::EnergySource::SetNode(ns3::Ptr<ns3::Node> node) [member function]
    cls.add_method('SetNode',
                   'void',
                   [param('ns3::Ptr< ns3::Node >', 'node')])
    ## energy-source.h (module 'energy'): void ns3::EnergySource::UpdateEnergySource() [member function]
    cls.add_method('UpdateEnergySource',
                   'void',
                   [],
                   is_pure_virtual=True, is_virtual=True)
    ## energy-source.h (module 'energy'): void ns3::EnergySource::BreakDeviceEnergyModelRefCycle() [member function]
    cls.add_method('BreakDeviceEnergyModelRefCycle',
                   'void',
                   [],
                   visibility='protected')
    ## energy-source.h (module 'energy'): double ns3::EnergySource::CalculateTotalCurrent() [member function]
    cls.add_method('CalculateTotalCurrent',
                   'double',
                   [],
                   visibility='protected')
    ## energy-source.h (module 'energy'): void ns3::EnergySource::NotifyEnergyDrained() [member function]
    cls.add_method('NotifyEnergyDrained',
                   'void',
                   [],
                   visibility='protected')
    ## energy-source.h (module 'energy'): void ns3::EnergySource::NotifyEnergyRecharged() [member function]
    cls.add_method('NotifyEnergyRecharged',
                   'void',
                   [],
                   visibility='protected')
    ## energy-source.h (module 'energy'): void ns3::EnergySource::DoDispose() [member function]
    cls.add_method('DoDispose',
                   'void',
                   [],
                   visibility='private', is_virtual=True)
    return
def register_Ns3EnergySourceContainer_methods(root_module, cls):
    """Register ns3::EnergySourceContainer bindings (energy-source-container.h, module 'energy').

    PyBindGen auto-generated; do not edit by hand.  Begin/End expose the
    underlying std::vector iterators of the container.
    """
    ## energy-source-container.h (module 'energy'): ns3::EnergySourceContainer::EnergySourceContainer(ns3::EnergySourceContainer const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EnergySourceContainer const &', 'arg0')])
    ## energy-source-container.h (module 'energy'): ns3::EnergySourceContainer::EnergySourceContainer() [constructor]
    cls.add_constructor([])
    ## energy-source-container.h (module 'energy'): ns3::EnergySourceContainer::EnergySourceContainer(ns3::Ptr<ns3::EnergySource> source) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::EnergySource >', 'source')])
    ## energy-source-container.h (module 'energy'): ns3::EnergySourceContainer::EnergySourceContainer(std::string sourceName) [constructor]
    cls.add_constructor([param('std::string', 'sourceName')])
    ## energy-source-container.h (module 'energy'): ns3::EnergySourceContainer::EnergySourceContainer(ns3::EnergySourceContainer const & a, ns3::EnergySourceContainer const & b) [constructor]
    cls.add_constructor([param('ns3::EnergySourceContainer const &', 'a'), param('ns3::EnergySourceContainer const &', 'b')])
    ## energy-source-container.h (module 'energy'): void ns3::EnergySourceContainer::Add(ns3::EnergySourceContainer container) [member function]
    cls.add_method('Add',
                   'void',
                   [param('ns3::EnergySourceContainer', 'container')])
    ## energy-source-container.h (module 'energy'): void ns3::EnergySourceContainer::Add(ns3::Ptr<ns3::EnergySource> source) [member function]
    cls.add_method('Add',
                   'void',
                   [param('ns3::Ptr< ns3::EnergySource >', 'source')])
    ## energy-source-container.h (module 'energy'): void ns3::EnergySourceContainer::Add(std::string sourceName) [member function]
    cls.add_method('Add',
                   'void',
                   [param('std::string', 'sourceName')])
    ## energy-source-container.h (module 'energy'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::EnergySource>*,std::vector<ns3::Ptr<ns3::EnergySource>, std::allocator<ns3::Ptr<ns3::EnergySource> > > > ns3::EnergySourceContainer::Begin() const [member function]
    cls.add_method('Begin',
                   '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::EnergySource > const, std::vector< ns3::Ptr< ns3::EnergySource > > >',
                   [],
                   is_const=True)
    ## energy-source-container.h (module 'energy'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::EnergySource>*,std::vector<ns3::Ptr<ns3::EnergySource>, std::allocator<ns3::Ptr<ns3::EnergySource> > > > ns3::EnergySourceContainer::End() const [member function]
    cls.add_method('End',
                   '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::EnergySource > const, std::vector< ns3::Ptr< ns3::EnergySource > > >',
                   [],
                   is_const=True)
    ## energy-source-container.h (module 'energy'): ns3::Ptr<ns3::EnergySource> ns3::EnergySourceContainer::Get(uint32_t i) const [member function]
    cls.add_method('Get',
                   'ns3::Ptr< ns3::EnergySource >',
                   [param('uint32_t', 'i')],
                   is_const=True)
    ## energy-source-container.h (module 'energy'): uint32_t ns3::EnergySourceContainer::GetN() const [member function]
    cls.add_method('GetN',
                   'uint32_t',
                   [],
                   is_const=True)
    ## energy-source-container.h (module 'energy'): static ns3::TypeId ns3::EnergySourceContainer::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## energy-source-container.h (module 'energy'): void ns3::EnergySourceContainer::DoDispose() [member function]
    cls.add_method('DoDispose',
                   'void',
                   [],
                   visibility='private', is_virtual=True)
    ## energy-source-container.h (module 'energy'): void ns3::EnergySourceContainer::DoInitialize() [member function]
    cls.add_method('DoInitialize',
                   'void',
                   [],
                   visibility='private', is_virtual=True)
    return
def register_Ns3EnumChecker_methods(root_module, cls):
    """Register ns3::EnumChecker bindings (enum.h, module 'core')."""
    # Constructors: copy first, then default, matching the scanned header order.
    cls.add_constructor([param('ns3::EnumChecker const &', 'arg0')])
    cls.add_constructor([])
    # Enum-entry registration helpers.
    cls.add_method('Add', 'void',
                   [param('int', 'value'), param('std::string', 'name')])
    cls.add_method('AddDefault', 'void',
                   [param('int', 'value'), param('std::string', 'name')])
    # AttributeChecker interface overrides.
    cls.add_method('Check', 'bool',
                   [param('ns3::AttributeValue const &', 'value')],
                   is_const=True, is_virtual=True)
    cls.add_method('Copy', 'bool',
                   [param('ns3::AttributeValue const &', 'src'), param('ns3::AttributeValue &', 'dst')],
                   is_const=True, is_virtual=True)
    cls.add_method('Create', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, is_virtual=True)
    cls.add_method('GetUnderlyingTypeInformation', 'std::string', [],
                   is_const=True, is_virtual=True)
    cls.add_method('GetValueTypeName', 'std::string', [],
                   is_const=True, is_virtual=True)
    cls.add_method('HasUnderlyingTypeInformation', 'bool', [],
                   is_const=True, is_virtual=True)
def register_Ns3EnumValue_methods(root_module, cls):
    """Register ns3::EnumValue bindings (enum.h, module 'core')."""
    # Constructors: copy, default, then value-wrapping, in header-scan order.
    cls.add_constructor([param('ns3::EnumValue const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('int', 'value')])
    # AttributeValue interface overrides plus typed accessors.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    cls.add_method('Get', 'int', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('int', 'value')])
def register_Ns3ErlangRandomVariable_methods(root_module, cls):
    """Register ns3::ErlangRandomVariable bindings (random-variable-stream.h, module 'core')."""
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_constructor([])
    # Distribution-parameter getters.
    cls.add_method('GetK', 'uint32_t', [], is_const=True)
    cls.add_method('GetLambda', 'double', [], is_const=True)
    # Explicit-parameter draws.
    cls.add_method('GetValue', 'double',
                   [param('uint32_t', 'k'), param('double', 'lambda')])
    cls.add_method('GetInteger', 'uint32_t',
                   [param('uint32_t', 'k'), param('uint32_t', 'lambda')])
    # Virtual draws using the configured parameters.
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
def register_Ns3EventImpl_methods(root_module, cls):
    """Register ns3::EventImpl bindings (event-impl.h, module 'core')."""
    # Constructors: copy first, then default, in header-scan order.
    cls.add_constructor([param('ns3::EventImpl const &', 'arg0')])
    cls.add_constructor([])
    # Public event-lifecycle API.
    cls.add_method('Cancel', 'void', [])
    cls.add_method('Invoke', 'void', [])
    cls.add_method('IsCancelled', 'bool', [])
    # Protected pure-virtual hook implemented by concrete events.
    cls.add_method('Notify', 'void', [],
                   is_pure_virtual=True, visibility='protected', is_virtual=True)
def register_Ns3ExponentialRandomVariable_methods(root_module, cls):
    """Register ns3::ExponentialRandomVariable bindings (random-variable-stream.h, module 'core')."""
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_constructor([])
    # Distribution-parameter getters.
    cls.add_method('GetMean', 'double', [], is_const=True)
    cls.add_method('GetBound', 'double', [], is_const=True)
    # Explicit-parameter draws.
    cls.add_method('GetValue', 'double',
                   [param('double', 'mean'), param('double', 'bound')])
    cls.add_method('GetInteger', 'uint32_t',
                   [param('uint32_t', 'mean'), param('uint32_t', 'bound')])
    # Virtual draws using the configured parameters.
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
def register_Ns3GammaRandomVariable_methods(root_module, cls):
    """Register ns3::GammaRandomVariable bindings (random-variable-stream.h, module 'core')."""
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_constructor([])
    # Distribution-parameter getters.
    cls.add_method('GetAlpha', 'double', [], is_const=True)
    cls.add_method('GetBeta', 'double', [], is_const=True)
    # Explicit-parameter draws.
    cls.add_method('GetValue', 'double',
                   [param('double', 'alpha'), param('double', 'beta')])
    cls.add_method('GetInteger', 'uint32_t',
                   [param('uint32_t', 'alpha'), param('uint32_t', 'beta')])
    # Virtual draws using the configured parameters.
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
def register_Ns3IntegerValue_methods(root_module, cls):
    """Register ns3::IntegerValue bindings (integer.h, module 'core')."""
    # Constructors: default, copy, then value-wrapping, in header-scan order.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::IntegerValue const &', 'arg0')])
    cls.add_constructor([param('int64_t const &', 'value')])
    # AttributeValue interface overrides plus typed accessors.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    cls.add_method('Get', 'int64_t', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('int64_t const &', 'value')])
def register_Ns3Ipv4AddressChecker_methods(root_module, cls):
    """Register ns3::Ipv4AddressChecker bindings (ipv4-address.h, module 'network')."""
    # Only the default and copy constructors are exposed.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv4AddressChecker const &', 'arg0')])
def register_Ns3Ipv4AddressValue_methods(root_module, cls):
    """Register ns3::Ipv4AddressValue bindings (ipv4-address.h, module 'network')."""
    # Constructors: default, copy, then value-wrapping, in header-scan order.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv4AddressValue const &', 'arg0')])
    cls.add_constructor([param('ns3::Ipv4Address const &', 'value')])
    # AttributeValue interface overrides plus typed accessors.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    cls.add_method('Get', 'ns3::Ipv4Address', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('ns3::Ipv4Address const &', 'value')])
def register_Ns3Ipv4MaskChecker_methods(root_module, cls):
    """Register ns3::Ipv4MaskChecker bindings (ipv4-address.h, module 'network')."""
    # Only the default and copy constructors are exposed.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv4MaskChecker const &', 'arg0')])
def register_Ns3Ipv4MaskValue_methods(root_module, cls):
    """Register ns3::Ipv4MaskValue bindings (ipv4-address.h, module 'network')."""
    # Constructors: default, copy, then value-wrapping, in header-scan order.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv4MaskValue const &', 'arg0')])
    cls.add_constructor([param('ns3::Ipv4Mask const &', 'value')])
    # AttributeValue interface overrides plus typed accessors.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    cls.add_method('Get', 'ns3::Ipv4Mask', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('ns3::Ipv4Mask const &', 'value')])
def register_Ns3Ipv6AddressChecker_methods(root_module, cls):
    """Register ns3::Ipv6AddressChecker bindings (ipv6-address.h, module 'network')."""
    # Only the default and copy constructors are exposed.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv6AddressChecker const &', 'arg0')])
def register_Ns3Ipv6AddressValue_methods(root_module, cls):
    """Register ns3::Ipv6AddressValue bindings (ipv6-address.h, module 'network')."""
    # Constructors: default, copy, then value-wrapping, in header-scan order.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv6AddressValue const &', 'arg0')])
    cls.add_constructor([param('ns3::Ipv6Address const &', 'value')])
    # AttributeValue interface overrides plus typed accessors.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    cls.add_method('Get', 'ns3::Ipv6Address', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('ns3::Ipv6Address const &', 'value')])
def register_Ns3Ipv6PrefixChecker_methods(root_module, cls):
    """Register ns3::Ipv6PrefixChecker bindings (ipv6-address.h, module 'network')."""
    # Only the default and copy constructors are exposed.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv6PrefixChecker const &', 'arg0')])
def register_Ns3Ipv6PrefixValue_methods(root_module, cls):
    """Register ns3::Ipv6PrefixValue bindings (ipv6-address.h, module 'network')."""
    # Constructors: default, copy, then value-wrapping, in header-scan order.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv6PrefixValue const &', 'arg0')])
    cls.add_constructor([param('ns3::Ipv6Prefix const &', 'value')])
    # AttributeValue interface overrides plus typed accessors.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    cls.add_method('Get', 'ns3::Ipv6Prefix', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('ns3::Ipv6Prefix const &', 'value')])
def register_Ns3LogNormalRandomVariable_methods(root_module, cls):
    """Register ns3::LogNormalRandomVariable bindings (random-variable-stream.h, module 'core')."""
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_constructor([])
    # Distribution-parameter getters.
    cls.add_method('GetMu', 'double', [], is_const=True)
    cls.add_method('GetSigma', 'double', [], is_const=True)
    # Explicit-parameter draws.
    cls.add_method('GetValue', 'double',
                   [param('double', 'mu'), param('double', 'sigma')])
    cls.add_method('GetInteger', 'uint32_t',
                   [param('uint32_t', 'mu'), param('uint32_t', 'sigma')])
    # Virtual draws using the configured parameters.
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
def register_Ns3Mac48AddressChecker_methods(root_module, cls):
    """Register ns3::Mac48AddressChecker bindings (mac48-address.h, module 'network')."""
    # Only the default and copy constructors are exposed.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Mac48AddressChecker const &', 'arg0')])
def register_Ns3Mac48AddressValue_methods(root_module, cls):
    """Register ns3::Mac48AddressValue bindings (mac48-address.h, module 'network')."""
    # Constructors: default, copy, then value-wrapping, in header-scan order.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Mac48AddressValue const &', 'arg0')])
    cls.add_constructor([param('ns3::Mac48Address const &', 'value')])
    # AttributeValue interface overrides plus typed accessors.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    cls.add_method('Get', 'ns3::Mac48Address', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('ns3::Mac48Address const &', 'value')])
def register_Ns3MobilityModel_methods(root_module, cls):
    """Register ns3::MobilityModel bindings (mobility-model.h, module 'mobility')."""
    # Constructors: copy first, then default, in header-scan order.
    cls.add_constructor([param('ns3::MobilityModel const &', 'arg0')])
    cls.add_constructor([])
    # Public API.
    cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')])
    cls.add_method('GetDistanceFrom', 'double',
                   [param('ns3::Ptr< ns3::MobilityModel const >', 'position')],
                   is_const=True)
    cls.add_method('GetPosition', 'ns3::Vector', [], is_const=True)
    cls.add_method('GetRelativeSpeed', 'double',
                   [param('ns3::Ptr< ns3::MobilityModel const >', 'other')],
                   is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('GetVelocity', 'ns3::Vector', [], is_const=True)
    cls.add_method('SetPosition', 'void', [param('ns3::Vector const &', 'position')])
    # Protected course-change notification hook.
    cls.add_method('NotifyCourseChange', 'void', [],
                   is_const=True, visibility='protected')
    # Private virtual implementation points that subclasses override.
    cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'start')],
                   visibility='private', is_virtual=True)
    cls.add_method('DoGetPosition', 'ns3::Vector', [],
                   is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
    cls.add_method('DoGetVelocity', 'ns3::Vector', [],
                   is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
    cls.add_method('DoSetPosition', 'void', [param('ns3::Vector const &', 'position')],
                   is_pure_virtual=True, visibility='private', is_virtual=True)
def register_Ns3NetDevice_methods(root_module, cls):
    """Register the abstract ns3::NetDevice interface (net-device.h, module 'network')."""
    # Constructors: default, then copy, in header-scan order.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::NetDevice const &', 'arg0')])
    cls.add_method('AddLinkChangeCallback', 'void',
                   [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')],
                   is_pure_virtual=True, is_virtual=True)
    # Pure-virtual const getters.
    cls.add_method('GetAddress', 'ns3::Address', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetBroadcast', 'ns3::Address', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetChannel', 'ns3::Ptr< ns3::Channel >', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetIfIndex', 'uint32_t', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetMtu', 'uint16_t', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetMulticast', 'ns3::Address',
                   [param('ns3::Ipv4Address', 'multicastGroup')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetMulticast', 'ns3::Address',
                   [param('ns3::Ipv6Address', 'addr')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # Capability predicates: all pure-virtual const bool with no arguments.
    for predicate in ('IsBridge', 'IsBroadcast', 'IsLinkUp', 'IsMulticast',
                      'IsPointToPoint', 'NeedsArp'):
        cls.add_method(predicate, 'bool', [],
                       is_pure_virtual=True, is_const=True, is_virtual=True)
    # Packet transmission entry points.
    cls.add_method('Send', 'bool',
                   [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
                   is_pure_virtual=True, is_virtual=True)
    cls.add_method('SendFrom', 'bool',
                   [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
                   is_pure_virtual=True, is_virtual=True)
    # Mutators and receive-callback registration.
    cls.add_method('SetAddress', 'void', [param('ns3::Address', 'address')],
                   is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetIfIndex', 'void', [param('uint32_t const', 'index')],
                   is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetMtu', 'bool', [param('uint16_t const', 'mtu')],
                   is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')],
                   is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetPromiscReceiveCallback', 'void',
                   [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, short unsigned int, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
                   is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetReceiveCallback', 'void',
                   [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
                   is_pure_virtual=True, is_virtual=True)
    cls.add_method('SupportsSendFrom', 'bool', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
def register_Ns3NixVector_methods(root_module, cls):
    """Register ns3::NixVector constructors and member functions on *cls*.

    *cls* is a binding-generator class wrapper exposing ``add_constructor`` /
    ``add_method`` / ``add_output_stream_operator``; *root_module* is accepted
    for a uniform registration signature but not used in this function.
    Registration order is preserved as generated — do not reorder the calls.
    """
    cls.add_output_stream_operator()
    ## nix-vector.h (module 'network'): ns3::NixVector::NixVector() [constructor]
    cls.add_constructor([])
    ## nix-vector.h (module 'network'): ns3::NixVector::NixVector(ns3::NixVector const & o) [copy constructor]
    cls.add_constructor([param('ns3::NixVector const &', 'o')])
    ## nix-vector.h (module 'network'): void ns3::NixVector::AddNeighborIndex(uint32_t newBits, uint32_t numberOfBits) [member function]
    cls.add_method('AddNeighborIndex',
                   'void',
                   [param('uint32_t', 'newBits'), param('uint32_t', 'numberOfBits')])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::BitCount(uint32_t numberOfNeighbors) const [member function]
    cls.add_method('BitCount',
                   'uint32_t',
                   [param('uint32_t', 'numberOfNeighbors')],
                   is_const=True)
    ## nix-vector.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::NixVector::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::NixVector >',
                   [],
                   is_const=True)
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Deserialize(uint32_t const * buffer, uint32_t size) [member function]
    cls.add_method('Deserialize',
                   'uint32_t',
                   [param('uint32_t const *', 'buffer'), param('uint32_t', 'size')])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::ExtractNeighborIndex(uint32_t numberOfBits) [member function]
    cls.add_method('ExtractNeighborIndex',
                   'uint32_t',
                   [param('uint32_t', 'numberOfBits')])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetRemainingBits() [member function]
    cls.add_method('GetRemainingBits',
                   'uint32_t',
                   [])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Serialize(uint32_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize',
                   'uint32_t',
                   [param('uint32_t *', 'buffer'), param('uint32_t', 'maxSize')],
                   is_const=True)
    return
def register_Ns3Node_methods(root_module, cls):
    """Register ns3::Node constructors and member functions on *cls*.

    Covers device/application management, ID accessors, protocol-handler and
    device-addition-listener callbacks, plus protected DoDispose/DoInitialize
    overrides. *root_module* is unused here; registration order is preserved
    as generated.
    """
    ## node.h (module 'network'): ns3::Node::Node(ns3::Node const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Node const &', 'arg0')])
    ## node.h (module 'network'): ns3::Node::Node() [constructor]
    cls.add_constructor([])
    ## node.h (module 'network'): ns3::Node::Node(uint32_t systemId) [constructor]
    cls.add_constructor([param('uint32_t', 'systemId')])
    ## node.h (module 'network'): uint32_t ns3::Node::AddApplication(ns3::Ptr<ns3::Application> application) [member function]
    cls.add_method('AddApplication',
                   'uint32_t',
                   [param('ns3::Ptr< ns3::Application >', 'application')])
    ## node.h (module 'network'): uint32_t ns3::Node::AddDevice(ns3::Ptr<ns3::NetDevice> device) [member function]
    cls.add_method('AddDevice',
                   'uint32_t',
                   [param('ns3::Ptr< ns3::NetDevice >', 'device')])
    ## node.h (module 'network'): static bool ns3::Node::ChecksumEnabled() [member function]
    cls.add_method('ChecksumEnabled',
                   'bool',
                   [],
                   is_static=True)
    ## node.h (module 'network'): ns3::Ptr<ns3::Application> ns3::Node::GetApplication(uint32_t index) const [member function]
    cls.add_method('GetApplication',
                   'ns3::Ptr< ns3::Application >',
                   [param('uint32_t', 'index')],
                   is_const=True)
    ## node.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Node::GetDevice(uint32_t index) const [member function]
    cls.add_method('GetDevice',
                   'ns3::Ptr< ns3::NetDevice >',
                   [param('uint32_t', 'index')],
                   is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetId() const [member function]
    cls.add_method('GetId',
                   'uint32_t',
                   [],
                   is_const=True)
    ## node.h (module 'network'): ns3::Time ns3::Node::GetLocalTime() const [member function]
    cls.add_method('GetLocalTime',
                   'ns3::Time',
                   [],
                   is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetNApplications() const [member function]
    cls.add_method('GetNApplications',
                   'uint32_t',
                   [],
                   is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetNDevices() const [member function]
    cls.add_method('GetNDevices',
                   'uint32_t',
                   [],
                   is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetSystemId() const [member function]
    cls.add_method('GetSystemId',
                   'uint32_t',
                   [],
                   is_const=True)
    ## node.h (module 'network'): static ns3::TypeId ns3::Node::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## node.h (module 'network'): void ns3::Node::RegisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function]
    cls.add_method('RegisterDeviceAdditionListener',
                   'void',
                   [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
    ## node.h (module 'network'): void ns3::Node::RegisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler, uint16_t protocolType, ns3::Ptr<ns3::NetDevice> device, bool promiscuous=false) [member function]
    cls.add_method('RegisterProtocolHandler',
                   'void',
                   [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler'), param('uint16_t', 'protocolType'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'promiscuous', default_value='false')])
    ## node.h (module 'network'): void ns3::Node::UnregisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function]
    cls.add_method('UnregisterDeviceAdditionListener',
                   'void',
                   [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
    ## node.h (module 'network'): void ns3::Node::UnregisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler) [member function]
    cls.add_method('UnregisterProtocolHandler',
                   'void',
                   [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler')])
    ## node.h (module 'network'): void ns3::Node::DoDispose() [member function]
    cls.add_method('DoDispose',
                   'void',
                   [],
                   visibility='protected', is_virtual=True)
    ## node.h (module 'network'): void ns3::Node::DoInitialize() [member function]
    cls.add_method('DoInitialize',
                   'void',
                   [],
                   visibility='protected', is_virtual=True)
    return
def register_Ns3NormalRandomVariable_methods(root_module, cls):
    """Register ns3::NormalRandomVariable members on *cls*.

    Exposes the INFINITE_VALUE static attribute, parameter accessors
    (GetMean/GetVariance/GetBound), and both the parameterized and virtual
    no-argument GetValue/GetInteger overloads. *root_module* is unused here.
    """
    ## random-variable-stream.h (module 'core'): ns3::NormalRandomVariable::INFINITE_VALUE [variable]
    cls.add_static_attribute('INFINITE_VALUE', 'double const', is_const=True)
    ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::NormalRandomVariable::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## random-variable-stream.h (module 'core'): ns3::NormalRandomVariable::NormalRandomVariable() [constructor]
    cls.add_constructor([])
    ## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetMean() const [member function]
    cls.add_method('GetMean',
                   'double',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetVariance() const [member function]
    cls.add_method('GetVariance',
                   'double',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetBound() const [member function]
    cls.add_method('GetBound',
                   'double',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetValue(double mean, double variance, double bound=ns3::NormalRandomVariable::INFINITE_VALUE) [member function]
    cls.add_method('GetValue',
                   'double',
                   [param('double', 'mean'), param('double', 'variance'), param('double', 'bound', default_value='ns3::NormalRandomVariable::INFINITE_VALUE')])
    ## random-variable-stream.h (module 'core'): uint32_t ns3::NormalRandomVariable::GetInteger(uint32_t mean, uint32_t variance, uint32_t bound) [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [param('uint32_t', 'mean'), param('uint32_t', 'variance'), param('uint32_t', 'bound')])
    ## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetValue() [member function]
    cls.add_method('GetValue',
                   'double',
                   [],
                   is_virtual=True)
    ## random-variable-stream.h (module 'core'): uint32_t ns3::NormalRandomVariable::GetInteger() [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [],
                   is_virtual=True)
    return
def register_Ns3ObjectFactoryChecker_methods(root_module, cls):
    """Register ns3::ObjectFactoryChecker's default and copy constructors on *cls*; *root_module* is unused here."""
    ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker() [constructor]
    cls.add_constructor([])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker(ns3::ObjectFactoryChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectFactoryChecker const &', 'arg0')])
    return
def register_Ns3ObjectFactoryValue_methods(root_module, cls):
    """Register ns3::ObjectFactoryValue members on *cls*.

    Standard AttributeValue surface: constructors, Copy, Get/Set, and the
    virtual SerializeToString/DeserializeFromString pair. *root_module* is
    unused here.
    """
    ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue() [constructor]
    cls.add_constructor([])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactoryValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectFactoryValue const &', 'arg0')])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactory const & value) [constructor]
    cls.add_constructor([param('ns3::ObjectFactory const &', 'value')])
    ## object-factory.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::ObjectFactoryValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## object-factory.h (module 'core'): bool ns3::ObjectFactoryValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## object-factory.h (module 'core'): ns3::ObjectFactory ns3::ObjectFactoryValue::Get() const [member function]
    cls.add_method('Get',
                   'ns3::ObjectFactory',
                   [],
                   is_const=True)
    ## object-factory.h (module 'core'): std::string ns3::ObjectFactoryValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## object-factory.h (module 'core'): void ns3::ObjectFactoryValue::Set(ns3::ObjectFactory const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::ObjectFactory const &', 'value')])
    return
def register_Ns3Packet_methods(root_module, cls):
    """Register ns3::Packet constructors and member functions on *cls*.

    Exposes construction from size/buffer, header/trailer add-remove-peek,
    byte and packet tags, fragmentation/copying, serialization, nix-vector
    access, and printing helpers. *root_module* is unused here; registration
    order is preserved as generated.
    """
    cls.add_output_stream_operator()
    ## packet.h (module 'network'): ns3::Packet::Packet() [constructor]
    cls.add_constructor([])
    ## packet.h (module 'network'): ns3::Packet::Packet(ns3::Packet const & o) [copy constructor]
    cls.add_constructor([param('ns3::Packet const &', 'o')])
    ## packet.h (module 'network'): ns3::Packet::Packet(uint32_t size) [constructor]
    cls.add_constructor([param('uint32_t', 'size')])
    ## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size, bool magic) [constructor]
    cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size'), param('bool', 'magic')])
    ## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size) [constructor]
    cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## packet.h (module 'network'): void ns3::Packet::AddAtEnd(ns3::Ptr<const ns3::Packet> packet) [member function]
    cls.add_method('AddAtEnd',
                   'void',
                   [param('ns3::Ptr< ns3::Packet const >', 'packet')])
    ## packet.h (module 'network'): void ns3::Packet::AddByteTag(ns3::Tag const & tag) const [member function]
    cls.add_method('AddByteTag',
                   'void',
                   [param('ns3::Tag const &', 'tag')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::AddHeader(ns3::Header const & header) [member function]
    cls.add_method('AddHeader',
                   'void',
                   [param('ns3::Header const &', 'header')])
    ## packet.h (module 'network'): void ns3::Packet::AddPacketTag(ns3::Tag const & tag) const [member function]
    cls.add_method('AddPacketTag',
                   'void',
                   [param('ns3::Tag const &', 'tag')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::AddPaddingAtEnd(uint32_t size) [member function]
    cls.add_method('AddPaddingAtEnd',
                   'void',
                   [param('uint32_t', 'size')])
    ## packet.h (module 'network'): void ns3::Packet::AddTrailer(ns3::Trailer const & trailer) [member function]
    cls.add_method('AddTrailer',
                   'void',
                   [param('ns3::Trailer const &', 'trailer')])
    ## packet.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::Packet::BeginItem() const [member function]
    cls.add_method('BeginItem',
                   'ns3::PacketMetadata::ItemIterator',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::Packet >',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::CopyData(uint8_t * buffer, uint32_t size) const [member function]
    cls.add_method('CopyData',
                   'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'size')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::CopyData(std::ostream * os, uint32_t size) const [member function]
    cls.add_method('CopyData',
                   'void',
                   [param('std::ostream *', 'os'), param('uint32_t', 'size')],
                   is_const=True)
    ## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::CreateFragment(uint32_t start, uint32_t length) const [member function]
    cls.add_method('CreateFragment',
                   'ns3::Ptr< ns3::Packet >',
                   [param('uint32_t', 'start'), param('uint32_t', 'length')],
                   is_const=True)
    ## packet.h (module 'network'): static void ns3::Packet::EnableChecking() [member function]
    cls.add_method('EnableChecking',
                   'void',
                   [],
                   is_static=True)
    ## packet.h (module 'network'): static void ns3::Packet::EnablePrinting() [member function]
    cls.add_method('EnablePrinting',
                   'void',
                   [],
                   is_static=True)
    ## packet.h (module 'network'): bool ns3::Packet::FindFirstMatchingByteTag(ns3::Tag & tag) const [member function]
    cls.add_method('FindFirstMatchingByteTag',
                   'bool',
                   [param('ns3::Tag &', 'tag')],
                   is_const=True)
    ## packet.h (module 'network'): ns3::ByteTagIterator ns3::Packet::GetByteTagIterator() const [member function]
    cls.add_method('GetByteTagIterator',
                   'ns3::ByteTagIterator',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::Packet::GetNixVector() const [member function]
    cls.add_method('GetNixVector',
                   'ns3::Ptr< ns3::NixVector >',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): ns3::PacketTagIterator ns3::Packet::GetPacketTagIterator() const [member function]
    cls.add_method('GetPacketTagIterator',
                   'ns3::PacketTagIterator',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::GetSize() const [member function]
    cls.add_method('GetSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): uint64_t ns3::Packet::GetUid() const [member function]
    cls.add_method('GetUid',
                   'uint64_t',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::PeekHeader(ns3::Header & header) const [member function]
    cls.add_method('PeekHeader',
                   'uint32_t',
                   [param('ns3::Header &', 'header')],
                   is_const=True)
    ## packet.h (module 'network'): bool ns3::Packet::PeekPacketTag(ns3::Tag & tag) const [member function]
    cls.add_method('PeekPacketTag',
                   'bool',
                   [param('ns3::Tag &', 'tag')],
                   is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::PeekTrailer(ns3::Trailer & trailer) [member function]
    cls.add_method('PeekTrailer',
                   'uint32_t',
                   [param('ns3::Trailer &', 'trailer')])
    ## packet.h (module 'network'): void ns3::Packet::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::PrintByteTags(std::ostream & os) const [member function]
    cls.add_method('PrintByteTags',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::PrintPacketTags(std::ostream & os) const [member function]
    cls.add_method('PrintPacketTags',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::RemoveAllByteTags() [member function]
    cls.add_method('RemoveAllByteTags',
                   'void',
                   [])
    ## packet.h (module 'network'): void ns3::Packet::RemoveAllPacketTags() [member function]
    cls.add_method('RemoveAllPacketTags',
                   'void',
                   [])
    ## packet.h (module 'network'): void ns3::Packet::RemoveAtEnd(uint32_t size) [member function]
    cls.add_method('RemoveAtEnd',
                   'void',
                   [param('uint32_t', 'size')])
    ## packet.h (module 'network'): void ns3::Packet::RemoveAtStart(uint32_t size) [member function]
    cls.add_method('RemoveAtStart',
                   'void',
                   [param('uint32_t', 'size')])
    ## packet.h (module 'network'): uint32_t ns3::Packet::RemoveHeader(ns3::Header & header) [member function]
    cls.add_method('RemoveHeader',
                   'uint32_t',
                   [param('ns3::Header &', 'header')])
    ## packet.h (module 'network'): bool ns3::Packet::RemovePacketTag(ns3::Tag & tag) [member function]
    cls.add_method('RemovePacketTag',
                   'bool',
                   [param('ns3::Tag &', 'tag')])
    ## packet.h (module 'network'): uint32_t ns3::Packet::RemoveTrailer(ns3::Trailer & trailer) [member function]
    cls.add_method('RemoveTrailer',
                   'uint32_t',
                   [param('ns3::Trailer &', 'trailer')])
    ## packet.h (module 'network'): bool ns3::Packet::ReplacePacketTag(ns3::Tag & tag) [member function]
    cls.add_method('ReplacePacketTag',
                   'bool',
                   [param('ns3::Tag &', 'tag')])
    ## packet.h (module 'network'): uint32_t ns3::Packet::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize',
                   'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::SetNixVector(ns3::Ptr<ns3::NixVector> nixVector) [member function]
    cls.add_method('SetNixVector',
                   'void',
                   [param('ns3::Ptr< ns3::NixVector >', 'nixVector')])
    ## packet.h (module 'network'): std::string ns3::Packet::ToString() const [member function]
    cls.add_method('ToString',
                   'std::string',
                   [],
                   is_const=True)
    return
def register_Ns3ParetoRandomVariable_methods(root_module, cls):
    """Register ns3::ParetoRandomVariable members on *cls*.

    Exposes parameter accessors (GetMean is marked deprecated in favor of
    GetScale/GetShape, per the deprecated=True flag below) and both the
    parameterized and virtual no-argument GetValue/GetInteger overloads.
    *root_module* is unused here.
    """
    ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ParetoRandomVariable::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## random-variable-stream.h (module 'core'): ns3::ParetoRandomVariable::ParetoRandomVariable() [constructor]
    cls.add_constructor([])
    ## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetMean() const [member function]
    cls.add_method('GetMean',
                   'double',
                   [],
                   deprecated=True, is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetScale() const [member function]
    cls.add_method('GetScale',
                   'double',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetShape() const [member function]
    cls.add_method('GetShape',
                   'double',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetBound() const [member function]
    cls.add_method('GetBound',
                   'double',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetValue(double scale, double shape, double bound) [member function]
    cls.add_method('GetValue',
                   'double',
                   [param('double', 'scale'), param('double', 'shape'), param('double', 'bound')])
    ## random-variable-stream.h (module 'core'): uint32_t ns3::ParetoRandomVariable::GetInteger(uint32_t scale, uint32_t shape, uint32_t bound) [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [param('uint32_t', 'scale'), param('uint32_t', 'shape'), param('uint32_t', 'bound')])
    ## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetValue() [member function]
    cls.add_method('GetValue',
                   'double',
                   [],
                   is_virtual=True)
    ## random-variable-stream.h (module 'core'): uint32_t ns3::ParetoRandomVariable::GetInteger() [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [],
                   is_virtual=True)
    return
def register_Ns3PointerChecker_methods(root_module, cls):
    """Register ns3::PointerChecker constructors and its pure-virtual GetPointeeTypeId on *cls*; *root_module* is unused here."""
    ## pointer.h (module 'core'): ns3::PointerChecker::PointerChecker() [constructor]
    cls.add_constructor([])
    ## pointer.h (module 'core'): ns3::PointerChecker::PointerChecker(ns3::PointerChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PointerChecker const &', 'arg0')])
    ## pointer.h (module 'core'): ns3::TypeId ns3::PointerChecker::GetPointeeTypeId() const [member function]
    cls.add_method('GetPointeeTypeId',
                   'ns3::TypeId',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3PointerValue_methods(root_module, cls):
    """Register ns3::PointerValue members on *cls*.

    Standard AttributeValue surface: constructors, Copy, GetObject/SetObject,
    and the virtual SerializeToString/DeserializeFromString pair.
    *root_module* is unused here.
    """
    ## pointer.h (module 'core'): ns3::PointerValue::PointerValue(ns3::PointerValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PointerValue const &', 'arg0')])
    ## pointer.h (module 'core'): ns3::PointerValue::PointerValue() [constructor]
    cls.add_constructor([])
    ## pointer.h (module 'core'): ns3::PointerValue::PointerValue(ns3::Ptr<ns3::Object> object) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::Object >', 'object')])
    ## pointer.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::PointerValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## pointer.h (module 'core'): bool ns3::PointerValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## pointer.h (module 'core'): ns3::Ptr<ns3::Object> ns3::PointerValue::GetObject() const [member function]
    cls.add_method('GetObject',
                   'ns3::Ptr< ns3::Object >',
                   [],
                   is_const=True)
    ## pointer.h (module 'core'): std::string ns3::PointerValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## pointer.h (module 'core'): void ns3::PointerValue::SetObject(ns3::Ptr<ns3::Object> object) [member function]
    cls.add_method('SetObject',
                   'void',
                   [param('ns3::Ptr< ns3::Object >', 'object')])
    return
def register_Ns3TimeValue_methods(root_module, cls):
    """Register ns3::TimeValue members on *cls*.

    Standard AttributeValue surface: constructors, Copy, Get/Set, and the
    virtual SerializeToString/DeserializeFromString pair. *root_module* is
    unused here.
    """
    ## nstime.h (module 'core'): ns3::TimeValue::TimeValue() [constructor]
    cls.add_constructor([])
    ## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::TimeValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TimeValue const &', 'arg0')])
    ## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::Time const & value) [constructor]
    cls.add_constructor([param('ns3::Time const &', 'value')])
    ## nstime.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TimeValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## nstime.h (module 'core'): bool ns3::TimeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## nstime.h (module 'core'): ns3::Time ns3::TimeValue::Get() const [member function]
    cls.add_method('Get',
                   'ns3::Time',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): std::string ns3::TimeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## nstime.h (module 'core'): void ns3::TimeValue::Set(ns3::Time const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::Time const &', 'value')])
    return
def register_Ns3TypeIdChecker_methods(root_module, cls):
    """Register ns3::TypeIdChecker's default and copy constructors on *cls*; *root_module* is unused here."""
    ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')])
    return
def register_Ns3TypeIdValue_methods(root_module, cls):
    """Register ns3::TypeIdValue members on *cls*.

    Standard AttributeValue surface: constructors, Copy, Get/Set, and the
    virtual SerializeToString/DeserializeFromString pair. *root_module* is
    unused here.
    """
    ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
    ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor]
    cls.add_constructor([param('ns3::TypeId const &', 'value')])
    ## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function]
    cls.add_method('Get',
                   'ns3::TypeId',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::TypeId const &', 'value')])
    return
def register_Ns3UanChannel_methods(root_module, cls):
    """Register ns3::UanChannel constructors and member functions on *cls*.

    Covers device/transducer attachment, noise and propagation model setters,
    packet transmission, and the protected virtual DoDispose override.
    *root_module* is unused here.
    """
    ## uan-channel.h (module 'uan'): ns3::UanChannel::UanChannel(ns3::UanChannel const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::UanChannel const &', 'arg0')])
    ## uan-channel.h (module 'uan'): ns3::UanChannel::UanChannel() [constructor]
    cls.add_constructor([])
    ## uan-channel.h (module 'uan'): void ns3::UanChannel::AddDevice(ns3::Ptr<ns3::UanNetDevice> dev, ns3::Ptr<ns3::UanTransducer> trans) [member function]
    cls.add_method('AddDevice',
                   'void',
                   [param('ns3::Ptr< ns3::UanNetDevice >', 'dev'), param('ns3::Ptr< ns3::UanTransducer >', 'trans')])
    ## uan-channel.h (module 'uan'): void ns3::UanChannel::Clear() [member function]
    cls.add_method('Clear',
                   'void',
                   [])
    ## uan-channel.h (module 'uan'): ns3::Ptr<ns3::NetDevice> ns3::UanChannel::GetDevice(uint32_t i) const [member function]
    cls.add_method('GetDevice',
                   'ns3::Ptr< ns3::NetDevice >',
                   [param('uint32_t', 'i')],
                   is_const=True, is_virtual=True)
    ## uan-channel.h (module 'uan'): uint32_t ns3::UanChannel::GetNDevices() const [member function]
    cls.add_method('GetNDevices',
                   'uint32_t',
                   [],
                   is_const=True, is_virtual=True)
    ## uan-channel.h (module 'uan'): double ns3::UanChannel::GetNoiseDbHz(double fKhz) [member function]
    cls.add_method('GetNoiseDbHz',
                   'double',
                   [param('double', 'fKhz')])
    ## uan-channel.h (module 'uan'): static ns3::TypeId ns3::UanChannel::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## uan-channel.h (module 'uan'): void ns3::UanChannel::SetNoiseModel(ns3::Ptr<ns3::UanNoiseModel> noise) [member function]
    cls.add_method('SetNoiseModel',
                   'void',
                   [param('ns3::Ptr< ns3::UanNoiseModel >', 'noise')])
    ## uan-channel.h (module 'uan'): void ns3::UanChannel::SetPropagationModel(ns3::Ptr<ns3::UanPropModel> prop) [member function]
    cls.add_method('SetPropagationModel',
                   'void',
                   [param('ns3::Ptr< ns3::UanPropModel >', 'prop')])
    ## uan-channel.h (module 'uan'): void ns3::UanChannel::TxPacket(ns3::Ptr<ns3::UanTransducer> src, ns3::Ptr<ns3::Packet> packet, double txPowerDb, ns3::UanTxMode txmode) [member function]
    cls.add_method('TxPacket',
                   'void',
                   [param('ns3::Ptr< ns3::UanTransducer >', 'src'), param('ns3::Ptr< ns3::Packet >', 'packet'), param('double', 'txPowerDb'), param('ns3::UanTxMode', 'txmode')])
    ## uan-channel.h (module 'uan'): void ns3::UanChannel::DoDispose() [member function]
    cls.add_method('DoDispose',
                   'void',
                   [],
                   visibility='protected', is_virtual=True)
    return
def register_Ns3UanModesListChecker_methods(root_module, cls):
    """Register ns3::UanModesListChecker's default and copy constructors on *cls*; *root_module* is unused here."""
    ## uan-tx-mode.h (module 'uan'): ns3::UanModesListChecker::UanModesListChecker() [constructor]
    cls.add_constructor([])
    ## uan-tx-mode.h (module 'uan'): ns3::UanModesListChecker::UanModesListChecker(ns3::UanModesListChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::UanModesListChecker const &', 'arg0')])
    return
def register_Ns3UanModesListValue_methods(root_module, cls):
    """Register ns3::UanModesListValue members on *cls*.

    Standard AttributeValue surface: constructors, Copy, Get/Set, and the
    virtual SerializeToString/DeserializeFromString pair. *root_module* is
    unused here.
    """
    ## uan-tx-mode.h (module 'uan'): ns3::UanModesListValue::UanModesListValue() [constructor]
    cls.add_constructor([])
    ## uan-tx-mode.h (module 'uan'): ns3::UanModesListValue::UanModesListValue(ns3::UanModesListValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::UanModesListValue const &', 'arg0')])
    ## uan-tx-mode.h (module 'uan'): ns3::UanModesListValue::UanModesListValue(ns3::UanModesList const & value) [constructor]
    cls.add_constructor([param('ns3::UanModesList const &', 'value')])
    ## uan-tx-mode.h (module 'uan'): ns3::Ptr<ns3::AttributeValue> ns3::UanModesListValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## uan-tx-mode.h (module 'uan'): bool ns3::UanModesListValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## uan-tx-mode.h (module 'uan'): ns3::UanModesList ns3::UanModesListValue::Get() const [member function]
    cls.add_method('Get',
                   'ns3::UanModesList',
                   [],
                   is_const=True)
    ## uan-tx-mode.h (module 'uan'): std::string ns3::UanModesListValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## uan-tx-mode.h (module 'uan'): void ns3::UanModesListValue::Set(ns3::UanModesList const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::UanModesList const &', 'value')])
    return
def register_Ns3UanNetDevice_methods(root_module, cls):
    """Register constructors and methods of ns3::UanNetDevice.

    Auto-generated pybindgen registrations for the UAN net device: the full
    ns3::NetDevice virtual interface plus UAN-specific accessors
    (Mac/Phy/Transducer/Channel/SleepMode).
    """
    ## uan-net-device.h (module 'uan'): ns3::UanNetDevice::UanNetDevice(ns3::UanNetDevice const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::UanNetDevice const &', 'arg0')])
    ## uan-net-device.h (module 'uan'): ns3::UanNetDevice::UanNetDevice() [constructor]
    cls.add_constructor([])
    ## uan-net-device.h (module 'uan'): void ns3::UanNetDevice::AddLinkChangeCallback(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function]
    cls.add_method('AddLinkChangeCallback',
                   'void',
                   [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')],
                   is_virtual=True)
    ## uan-net-device.h (module 'uan'): void ns3::UanNetDevice::Clear() [member function]
    cls.add_method('Clear',
                   'void',
                   [])
    ## uan-net-device.h (module 'uan'): ns3::Address ns3::UanNetDevice::GetAddress() const [member function]
    cls.add_method('GetAddress',
                   'ns3::Address',
                   [],
                   is_const=True, is_virtual=True)
    ## uan-net-device.h (module 'uan'): ns3::Address ns3::UanNetDevice::GetBroadcast() const [member function]
    cls.add_method('GetBroadcast',
                   'ns3::Address',
                   [],
                   is_const=True, is_virtual=True)
    ## uan-net-device.h (module 'uan'): ns3::Ptr<ns3::Channel> ns3::UanNetDevice::GetChannel() const [member function]
    cls.add_method('GetChannel',
                   'ns3::Ptr< ns3::Channel >',
                   [],
                   is_const=True, is_virtual=True)
    ## uan-net-device.h (module 'uan'): uint32_t ns3::UanNetDevice::GetIfIndex() const [member function]
    cls.add_method('GetIfIndex',
                   'uint32_t',
                   [],
                   is_const=True, is_virtual=True)
    ## uan-net-device.h (module 'uan'): ns3::Ptr<ns3::UanMac> ns3::UanNetDevice::GetMac() const [member function]
    cls.add_method('GetMac',
                   'ns3::Ptr< ns3::UanMac >',
                   [],
                   is_const=True)
    ## uan-net-device.h (module 'uan'): uint16_t ns3::UanNetDevice::GetMtu() const [member function]
    cls.add_method('GetMtu',
                   'uint16_t',
                   [],
                   is_const=True, is_virtual=True)
    ## uan-net-device.h (module 'uan'): ns3::Address ns3::UanNetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]
    cls.add_method('GetMulticast',
                   'ns3::Address',
                   [param('ns3::Ipv4Address', 'multicastGroup')],
                   is_const=True, is_virtual=True)
    ## uan-net-device.h (module 'uan'): ns3::Address ns3::UanNetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]
    cls.add_method('GetMulticast',
                   'ns3::Address',
                   [param('ns3::Ipv6Address', 'addr')],
                   is_const=True, is_virtual=True)
    ## uan-net-device.h (module 'uan'): ns3::Ptr<ns3::Node> ns3::UanNetDevice::GetNode() const [member function]
    cls.add_method('GetNode',
                   'ns3::Ptr< ns3::Node >',
                   [],
                   is_const=True, is_virtual=True)
    ## uan-net-device.h (module 'uan'): ns3::Ptr<ns3::UanPhy> ns3::UanNetDevice::GetPhy() const [member function]
    cls.add_method('GetPhy',
                   'ns3::Ptr< ns3::UanPhy >',
                   [],
                   is_const=True)
    ## uan-net-device.h (module 'uan'): ns3::Ptr<ns3::UanTransducer> ns3::UanNetDevice::GetTransducer() const [member function]
    cls.add_method('GetTransducer',
                   'ns3::Ptr< ns3::UanTransducer >',
                   [],
                   is_const=True)
    ## uan-net-device.h (module 'uan'): static ns3::TypeId ns3::UanNetDevice::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## uan-net-device.h (module 'uan'): bool ns3::UanNetDevice::IsBridge() const [member function]
    cls.add_method('IsBridge',
                   'bool',
                   [],
                   is_const=True, is_virtual=True)
    ## uan-net-device.h (module 'uan'): bool ns3::UanNetDevice::IsBroadcast() const [member function]
    cls.add_method('IsBroadcast',
                   'bool',
                   [],
                   is_const=True, is_virtual=True)
    ## uan-net-device.h (module 'uan'): bool ns3::UanNetDevice::IsLinkUp() const [member function]
    cls.add_method('IsLinkUp',
                   'bool',
                   [],
                   is_const=True, is_virtual=True)
    ## uan-net-device.h (module 'uan'): bool ns3::UanNetDevice::IsMulticast() const [member function]
    cls.add_method('IsMulticast',
                   'bool',
                   [],
                   is_const=True, is_virtual=True)
    ## uan-net-device.h (module 'uan'): bool ns3::UanNetDevice::IsPointToPoint() const [member function]
    cls.add_method('IsPointToPoint',
                   'bool',
                   [],
                   is_const=True, is_virtual=True)
    ## uan-net-device.h (module 'uan'): bool ns3::UanNetDevice::NeedsArp() const [member function]
    cls.add_method('NeedsArp',
                   'bool',
                   [],
                   is_const=True, is_virtual=True)
    ## uan-net-device.h (module 'uan'): bool ns3::UanNetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function]
    cls.add_method('Send',
                   'bool',
                   [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
                   is_virtual=True)
    ## uan-net-device.h (module 'uan'): bool ns3::UanNetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function]
    cls.add_method('SendFrom',
                   'bool',
                   [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
                   is_virtual=True)
    ## uan-net-device.h (module 'uan'): void ns3::UanNetDevice::SetAddress(ns3::Address address) [member function]
    cls.add_method('SetAddress',
                   'void',
                   [param('ns3::Address', 'address')],
                   is_virtual=True)
    ## uan-net-device.h (module 'uan'): void ns3::UanNetDevice::SetChannel(ns3::Ptr<ns3::UanChannel> channel) [member function]
    cls.add_method('SetChannel',
                   'void',
                   [param('ns3::Ptr< ns3::UanChannel >', 'channel')])
    ## uan-net-device.h (module 'uan'): void ns3::UanNetDevice::SetIfIndex(uint32_t const index) [member function]
    cls.add_method('SetIfIndex',
                   'void',
                   [param('uint32_t const', 'index')],
                   is_virtual=True)
    ## uan-net-device.h (module 'uan'): void ns3::UanNetDevice::SetMac(ns3::Ptr<ns3::UanMac> mac) [member function]
    cls.add_method('SetMac',
                   'void',
                   [param('ns3::Ptr< ns3::UanMac >', 'mac')])
    ## uan-net-device.h (module 'uan'): bool ns3::UanNetDevice::SetMtu(uint16_t const mtu) [member function]
    cls.add_method('SetMtu',
                   'bool',
                   [param('uint16_t const', 'mtu')],
                   is_virtual=True)
    ## uan-net-device.h (module 'uan'): void ns3::UanNetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function]
    cls.add_method('SetNode',
                   'void',
                   [param('ns3::Ptr< ns3::Node >', 'node')],
                   is_virtual=True)
    ## uan-net-device.h (module 'uan'): void ns3::UanNetDevice::SetPhy(ns3::Ptr<ns3::UanPhy> phy) [member function]
    cls.add_method('SetPhy',
                   'void',
                   [param('ns3::Ptr< ns3::UanPhy >', 'phy')])
    ## uan-net-device.h (module 'uan'): void ns3::UanNetDevice::SetPromiscReceiveCallback(ns3::Callback<bool,ns3::Ptr<ns3::NetDevice>,ns3::Ptr<const ns3::Packet>,short unsigned int,const ns3::Address&,const ns3::Address&,ns3::NetDevice::PacketType,ns3::empty,ns3::empty,ns3::empty> cb) [member function]
    cls.add_method('SetPromiscReceiveCallback',
                   'void',
                   [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, short unsigned int, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
                   is_virtual=True)
    ## uan-net-device.h (module 'uan'): void ns3::UanNetDevice::SetReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
    cls.add_method('SetReceiveCallback',
                   'void',
                   [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
                   is_virtual=True)
    ## uan-net-device.h (module 'uan'): void ns3::UanNetDevice::SetSleepMode(bool sleep) [member function]
    cls.add_method('SetSleepMode',
                   'void',
                   [param('bool', 'sleep')])
    ## uan-net-device.h (module 'uan'): void ns3::UanNetDevice::SetTransducer(ns3::Ptr<ns3::UanTransducer> trans) [member function]
    cls.add_method('SetTransducer',
                   'void',
                   [param('ns3::Ptr< ns3::UanTransducer >', 'trans')])
    ## uan-net-device.h (module 'uan'): bool ns3::UanNetDevice::SupportsSendFrom() const [member function]
    cls.add_method('SupportsSendFrom',
                   'bool',
                   [],
                   is_const=True, is_virtual=True)
    ## uan-net-device.h (module 'uan'): void ns3::UanNetDevice::DoDispose() [member function]
    cls.add_method('DoDispose',
                   'void',
                   [],
                   visibility='protected', is_virtual=True)
    ## uan-net-device.h (module 'uan'): void ns3::UanNetDevice::ForwardUp(ns3::Ptr<ns3::Packet> pkt, ns3::UanAddress const & src) [member function]
    cls.add_method('ForwardUp',
                   'void',
                   [param('ns3::Ptr< ns3::Packet >', 'pkt'), param('ns3::UanAddress const &', 'src')],
                   visibility='private', is_virtual=True)
    return
def register_Ns3UintegerValue_methods(root_module, cls):
    """Register constructors and methods of ns3::UintegerValue (pybindgen-generated)."""
    ## uinteger.h (module 'core'): ns3::UintegerValue::UintegerValue() [constructor]
    cls.add_constructor([])
    ## uinteger.h (module 'core'): ns3::UintegerValue::UintegerValue(ns3::UintegerValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::UintegerValue const &', 'arg0')])
    ## uinteger.h (module 'core'): ns3::UintegerValue::UintegerValue(uint64_t const & value) [constructor]
    cls.add_constructor([param('uint64_t const &', 'value')])
    ## uinteger.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::UintegerValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## uinteger.h (module 'core'): bool ns3::UintegerValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## uinteger.h (module 'core'): uint64_t ns3::UintegerValue::Get() const [member function]
    cls.add_method('Get',
                   'uint64_t',
                   [],
                   is_const=True)
    ## uinteger.h (module 'core'): std::string ns3::UintegerValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## uinteger.h (module 'core'): void ns3::UintegerValue::Set(uint64_t const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('uint64_t const &', 'value')])
    return
def register_Ns3Vector2DChecker_methods(root_module, cls):
    """Register constructors of ns3::Vector2DChecker (pybindgen-generated)."""
    ## vector.h (module 'core'): ns3::Vector2DChecker::Vector2DChecker() [constructor]
    cls.add_constructor([])
    ## vector.h (module 'core'): ns3::Vector2DChecker::Vector2DChecker(ns3::Vector2DChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Vector2DChecker const &', 'arg0')])
    return
def register_Ns3Vector2DValue_methods(root_module, cls):
    """Register constructors and methods of ns3::Vector2DValue (pybindgen-generated)."""
    ## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue() [constructor]
    cls.add_constructor([])
    ## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue(ns3::Vector2DValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Vector2DValue const &', 'arg0')])
    ## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue(ns3::Vector2D const & value) [constructor]
    cls.add_constructor([param('ns3::Vector2D const &', 'value')])
    ## vector.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::Vector2DValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## vector.h (module 'core'): bool ns3::Vector2DValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## vector.h (module 'core'): ns3::Vector2D ns3::Vector2DValue::Get() const [member function]
    cls.add_method('Get',
                   'ns3::Vector2D',
                   [],
                   is_const=True)
    ## vector.h (module 'core'): std::string ns3::Vector2DValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## vector.h (module 'core'): void ns3::Vector2DValue::Set(ns3::Vector2D const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::Vector2D const &', 'value')])
    return
def register_Ns3Vector3DChecker_methods(root_module, cls):
    """Register constructors of ns3::Vector3DChecker (pybindgen-generated)."""
    ## vector.h (module 'core'): ns3::Vector3DChecker::Vector3DChecker() [constructor]
    cls.add_constructor([])
    ## vector.h (module 'core'): ns3::Vector3DChecker::Vector3DChecker(ns3::Vector3DChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Vector3DChecker const &', 'arg0')])
    return
def register_Ns3Vector3DValue_methods(root_module, cls):
    """Register constructors and methods of ns3::Vector3DValue (pybindgen-generated)."""
    ## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue() [constructor]
    cls.add_constructor([])
    ## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue(ns3::Vector3DValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Vector3DValue const &', 'arg0')])
    ## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue(ns3::Vector3D const & value) [constructor]
    cls.add_constructor([param('ns3::Vector3D const &', 'value')])
    ## vector.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::Vector3DValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## vector.h (module 'core'): bool ns3::Vector3DValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## vector.h (module 'core'): ns3::Vector3D ns3::Vector3DValue::Get() const [member function]
    cls.add_method('Get',
                   'ns3::Vector3D',
                   [],
                   is_const=True)
    ## vector.h (module 'core'): std::string ns3::Vector3DValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## vector.h (module 'core'): void ns3::Vector3DValue::Set(ns3::Vector3D const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::Vector3D const &', 'value')])
    return
def register_Ns3AcousticModemEnergyModel_methods(root_module, cls):
    """Register constructors and methods of ns3::AcousticModemEnergyModel.

    Auto-generated pybindgen registrations for the UAN modem energy model:
    per-state power accessors (Tx/Rx/Idle/Sleep), state changes, and the
    energy depletion/recharge callbacks.
    """
    ## acoustic-modem-energy-model.h (module 'uan'): ns3::AcousticModemEnergyModel::AcousticModemEnergyModel(ns3::AcousticModemEnergyModel const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AcousticModemEnergyModel const &', 'arg0')])
    ## acoustic-modem-energy-model.h (module 'uan'): ns3::AcousticModemEnergyModel::AcousticModemEnergyModel() [constructor]
    cls.add_constructor([])
    ## acoustic-modem-energy-model.h (module 'uan'): void ns3::AcousticModemEnergyModel::ChangeState(int newState) [member function]
    cls.add_method('ChangeState',
                   'void',
                   [param('int', 'newState')],
                   is_virtual=True)
    ## acoustic-modem-energy-model.h (module 'uan'): int ns3::AcousticModemEnergyModel::GetCurrentState() const [member function]
    cls.add_method('GetCurrentState',
                   'int',
                   [],
                   is_const=True)
    ## acoustic-modem-energy-model.h (module 'uan'): double ns3::AcousticModemEnergyModel::GetIdlePowerW() const [member function]
    cls.add_method('GetIdlePowerW',
                   'double',
                   [],
                   is_const=True)
    ## acoustic-modem-energy-model.h (module 'uan'): ns3::Ptr<ns3::Node> ns3::AcousticModemEnergyModel::GetNode() const [member function]
    cls.add_method('GetNode',
                   'ns3::Ptr< ns3::Node >',
                   [],
                   is_const=True, is_virtual=True)
    ## acoustic-modem-energy-model.h (module 'uan'): double ns3::AcousticModemEnergyModel::GetRxPowerW() const [member function]
    cls.add_method('GetRxPowerW',
                   'double',
                   [],
                   is_const=True)
    ## acoustic-modem-energy-model.h (module 'uan'): double ns3::AcousticModemEnergyModel::GetSleepPowerW() const [member function]
    cls.add_method('GetSleepPowerW',
                   'double',
                   [],
                   is_const=True)
    ## acoustic-modem-energy-model.h (module 'uan'): double ns3::AcousticModemEnergyModel::GetTotalEnergyConsumption() const [member function]
    cls.add_method('GetTotalEnergyConsumption',
                   'double',
                   [],
                   is_const=True, is_virtual=True)
    ## acoustic-modem-energy-model.h (module 'uan'): double ns3::AcousticModemEnergyModel::GetTxPowerW() const [member function]
    cls.add_method('GetTxPowerW',
                   'double',
                   [],
                   is_const=True)
    ## acoustic-modem-energy-model.h (module 'uan'): static ns3::TypeId ns3::AcousticModemEnergyModel::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## acoustic-modem-energy-model.h (module 'uan'): void ns3::AcousticModemEnergyModel::HandleEnergyDepletion() [member function]
    cls.add_method('HandleEnergyDepletion',
                   'void',
                   [],
                   is_virtual=True)
    ## acoustic-modem-energy-model.h (module 'uan'): void ns3::AcousticModemEnergyModel::HandleEnergyRecharged() [member function]
    cls.add_method('HandleEnergyRecharged',
                   'void',
                   [],
                   is_virtual=True)
    ## acoustic-modem-energy-model.h (module 'uan'): void ns3::AcousticModemEnergyModel::SetEnergyDepletionCallback(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function]
    cls.add_method('SetEnergyDepletionCallback',
                   'void',
                   [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')])
    ## acoustic-modem-energy-model.h (module 'uan'): void ns3::AcousticModemEnergyModel::SetEnergyRechargeCallback(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function]
    cls.add_method('SetEnergyRechargeCallback',
                   'void',
                   [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')])
    ## acoustic-modem-energy-model.h (module 'uan'): void ns3::AcousticModemEnergyModel::SetEnergySource(ns3::Ptr<ns3::EnergySource> source) [member function]
    cls.add_method('SetEnergySource',
                   'void',
                   [param('ns3::Ptr< ns3::EnergySource >', 'source')],
                   is_virtual=True)
    ## acoustic-modem-energy-model.h (module 'uan'): void ns3::AcousticModemEnergyModel::SetIdlePowerW(double idlePowerW) [member function]
    cls.add_method('SetIdlePowerW',
                   'void',
                   [param('double', 'idlePowerW')])
    ## acoustic-modem-energy-model.h (module 'uan'): void ns3::AcousticModemEnergyModel::SetNode(ns3::Ptr<ns3::Node> node) [member function]
    cls.add_method('SetNode',
                   'void',
                   [param('ns3::Ptr< ns3::Node >', 'node')],
                   is_virtual=True)
    ## acoustic-modem-energy-model.h (module 'uan'): void ns3::AcousticModemEnergyModel::SetRxPowerW(double rxPowerW) [member function]
    cls.add_method('SetRxPowerW',
                   'void',
                   [param('double', 'rxPowerW')])
    ## acoustic-modem-energy-model.h (module 'uan'): void ns3::AcousticModemEnergyModel::SetSleepPowerW(double sleepPowerW) [member function]
    cls.add_method('SetSleepPowerW',
                   'void',
                   [param('double', 'sleepPowerW')])
    ## acoustic-modem-energy-model.h (module 'uan'): void ns3::AcousticModemEnergyModel::SetTxPowerW(double txPowerW) [member function]
    cls.add_method('SetTxPowerW',
                   'void',
                   [param('double', 'txPowerW')])
    ## acoustic-modem-energy-model.h (module 'uan'): void ns3::AcousticModemEnergyModel::DoDispose() [member function]
    cls.add_method('DoDispose',
                   'void',
                   [],
                   visibility='private', is_virtual=True)
    ## acoustic-modem-energy-model.h (module 'uan'): double ns3::AcousticModemEnergyModel::DoGetCurrentA() const [member function]
    cls.add_method('DoGetCurrentA',
                   'double',
                   [],
                   is_const=True, visibility='private', is_virtual=True)
    return
def register_Ns3AddressChecker_methods(root_module, cls):
    """Register constructors of ns3::AddressChecker (pybindgen-generated)."""
    ## address.h (module 'network'): ns3::AddressChecker::AddressChecker() [constructor]
    cls.add_constructor([])
    ## address.h (module 'network'): ns3::AddressChecker::AddressChecker(ns3::AddressChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AddressChecker const &', 'arg0')])
    return
def register_Ns3AddressValue_methods(root_module, cls):
    """Register constructors and methods of ns3::AddressValue (pybindgen-generated)."""
    ## address.h (module 'network'): ns3::AddressValue::AddressValue() [constructor]
    cls.add_constructor([])
    ## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::AddressValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AddressValue const &', 'arg0')])
    ## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::Address const & value) [constructor]
    cls.add_constructor([param('ns3::Address const &', 'value')])
    ## address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::AddressValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## address.h (module 'network'): bool ns3::AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## address.h (module 'network'): ns3::Address ns3::AddressValue::Get() const [member function]
    cls.add_method('Get',
                   'ns3::Address',
                   [],
                   is_const=True)
    ## address.h (module 'network'): std::string ns3::AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## address.h (module 'network'): void ns3::AddressValue::Set(ns3::Address const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::Address const &', 'value')])
    return
def register_Ns3HashImplementation_methods(root_module, cls):
    """Register ns3::Hash::Implementation, the abstract hash-function base.

    GetHash32 and clear are pure virtual; GetHash64 has a default
    implementation in the C++ base class.
    """
    ## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation(ns3::Hash::Implementation const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Hash::Implementation const &', 'arg0')])
    ## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation() [constructor]
    cls.add_constructor([])
    ## hash-function.h (module 'core'): uint32_t ns3::Hash::Implementation::GetHash32(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash32',
                   'uint32_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')],
                   is_pure_virtual=True, is_virtual=True)
    ## hash-function.h (module 'core'): uint64_t ns3::Hash::Implementation::GetHash64(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash64',
                   'uint64_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')],
                   is_virtual=True)
    ## hash-function.h (module 'core'): void ns3::Hash::Implementation::clear() [member function]
    cls.add_method('clear',
                   'void',
                   [],
                   is_pure_virtual=True, is_virtual=True)
    return
def register_Ns3HashFunctionFnv1a_methods(root_module, cls):
    """Register ns3::Hash::Function::Fnv1a (FNV-1a hash) methods (pybindgen-generated)."""
    ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a(ns3::Hash::Function::Fnv1a const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Hash::Function::Fnv1a const &', 'arg0')])
    ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a() [constructor]
    cls.add_constructor([])
    ## hash-fnv.h (module 'core'): uint32_t ns3::Hash::Function::Fnv1a::GetHash32(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash32',
                   'uint32_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')],
                   is_virtual=True)
    ## hash-fnv.h (module 'core'): uint64_t ns3::Hash::Function::Fnv1a::GetHash64(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash64',
                   'uint64_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')],
                   is_virtual=True)
    ## hash-fnv.h (module 'core'): void ns3::Hash::Function::Fnv1a::clear() [member function]
    cls.add_method('clear',
                   'void',
                   [],
                   is_virtual=True)
    return
def register_Ns3HashFunctionHash32_methods(root_module, cls):
    """Register ns3::Hash::Function::Hash32, a wrapper around a 32-bit hash function pointer."""
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Function::Hash32 const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Hash::Function::Hash32 const &', 'arg0')])
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Hash32Function_ptr hp) [constructor]
    cls.add_constructor([param('ns3::Hash::Hash32Function_ptr', 'hp')])
    ## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash32::GetHash32(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash32',
                   'uint32_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')],
                   is_virtual=True)
    ## hash-function.h (module 'core'): void ns3::Hash::Function::Hash32::clear() [member function]
    cls.add_method('clear',
                   'void',
                   [],
                   is_virtual=True)
    return
def register_Ns3HashFunctionHash64_methods(root_module, cls):
    """Register ns3::Hash::Function::Hash64, a wrapper around a 64-bit hash function pointer."""
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Function::Hash64 const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Hash::Function::Hash64 const &', 'arg0')])
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Hash64Function_ptr hp) [constructor]
    cls.add_constructor([param('ns3::Hash::Hash64Function_ptr', 'hp')])
    ## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash64::GetHash32(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash32',
                   'uint32_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')],
                   is_virtual=True)
    ## hash-function.h (module 'core'): uint64_t ns3::Hash::Function::Hash64::GetHash64(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash64',
                   'uint64_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')],
                   is_virtual=True)
    ## hash-function.h (module 'core'): void ns3::Hash::Function::Hash64::clear() [member function]
    cls.add_method('clear',
                   'void',
                   [],
                   is_virtual=True)
    return
def register_Ns3HashFunctionMurmur3_methods(root_module, cls):
    """Register ns3::Hash::Function::Murmur3 (MurmurHash3) methods (pybindgen-generated)."""
    ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3(ns3::Hash::Function::Murmur3 const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Hash::Function::Murmur3 const &', 'arg0')])
    ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3() [constructor]
    cls.add_constructor([])
    ## hash-murmur3.h (module 'core'): uint32_t ns3::Hash::Function::Murmur3::GetHash32(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash32',
                   'uint32_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')],
                   is_virtual=True)
    ## hash-murmur3.h (module 'core'): uint64_t ns3::Hash::Function::Murmur3::GetHash64(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash64',
                   'uint64_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')],
                   is_virtual=True)
    ## hash-murmur3.h (module 'core'): void ns3::Hash::Function::Murmur3::clear() [member function]
    cls.add_method('clear',
                   'void',
                   [],
                   is_virtual=True)
    return
def register_functions(root_module):
    """Register free functions of the 'uan' module, then recurse into submodules."""
    module = root_module
    ## uan-tx-mode.h (module 'uan'): extern ns3::Ptr<ns3::AttributeChecker const> ns3::MakeUanModesListChecker() [free function]
    module.add_function('MakeUanModesListChecker',
                        'ns3::Ptr< ns3::AttributeChecker const >',
                        [])
    register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module)
    register_functions_ns3_Hash(module.get_submodule('Hash'), root_module)
    register_functions_ns3_TracedValueCallback(module.get_submodule('TracedValueCallback'), root_module)
    register_functions_ns3_internal(module.get_submodule('internal'), root_module)
    return
def register_functions_ns3_FatalImpl(module, root_module):
    """No free functions to register in ns3::FatalImpl (generated placeholder)."""
    return
def register_functions_ns3_Hash(module, root_module):
    """Recurse into the ns3::Hash::Function submodule (no free functions at this level)."""
    register_functions_ns3_Hash_Function(module.get_submodule('Function'), root_module)
    return
def register_functions_ns3_Hash_Function(module, root_module):
    """No free functions to register in ns3::Hash::Function (generated placeholder)."""
    return
def register_functions_ns3_TracedValueCallback(module, root_module):
    """No free functions to register in ns3::TracedValueCallback (generated placeholder)."""
    return
def register_functions_ns3_internal(module, root_module):
    """No free functions to register in ns3::internal (generated placeholder)."""
    return
def main():
    """Build the module description and emit the generated C++ binding code to stdout."""
    out = FileCodeSink(sys.stdout)
    root_module = module_init()
    register_types(root_module)
    register_methods(root_module)
    register_functions(root_module)
    root_module.generate(out)

if __name__ == '__main__':
    main()
| gpl-2.0 |
pombredanne/MOG | nova/pci/pci_manager.py | 4 | 12345 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2013 Intel, Inc.
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import exception
from nova.objects import instance
from nova.objects import pci_device
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.pci import pci_request
from nova.pci import pci_stats
from nova.pci import pci_utils
LOG = logging.getLogger(__name__)
class PciDevTracker(object):
"""Manage pci devices in a compute node.
This class fetches pci passthrough information from hypervisor
and tracks the usage of these devices.
It's called by compute node resource tracker to allocate and free
devices to/from instances, and to update the available pci passthrough
devices information from hypervisor periodically. The devices
information is updated to DB when devices information is changed.
"""
    def __init__(self, node_id=None):
        """Create a pci device tracker.

        If a node_id is passed in, it will fetch pci devices information
        from database, otherwise, it will create an empty devices list
        and the resource tracker will update the node_id information later.
        """
        super(PciDevTracker, self).__init__()
        # Devices whose hypervisor-reported properties changed while still
        # assigned to an instance; synced lazily (keyed by PCI address).
        self.stale = {}
        self.node_id = node_id
        self.stats = pci_stats.PciDeviceStats()
        if node_id:
            # NOTE(review): 'context' here is the imported nova.context
            # *module*, not a RequestContext instance — looks wrong; confirm
            # whether this should be context.get_admin_context().
            self.pci_devs = pci_device.PciDeviceList.get_by_compute_node(
                context, node_id)
        else:
            self.pci_devs = pci_device.PciDeviceList()
        # Build self.claims / self.allocations / free-device stats from the list.
        self._initial_instance_usage()
def _initial_instance_usage(self):
self.allocations = collections.defaultdict(list)
self.claims = collections.defaultdict(list)
for dev in self.pci_devs:
uuid = dev['instance_uuid']
if dev['status'] == 'claimed':
self.claims[uuid].append(dev)
elif dev['status'] == 'allocated':
self.allocations[uuid].append(dev)
elif dev['status'] == 'available':
self.stats.add_device(dev)
def _filter_devices_for_spec(self, request_spec, pci_devs):
return [p for p in pci_devs
if pci_utils.pci_device_prop_match(p, request_spec)]
def _get_free_devices_for_request(self, pci_request, pci_devs):
count = pci_request.get('count', 1)
spec = pci_request.get('spec', [])
devs = self._filter_devices_for_spec(spec, pci_devs)
if len(devs) < count:
return None
else:
return devs[:count]
@property
def free_devs(self):
return [dev for dev in self.pci_devs if dev.status == 'available']
def get_free_devices_for_requests(self, pci_requests):
"""Select free pci devices for requests
Pci_requests is a list of pci_request dictionaries. Each dictionary
has three keys:
count: number of pci devices required, default 1
spec: the pci properties that the devices should meet
alias_name: alias the pci_request is translated from, optional
If any single pci_request cannot find any free devices, then the
entire request list will fail.
"""
alloc = []
for request in pci_requests:
available = self._get_free_devices_for_request(
request,
[p for p in self.free_devs if p not in alloc])
if not available:
return []
alloc.extend(available)
return alloc
    @property
    def all_devs(self):
        # Every tracked device regardless of status (available/claimed/allocated/...).
        return self.pci_devs
def save(self, context):
for dev in self.pci_devs:
if dev.obj_what_changed():
dev.save(context)
self.pci_devs.objects = [dev for dev in self.pci_devs
if dev['status'] != 'deleted']
    @property
    def pci_stats(self):
        """The PciDeviceStats pool tracking free-device counts."""
        return self.stats
def set_hvdevs(self, devices):
"""Sync the pci device tracker with hypervisor information.
To support pci device hot plug, we sync with the hypervisor
periodically, fetching all devices information from hypervisor,
update the tracker and sync the DB information.
Devices should not be hot-plugged when assigned to a guest,
but possibly the hypervisor has no such guarantee. The best
we can do is to give a warning if a device is changed
or removed while assigned.
"""
exist_addrs = set([dev['address'] for dev in self.pci_devs])
new_addrs = set([dev['address'] for dev in devices])
for existed in self.pci_devs:
if existed['address'] in exist_addrs - new_addrs:
try:
existed.remove()
except exception.PciDeviceInvalidStatus as e:
LOG.warn(_("Trying to remove device with %(status)s"
"ownership %(instance_uuid)s"), existed)
# Note(yjiang5): remove the device by force so that
# db entry is cleaned in next sync.
existed.status = 'removed'
else:
# Note(yjiang5): no need to update stats if an assigned
# device is hot removed.
self.stats.consume_device(existed)
else:
new_value = next((dev for dev in devices if
dev['address'] == existed['address']))
new_value['compute_node_id'] = self.node_id
if existed['status'] in ('claimed', 'allocated'):
# Pci properties may change while assigned because of
# hotplug or config changes. Although normally this should
# not happen.
# As the devices have been assigned to a instance, we defer
# the change till the instance is destroyed. We will
# not sync the new properties with database before that.
# TODO(yjiang5): Not sure if this is a right policy, but
# at least it avoids some confusion and, if needed,
# we can add more action like killing the instance
# by force in future.
self.stale[dev['address']] = dev
else:
existed.update_device(new_value)
for dev in [dev for dev in devices if
dev['address'] in new_addrs - exist_addrs]:
dev['compute_node_id'] = self.node_id
dev_obj = pci_device.PciDevice.create(dev)
self.pci_devs.objects.append(dev_obj)
self.stats.add_device(dev_obj)
def _claim_instance(self, instance, prefix=''):
pci_requests = pci_request.get_instance_pci_requests(
instance, prefix)
if not pci_requests:
return None
devs = self.get_free_devices_for_requests(pci_requests)
if not devs:
raise exception.PciDeviceRequestFailed(pci_requests)
for dev in devs:
dev.claim(instance)
self.stats.consume_device(dev)
return devs
def _allocate_instance(self, instance, devs):
for dev in devs:
dev.allocate(instance)
def _free_device(self, dev, instance=None):
dev.free(instance)
stale = self.stale.pop(dev['address'], None)
if stale:
dev.update_device(stale)
self.stats.add_device(dev)
def _free_instance(self, instance):
# Note(yjiang5): When a instance is resized, the devices in the
# destination node are claimed to the instance in prep_resize stage.
# However, the instance contains only allocated devices
# information, not the claimed one. So we can't use
# instance['pci_devices'] to check the devices to be freed.
for dev in self.pci_devs:
if (dev['status'] in ('claimed', 'allocated') and
dev['instance_uuid'] == instance['uuid']):
self._free_device(dev)
    def update_pci_for_instance(self, instance):
        """Update instance's pci usage information.

        The caller should hold the COMPUTE_RESOURCE_SEMAPHORE lock

        :param instance: dict-like instance exposing 'uuid', 'vm_state'
            and 'task_state' keys.
        """
        uuid = instance['uuid']
        vm_state = instance['vm_state']
        task_state = instance['task_state']
        if vm_state == vm_states.DELETED:
            # Instance is gone: release whichever bookkeeping entry we hold.
            if self.allocations.pop(uuid, None):
                self._free_instance(instance)
            elif self.claims.pop(uuid, None):
                self._free_instance(instance)
        elif task_state == task_states.RESIZE_MIGRATED:
            # Source side of a resize has finished: free the old allocation.
            devs = self.allocations.pop(uuid, None)
            if devs:
                self._free_instance(instance)
        elif task_state == task_states.RESIZE_FINISH:
            # Destination side: promote the resize claim to an allocation.
            devs = self.claims.pop(uuid, None)
            if devs:
                self._allocate_instance(instance, devs)
                self.allocations[uuid] = devs
        elif (uuid not in self.allocations and
                uuid not in self.claims):
            # Unknown instance: claim and allocate in one step.
            devs = self._claim_instance(instance)
            if devs:
                self._allocate_instance(instance, devs)
                self.allocations[uuid] = devs
def update_pci_for_migration(self, instance, sign=1):
"""Update instance's pci usage information when it is migrated.
The caller should hold the COMPUTE_RESOURCE_SEMAPHORE lock.
:param sign: claim devices for instance when sign is 1, remove
the claims when sign is -1
"""
uuid = instance['uuid']
if sign == 1 and uuid not in self.claims:
devs = self._claim_instance(instance, 'new_')
self.claims[uuid] = devs
if sign == -1 and uuid in self.claims:
self._free_instance(instance)
def clean_usage(self, instances, migrations, orphans):
"""Remove all usages for instances not passed in the parameter.
The caller should hold the COMPUTE_RESOURCE_SEMAPHORE lock
"""
existed = [inst['uuid'] for inst in instances]
existed += [mig['instance_uuid'] for mig in migrations]
existed += [inst['uuid'] for inst in orphans]
for uuid in self.claims.keys():
if uuid not in existed:
for dev in self.claims.pop(uuid):
self._free_device(dev)
for uuid in self.allocations.keys():
if uuid not in existed:
for dev in self.allocations.pop(uuid):
self._free_device(dev)
def set_compute_node_id(self, node_id):
"""Set the compute node id that this object is tracking for.
In current resource tracker implementation, the
compute_node entry is created in the last step of
update_available_resoruces, thus we have to lazily set the
compute_node_id at that time.
"""
if self.node_id and self.node_id != node_id:
raise exception.PciTrackerInvalidNodeId(node_id=self.node_id,
new_node_id=node_id)
self.node_id = node_id
for dev in self.pci_devs:
dev.compute_node_id = node_id
def get_instance_pci_devs(inst):
    """Get the devices assigned to the instance.

    Accepts either an Instance object (devices read directly from its
    pci_devices attribute) or a dict-like instance (devices fetched from
    the database by uuid).
    """
    if isinstance(inst, instance.Instance):
        return inst.pci_devices
    ctxt = context.get_admin_context()
    return pci_device.PciDeviceList.get_by_instance_uuid(ctxt, inst['uuid'])
| apache-2.0 |
rgommers/scipy | scipy/stats/_multivariate.py | 7 | 153934 | #
# Author: Joris Vankerschaver 2013
#
import math
import numpy as np
from numpy import asarray_chkfinite, asarray
import scipy.linalg
from scipy._lib import doccer
from scipy.special import gammaln, psi, multigammaln, xlogy, entr, betaln
from scipy._lib._util import check_random_state
from scipy.linalg.blas import drot
from scipy.linalg.misc import LinAlgError
from scipy.linalg.lapack import get_lapack_funcs
from ._discrete_distns import binom
from . import mvn
__all__ = ['multivariate_normal',
'matrix_normal',
'dirichlet',
'wishart',
'invwishart',
'multinomial',
'special_ortho_group',
'ortho_group',
'random_correlation',
'unitary_group',
'multivariate_t',
'multivariate_hypergeom']
_LOG_2PI = np.log(2 * np.pi)
_LOG_2 = np.log(2)
_LOG_PI = np.log(np.pi)
_doc_random_state = """\
random_state : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
"""
def _squeeze_output(out):
"""
Remove single-dimensional entries from array and convert to scalar,
if necessary.
"""
out = out.squeeze()
if out.ndim == 0:
out = out[()]
return out
def _eigvalsh_to_eps(spectrum, cond=None, rcond=None):
"""Determine which eigenvalues are "small" given the spectrum.
This is for compatibility across various linear algebra functions
that should agree about whether or not a Hermitian matrix is numerically
singular and what is its numerical matrix rank.
This is designed to be compatible with scipy.linalg.pinvh.
Parameters
----------
spectrum : 1d ndarray
Array of eigenvalues of a Hermitian matrix.
cond, rcond : float, optional
Cutoff for small eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are
considered zero.
If None or -1, suitable machine precision is used.
Returns
-------
eps : float
Magnitude cutoff for numerical negligibility.
"""
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = spectrum.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
eps = cond * np.max(abs(spectrum))
return eps
def _pinv_1d(v, eps=1e-5):
"""A helper function for computing the pseudoinverse.
Parameters
----------
v : iterable of numbers
This may be thought of as a vector of eigenvalues or singular values.
eps : float
Values with magnitude no greater than eps are considered negligible.
Returns
-------
v_pinv : 1d float ndarray
A vector of pseudo-inverted numbers.
"""
return np.array([0 if abs(x) <= eps else 1/x for x in v], dtype=float)
class _PSD:
    """
    Compute coordinated functions of a symmetric positive semidefinite matrix.

    This class addresses two issues.  Firstly it allows the pseudoinverse,
    the logarithm of the pseudo-determinant, and the rank of the matrix to be
    computed with a single call to eigh instead of three.  Secondly it allows
    these functions to be computed in a mutually consistent way: all of them
    share one decision about which eigenvalues are negligibly small.

    The results are designed to coordinate with scipy.linalg.pinvh() but not
    necessarily with np.linalg.det() or np.linalg.matrix_rank().

    Parameters
    ----------
    M : array_like
        Symmetric positive semidefinite matrix (2-D).
    cond, rcond : float, optional
        Cutoff for small eigenvalues.
        Singular values smaller than rcond * largest_eigenvalue are
        considered zero.  If None or -1, suitable machine precision is used.
    lower : bool, optional
        Whether the pertinent array data is taken from the lower
        or upper triangle of M. (Default: lower)
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite
        numbers.  Disabling may give a performance gain, but may result
        in problems (crashes, non-termination) if the inputs do contain
        infinities or NaNs.
    allow_singular : bool, optional
        Whether to allow a singular matrix. (Default: True)

    Notes
    -----
    The arguments are similar to those of scipy.linalg.pinvh().
    """

    def __init__(self, M, cond=None, rcond=None, lower=True,
                 check_finite=True, allow_singular=True):
        # eigh takes care of array conversion, chkfinite, and the assertion
        # that the matrix is square.
        eigvals, eigvecs = scipy.linalg.eigh(M, lower=lower,
                                             check_finite=check_finite)

        eps = _eigvalsh_to_eps(eigvals, cond, rcond)
        if np.min(eigvals) < -eps:
            raise ValueError('the input matrix must be positive semidefinite')
        significant = eigvals[eigvals > eps]
        if len(significant) < len(eigvals) and not allow_singular:
            raise np.linalg.LinAlgError('singular matrix')
        inv_vals = _pinv_1d(eigvals, eps)

        # Eagerly precomputed attributes.
        self.rank = len(significant)
        self.U = np.multiply(eigvecs, np.sqrt(inv_vals))
        self.log_pdet = np.sum(np.log(significant))

        # Pseudoinverse is computed lazily on first access.
        self._pinv = None

    @property
    def pinv(self):
        if self._pinv is None:
            self._pinv = np.dot(self.U, self.U.T)
        return self._pinv
class multi_rv_generic:
    """
    Class which encapsulates common functionality between all multivariate
    distributions.
    """

    def __init__(self, seed=None):
        super().__init__()
        self._random_state = check_random_state(seed)

    @property
    def random_state(self):
        """ Get or set the Generator object for generating random variates.

        If `seed` is None (or `np.random`), the `numpy.random.RandomState`
        singleton is used.
        If `seed` is an int, a new ``RandomState`` instance is used,
        seeded with `seed`.
        If `seed` is already a ``Generator`` or ``RandomState`` instance then
        that instance is used.
        """
        return self._random_state

    @random_state.setter
    def random_state(self, seed):
        self._random_state = check_random_state(seed)

    def _get_random_state(self, random_state):
        # Per-call override: fall back to the instance's state when the
        # caller did not supply one.
        if random_state is None:
            return self._random_state
        return check_random_state(random_state)
class multi_rv_frozen:
    """
    Class which encapsulates common functionality between all frozen
    multivariate distributions.
    """

    @property
    def random_state(self):
        # Delegate to the wrapped distribution object's state.
        return self._dist._random_state

    @random_state.setter
    def random_state(self, seed):
        self._dist._random_state = check_random_state(seed)
_mvn_doc_default_callparams = """\
mean : array_like, optional
Mean of the distribution (default zero)
cov : array_like, optional
Covariance matrix of the distribution (default one)
allow_singular : bool, optional
Whether to allow a singular covariance matrix. (Default: False)
"""
_mvn_doc_callparams_note = \
"""Setting the parameter `mean` to `None` is equivalent to having `mean`
be the zero-vector. The parameter `cov` can be a scalar, in which case
the covariance matrix is the identity times that value, a vector of
diagonal entries for the covariance matrix, or a two-dimensional
array_like.
"""
_mvn_doc_frozen_callparams = ""
_mvn_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
mvn_docdict_params = {
'_mvn_doc_default_callparams': _mvn_doc_default_callparams,
'_mvn_doc_callparams_note': _mvn_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
mvn_docdict_noparams = {
'_mvn_doc_default_callparams': _mvn_doc_frozen_callparams,
'_mvn_doc_callparams_note': _mvn_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class multivariate_normal_gen(multi_rv_generic):
    r"""A multivariate normal random variable.

    The `mean` keyword specifies the mean. The `cov` keyword specifies the
    covariance matrix.

    Methods
    -------
    ``pdf(x, mean=None, cov=1, allow_singular=False)``
        Probability density function.
    ``logpdf(x, mean=None, cov=1, allow_singular=False)``
        Log of the probability density function.
    ``cdf(x, mean=None, cov=1, allow_singular=False, maxpts=1000000*dim, abseps=1e-5, releps=1e-5)``
        Cumulative distribution function.
    ``logcdf(x, mean=None, cov=1, allow_singular=False, maxpts=1000000*dim, abseps=1e-5, releps=1e-5)``
        Log of the cumulative distribution function.
    ``rvs(mean=None, cov=1, size=1, random_state=None)``
        Draw random samples from a multivariate normal distribution.
    ``entropy()``
        Compute the differential entropy of the multivariate normal.

    Parameters
    ----------
    x : array_like
        Quantiles, with the last axis of `x` denoting the components.
    %(_mvn_doc_default_callparams)s
    %(_doc_random_state)s

    Alternatively, the object may be called (as a function) to fix the mean
    and covariance parameters, returning a "frozen" multivariate normal
    random variable:

    rv = multivariate_normal(mean=None, cov=1, allow_singular=False)
        - Frozen object with the same methods but holding the given
          mean and covariance fixed.

    Notes
    -----
    %(_mvn_doc_callparams_note)s

    The covariance matrix `cov` must be a (symmetric) positive
    semi-definite matrix. The determinant and inverse of `cov` are computed
    as the pseudo-determinant and pseudo-inverse, respectively, so
    that `cov` does not need to have full rank.

    The probability density function for `multivariate_normal` is

    .. math::

        f(x) = \frac{1}{\sqrt{(2 \pi)^k \det \Sigma}}
               \exp\left( -\frac{1}{2} (x - \mu)^T \Sigma^{-1} (x - \mu) \right),

    where :math:`\mu` is the mean, :math:`\Sigma` the covariance matrix,
    and :math:`k` is the dimension of the space where :math:`x` takes values.

    .. versionadded:: 0.14.0

    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> from scipy.stats import multivariate_normal

    >>> x = np.linspace(0, 5, 10, endpoint=False)
    >>> y = multivariate_normal.pdf(x, mean=2.5, cov=0.5); y
    array([ 0.00108914,  0.01033349,  0.05946514,  0.20755375,  0.43939129,
            0.56418958,  0.43939129,  0.20755375,  0.05946514,  0.01033349])
    >>> fig1 = plt.figure()
    >>> ax = fig1.add_subplot(111)
    >>> ax.plot(x, y)

    The input quantiles can be any shape of array, as long as the last
    axis labels the components.  This allows us for instance to
    display the frozen pdf for a non-isotropic random variable in 2D as
    follows:

    >>> x, y = np.mgrid[-1:1:.01, -1:1:.01]
    >>> pos = np.dstack((x, y))
    >>> rv = multivariate_normal([0.5, -0.2], [[2.0, 0.3], [0.3, 0.5]])
    >>> fig2 = plt.figure()
    >>> ax2 = fig2.add_subplot(111)
    >>> ax2.contourf(x, y, rv.pdf(pos))

    """

    def __init__(self, seed=None):
        super().__init__(seed)
        # Substitute the shared parameter-doc fragments into the class doc.
        self.__doc__ = doccer.docformat(self.__doc__, mvn_docdict_params)

    def __call__(self, mean=None, cov=1, allow_singular=False, seed=None):
        """Create a frozen multivariate normal distribution.

        See `multivariate_normal_frozen` for more information.
        """
        return multivariate_normal_frozen(mean, cov,
                                          allow_singular=allow_singular,
                                          seed=seed)

    def _process_parameters(self, dim, mean, cov):
        """
        Infer dimensionality from mean or covariance matrix, ensure that
        mean and covariance are full vector resp. matrix.
        """
        # Try to infer dimensionality
        if dim is None:
            if mean is None:
                if cov is None:
                    dim = 1
                else:
                    cov = np.asarray(cov, dtype=float)
                    if cov.ndim < 2:
                        dim = 1
                    else:
                        dim = cov.shape[0]
            else:
                mean = np.asarray(mean, dtype=float)
                dim = mean.size
        else:
            if not np.isscalar(dim):
                raise ValueError("Dimension of random variable must be "
                                 "a scalar.")

        # Check input sizes and return full arrays for mean and cov if
        # necessary
        if mean is None:
            mean = np.zeros(dim)
        mean = np.asarray(mean, dtype=float)

        if cov is None:
            cov = 1.0
        cov = np.asarray(cov, dtype=float)

        if dim == 1:
            mean.shape = (1,)
            cov.shape = (1, 1)

        if mean.ndim != 1 or mean.shape[0] != dim:
            raise ValueError("Array 'mean' must be a vector of length %d." %
                             dim)
        if cov.ndim == 0:
            # Scalar covariance: isotropic, scaled identity.
            cov = cov * np.eye(dim)
        elif cov.ndim == 1:
            # 1-d covariance: entries of a diagonal matrix.
            cov = np.diag(cov)
        elif cov.ndim == 2 and cov.shape != (dim, dim):
            rows, cols = cov.shape
            if rows != cols:
                msg = ("Array 'cov' must be square if it is two dimensional,"
                       " but cov.shape = %s." % str(cov.shape))
            else:
                msg = ("Dimension mismatch: array 'cov' is of shape %s,"
                       " but 'mean' is a vector of length %d.")
                msg = msg % (str(cov.shape), len(mean))
            raise ValueError(msg)
        elif cov.ndim > 2:
            raise ValueError("Array 'cov' must be at most two-dimensional,"
                             " but cov.ndim = %d" % cov.ndim)

        return dim, mean, cov

    def _process_quantiles(self, x, dim):
        """
        Adjust quantiles array so that last axis labels the components of
        each data point.
        """
        x = np.asarray(x, dtype=float)

        if x.ndim == 0:
            x = x[np.newaxis]
        elif x.ndim == 1:
            if dim == 1:
                x = x[:, np.newaxis]
            else:
                x = x[np.newaxis, :]

        return x

    def _logpdf(self, x, mean, prec_U, log_det_cov, rank):
        """Log of the multivariate normal probability density function.

        Parameters
        ----------
        x : ndarray
            Points at which to evaluate the log of the probability
            density function
        mean : ndarray
            Mean of the distribution
        prec_U : ndarray
            A decomposition such that np.dot(prec_U, prec_U.T)
            is the precision matrix, i.e. inverse of the covariance matrix.
        log_det_cov : float
            Logarithm of the determinant of the covariance matrix
        rank : int
            Rank of the covariance matrix.

        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'logpdf' instead.

        """
        dev = x - mean
        # Squared Mahalanobis distance via the precision factor prec_U.
        maha = np.sum(np.square(np.dot(dev, prec_U)), axis=-1)
        return -0.5 * (rank * _LOG_2PI + log_det_cov + maha)

    def logpdf(self, x, mean=None, cov=1, allow_singular=False):
        """Log of the multivariate normal probability density function.

        Parameters
        ----------
        x : array_like
            Quantiles, with the last axis of `x` denoting the components.
        %(_mvn_doc_default_callparams)s

        Returns
        -------
        pdf : ndarray or scalar
            Log of the probability density function evaluated at `x`

        Notes
        -----
        %(_mvn_doc_callparams_note)s

        """
        dim, mean, cov = self._process_parameters(None, mean, cov)
        x = self._process_quantiles(x, dim)
        psd = _PSD(cov, allow_singular=allow_singular)
        out = self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank)
        return _squeeze_output(out)

    def pdf(self, x, mean=None, cov=1, allow_singular=False):
        """Multivariate normal probability density function.

        Parameters
        ----------
        x : array_like
            Quantiles, with the last axis of `x` denoting the components.
        %(_mvn_doc_default_callparams)s

        Returns
        -------
        pdf : ndarray or scalar
            Probability density function evaluated at `x`

        Notes
        -----
        %(_mvn_doc_callparams_note)s

        """
        dim, mean, cov = self._process_parameters(None, mean, cov)
        x = self._process_quantiles(x, dim)
        psd = _PSD(cov, allow_singular=allow_singular)
        out = np.exp(self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank))
        return _squeeze_output(out)

    def _cdf(self, x, mean, cov, maxpts, abseps, releps):
        """Multivariate normal cumulative distribution function.

        Parameters
        ----------
        x : ndarray
            Points at which to evaluate the cumulative distribution function.
        mean : ndarray
            Mean of the distribution
        cov : array_like
            Covariance matrix of the distribution
        maxpts : integer
            The maximum number of points to use for integration
        abseps : float
            Absolute error tolerance
        releps : float
            Relative error tolerance

        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'cdf' instead.

        .. versionadded:: 1.0.0

        """
        # Integrate from -inf in every dimension up to each point in x.
        lower = np.full(mean.shape, -np.inf)
        # mvnun expects 1-d arguments, so process points sequentially
        func1d = lambda x_slice: mvn.mvnun(lower, x_slice, mean, cov,
                                           maxpts, abseps, releps)[0]
        out = np.apply_along_axis(func1d, -1, x)
        return _squeeze_output(out)

    def logcdf(self, x, mean=None, cov=1, allow_singular=False, maxpts=None,
               abseps=1e-5, releps=1e-5):
        """Log of the multivariate normal cumulative distribution function.

        Parameters
        ----------
        x : array_like
            Quantiles, with the last axis of `x` denoting the components.
        %(_mvn_doc_default_callparams)s
        maxpts : integer, optional
            The maximum number of points to use for integration
            (default `1000000*dim`)
        abseps : float, optional
            Absolute error tolerance (default 1e-5)
        releps : float, optional
            Relative error tolerance (default 1e-5)

        Returns
        -------
        cdf : ndarray or scalar
            Log of the cumulative distribution function evaluated at `x`

        Notes
        -----
        %(_mvn_doc_callparams_note)s

        .. versionadded:: 1.0.0

        """
        dim, mean, cov = self._process_parameters(None, mean, cov)
        x = self._process_quantiles(x, dim)
        # Use _PSD to check covariance matrix
        _PSD(cov, allow_singular=allow_singular)
        if not maxpts:
            maxpts = 1000000 * dim
        out = np.log(self._cdf(x, mean, cov, maxpts, abseps, releps))
        return out

    def cdf(self, x, mean=None, cov=1, allow_singular=False, maxpts=None,
            abseps=1e-5, releps=1e-5):
        """Multivariate normal cumulative distribution function.

        Parameters
        ----------
        x : array_like
            Quantiles, with the last axis of `x` denoting the components.
        %(_mvn_doc_default_callparams)s
        maxpts : integer, optional
            The maximum number of points to use for integration
            (default `1000000*dim`)
        abseps : float, optional
            Absolute error tolerance (default 1e-5)
        releps : float, optional
            Relative error tolerance (default 1e-5)

        Returns
        -------
        cdf : ndarray or scalar
            Cumulative distribution function evaluated at `x`

        Notes
        -----
        %(_mvn_doc_callparams_note)s

        .. versionadded:: 1.0.0

        """
        dim, mean, cov = self._process_parameters(None, mean, cov)
        x = self._process_quantiles(x, dim)
        # Use _PSD to check covariance matrix
        _PSD(cov, allow_singular=allow_singular)
        if not maxpts:
            maxpts = 1000000 * dim
        out = self._cdf(x, mean, cov, maxpts, abseps, releps)
        return out

    def rvs(self, mean=None, cov=1, size=1, random_state=None):
        """Draw random samples from a multivariate normal distribution.

        Parameters
        ----------
        %(_mvn_doc_default_callparams)s
        size : integer, optional
            Number of samples to draw (default 1).
        %(_doc_random_state)s

        Returns
        -------
        rvs : ndarray or scalar
            Random variates of size (`size`, `N`), where `N` is the
            dimension of the random variable.

        Notes
        -----
        %(_mvn_doc_callparams_note)s

        """
        dim, mean, cov = self._process_parameters(None, mean, cov)
        random_state = self._get_random_state(random_state)
        out = random_state.multivariate_normal(mean, cov, size)
        return _squeeze_output(out)

    def entropy(self, mean=None, cov=1):
        """Compute the differential entropy of the multivariate normal.

        Parameters
        ----------
        %(_mvn_doc_default_callparams)s

        Returns
        -------
        h : scalar
            Entropy of the multivariate normal distribution

        Notes
        -----
        %(_mvn_doc_callparams_note)s

        """
        dim, mean, cov = self._process_parameters(None, mean, cov)
        # Differential entropy: 0.5 * log(det(2*pi*e*cov)), computed via
        # slogdet for numerical stability.
        _, logdet = np.linalg.slogdet(2 * np.pi * np.e * cov)
        return 0.5 * logdet
# Module-level instance; calling it returns a frozen distribution, and its
# methods accept distribution parameters directly.
multivariate_normal = multivariate_normal_gen()
class multivariate_normal_frozen(multi_rv_frozen):
    """Multivariate normal distribution with fixed mean and covariance."""

    def __init__(self, mean=None, cov=1, allow_singular=False, seed=None,
                 maxpts=None, abseps=1e-5, releps=1e-5):
        """Create a frozen multivariate normal distribution.

        Parameters
        ----------
        mean : array_like, optional
            Mean of the distribution (default zero)
        cov : array_like, optional
            Covariance matrix of the distribution (default one)
        allow_singular : bool, optional
            If this flag is True then tolerate a singular
            covariance matrix (default False).
        seed : {None, int, `numpy.random.Generator`,
                `numpy.random.RandomState`}, optional
            If `seed` is None (or `np.random`), the `numpy.random.RandomState`
            singleton is used.
            If `seed` is an int, a new ``RandomState`` instance is used,
            seeded with `seed`.
            If `seed` is already a ``Generator`` or ``RandomState`` instance
            then that instance is used.
        maxpts : integer, optional
            The maximum number of points to use for integration of the
            cumulative distribution function (default `1000000*dim`)
        abseps : float, optional
            Absolute error tolerance for the cumulative distribution function
            (default 1e-5)
        releps : float, optional
            Relative error tolerance for the cumulative distribution function
            (default 1e-5)

        Examples
        --------
        When called with the default parameters, this will create a 1D random
        variable with mean 0 and covariance 1:

        >>> from scipy.stats import multivariate_normal
        >>> r = multivariate_normal()
        >>> r.mean
        array([ 0.])
        >>> r.cov
        array([[1.]])

        """
        self._dist = multivariate_normal_gen(seed)
        self.dim, self.mean, self.cov = self._dist._process_parameters(
            None, mean, cov)
        # _PSD also validates positive semidefiniteness of the covariance.
        self.cov_info = _PSD(self.cov, allow_singular=allow_singular)
        if not maxpts:
            maxpts = 1000000 * self.dim
        self.maxpts = maxpts
        self.abseps = abseps
        self.releps = releps

    def logpdf(self, x):
        x = self._dist._process_quantiles(x, self.dim)
        out = self._dist._logpdf(x, self.mean, self.cov_info.U,
                                 self.cov_info.log_pdet, self.cov_info.rank)
        return _squeeze_output(out)

    def pdf(self, x):
        return np.exp(self.logpdf(x))

    def logcdf(self, x):
        return np.log(self.cdf(x))

    def cdf(self, x):
        x = self._dist._process_quantiles(x, self.dim)
        out = self._dist._cdf(x, self.mean, self.cov, self.maxpts, self.abseps,
                              self.releps)
        return _squeeze_output(out)

    def rvs(self, size=1, random_state=None):
        return self._dist.rvs(self.mean, self.cov, size, random_state)

    def entropy(self):
        """Computes the differential entropy of the multivariate normal.

        Returns
        -------
        h : scalar
            Entropy of the multivariate normal distribution

        """
        # Uses the eigendecomposition cached in cov_info instead of
        # recomputing a determinant.
        log_pdet = self.cov_info.log_pdet
        rank = self.cov_info.rank
        return 0.5 * (rank * (_LOG_2PI + 1) + log_pdet)
# Set frozen generator docstrings from corresponding docstrings in
# multivariate_normal_gen and fill in default strings in class docstrings.
# The frozen variants get the "noparams" substitutions since their
# parameters were fixed at construction time.
for name in ['logpdf', 'pdf', 'logcdf', 'cdf', 'rvs']:
    method = multivariate_normal_gen.__dict__[name]
    method_frozen = multivariate_normal_frozen.__dict__[name]
    method_frozen.__doc__ = doccer.docformat(method.__doc__,
                                             mvn_docdict_noparams)
    method.__doc__ = doccer.docformat(method.__doc__, mvn_docdict_params)
_matnorm_doc_default_callparams = """\
mean : array_like, optional
Mean of the distribution (default: `None`)
rowcov : array_like, optional
Among-row covariance matrix of the distribution (default: `1`)
colcov : array_like, optional
Among-column covariance matrix of the distribution (default: `1`)
"""
_matnorm_doc_callparams_note = \
"""If `mean` is set to `None` then a matrix of zeros is used for the mean.
The dimensions of this matrix are inferred from the shape of `rowcov` and
`colcov`, if these are provided, or set to `1` if ambiguous.
`rowcov` and `colcov` can be two-dimensional array_likes specifying the
covariance matrices directly. Alternatively, a one-dimensional array will
be be interpreted as the entries of a diagonal matrix, and a scalar or
zero-dimensional array will be interpreted as this value times the
identity matrix.
"""
_matnorm_doc_frozen_callparams = ""
_matnorm_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
matnorm_docdict_params = {
'_matnorm_doc_default_callparams': _matnorm_doc_default_callparams,
'_matnorm_doc_callparams_note': _matnorm_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
matnorm_docdict_noparams = {
'_matnorm_doc_default_callparams': _matnorm_doc_frozen_callparams,
'_matnorm_doc_callparams_note': _matnorm_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class matrix_normal_gen(multi_rv_generic):
r"""A matrix normal random variable.
The `mean` keyword specifies the mean. The `rowcov` keyword specifies the
among-row covariance matrix. The 'colcov' keyword specifies the
among-column covariance matrix.
Methods
-------
``pdf(X, mean=None, rowcov=1, colcov=1)``
Probability density function.
``logpdf(X, mean=None, rowcov=1, colcov=1)``
Log of the probability density function.
``rvs(mean=None, rowcov=1, colcov=1, size=1, random_state=None)``
Draw random samples.
Parameters
----------
X : array_like
Quantiles, with the last two axes of `X` denoting the components.
%(_matnorm_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the mean
and covariance parameters, returning a "frozen" matrix normal
random variable:
rv = matrix_normal(mean=None, rowcov=1, colcov=1)
- Frozen object with the same methods but holding the given
mean and covariance fixed.
Notes
-----
%(_matnorm_doc_callparams_note)s
The covariance matrices specified by `rowcov` and `colcov` must be
(symmetric) positive definite. If the samples in `X` are
:math:`m \times n`, then `rowcov` must be :math:`m \times m` and
`colcov` must be :math:`n \times n`. `mean` must be the same shape as `X`.
The probability density function for `matrix_normal` is
.. math::
f(X) = (2 \pi)^{-\frac{mn}{2}}|U|^{-\frac{n}{2}} |V|^{-\frac{m}{2}}
\exp\left( -\frac{1}{2} \mathrm{Tr}\left[ U^{-1} (X-M) V^{-1}
(X-M)^T \right] \right),
where :math:`M` is the mean, :math:`U` the among-row covariance matrix,
:math:`V` the among-column covariance matrix.
The `allow_singular` behaviour of the `multivariate_normal`
distribution is not currently supported. Covariance matrices must be
full rank.
The `matrix_normal` distribution is closely related to the
`multivariate_normal` distribution. Specifically, :math:`\mathrm{Vec}(X)`
(the vector formed by concatenating the columns of :math:`X`) has a
multivariate normal distribution with mean :math:`\mathrm{Vec}(M)`
and covariance :math:`V \otimes U` (where :math:`\otimes` is the Kronecker
product). Sampling and pdf evaluation are
:math:`\mathcal{O}(m^3 + n^3 + m^2 n + m n^2)` for the matrix normal, but
:math:`\mathcal{O}(m^3 n^3)` for the equivalent multivariate normal,
making this equivalent form algorithmically inefficient.
.. versionadded:: 0.17.0
Examples
--------
>>> from scipy.stats import matrix_normal
>>> M = np.arange(6).reshape(3,2); M
array([[0, 1],
[2, 3],
[4, 5]])
>>> U = np.diag([1,2,3]); U
array([[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
>>> V = 0.3*np.identity(2); V
array([[ 0.3, 0. ],
[ 0. , 0.3]])
>>> X = M + 0.1; X
array([[ 0.1, 1.1],
[ 2.1, 3.1],
[ 4.1, 5.1]])
>>> matrix_normal.pdf(X, mean=M, rowcov=U, colcov=V)
0.023410202050005054
>>> # Equivalent multivariate normal
>>> from scipy.stats import multivariate_normal
>>> vectorised_X = X.T.flatten()
>>> equiv_mean = M.T.flatten()
>>> equiv_cov = np.kron(V,U)
>>> multivariate_normal.pdf(vectorised_X, mean=equiv_mean, cov=equiv_cov)
0.023410202050005054
"""
    def __init__(self, seed=None):
        super().__init__(seed)
        # Substitute the shared parameter-doc fragments into the class doc.
        self.__doc__ = doccer.docformat(self.__doc__, matnorm_docdict_params)
    def __call__(self, mean=None, rowcov=1, colcov=1, seed=None):
        """Create a frozen matrix normal distribution.

        See `matrix_normal_frozen` for more information.

        """
        return matrix_normal_frozen(mean, rowcov, colcov, seed=seed)
def _process_parameters(self, mean, rowcov, colcov):
"""
Infer dimensionality from mean or covariance matrices. Handle
defaults. Ensure compatible dimensions.
"""
# Process mean
if mean is not None:
mean = np.asarray(mean, dtype=float)
meanshape = mean.shape
if len(meanshape) != 2:
raise ValueError("Array `mean` must be two dimensional.")
if np.any(meanshape == 0):
raise ValueError("Array `mean` has invalid shape.")
# Process among-row covariance
rowcov = np.asarray(rowcov, dtype=float)
if rowcov.ndim == 0:
if mean is not None:
rowcov = rowcov * np.identity(meanshape[0])
else:
rowcov = rowcov * np.identity(1)
elif rowcov.ndim == 1:
rowcov = np.diag(rowcov)
rowshape = rowcov.shape
if len(rowshape) != 2:
raise ValueError("`rowcov` must be a scalar or a 2D array.")
if rowshape[0] != rowshape[1]:
raise ValueError("Array `rowcov` must be square.")
if rowshape[0] == 0:
raise ValueError("Array `rowcov` has invalid shape.")
numrows = rowshape[0]
# Process among-column covariance
colcov = np.asarray(colcov, dtype=float)
if colcov.ndim == 0:
if mean is not None:
colcov = colcov * np.identity(meanshape[1])
else:
colcov = colcov * np.identity(1)
elif colcov.ndim == 1:
colcov = np.diag(colcov)
colshape = colcov.shape
if len(colshape) != 2:
raise ValueError("`colcov` must be a scalar or a 2D array.")
if colshape[0] != colshape[1]:
raise ValueError("Array `colcov` must be square.")
if colshape[0] == 0:
raise ValueError("Array `colcov` has invalid shape.")
numcols = colshape[0]
# Ensure mean and covariances compatible
if mean is not None:
if meanshape[0] != numrows:
raise ValueError("Arrays `mean` and `rowcov` must have the "
"same number of rows.")
if meanshape[1] != numcols:
raise ValueError("Arrays `mean` and `colcov` must have the "
"same number of columns.")
else:
mean = np.zeros((numrows, numcols))
dims = (numrows, numcols)
return dims, mean, rowcov, colcov
def _process_quantiles(self, X, dims):
"""
Adjust quantiles array so that last two axes labels the components of
each data point.
"""
X = np.asarray(X, dtype=float)
if X.ndim == 2:
X = X[np.newaxis, :]
if X.shape[-2:] != dims:
raise ValueError("The shape of array `X` is not compatible "
"with the distribution parameters.")
return X
    def _logpdf(self, dims, X, mean, row_prec_rt, log_det_rowcov,
                col_prec_rt, log_det_colcov):
        """Log of the matrix normal probability density function.

        Parameters
        ----------
        dims : tuple
            Dimensions of the matrix variates
        X : ndarray
            Points at which to evaluate the log of the probability
            density function
        mean : ndarray
            Mean of the distribution
        row_prec_rt : ndarray
            A decomposition such that np.dot(row_prec_rt, row_prec_rt.T)
            is the inverse of the among-row covariance matrix
        log_det_rowcov : float
            Logarithm of the determinant of the among-row covariance matrix
        col_prec_rt : ndarray
            A decomposition such that np.dot(col_prec_rt, col_prec_rt.T)
            is the inverse of the among-column covariance matrix
        log_det_colcov : float
            Logarithm of the determinant of the among-column covariance matrix

        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'logpdf' instead.

        """
        numrows, numcols = dims
        # Move the column axis to the front so that the batched np.dot
        # below contracts the row axis of every deviation with row_prec_rt.
        roll_dev = np.rollaxis(X-mean, axis=-1, start=0)
        # Whiten the deviations on both sides; the squared Frobenius norm
        # of the whitened deviation is the trace term of the exponent.
        scale_dev = np.tensordot(col_prec_rt.T,
                                 np.dot(roll_dev, row_prec_rt), 1)
        maha = np.sum(np.sum(np.square(scale_dev), axis=-1), axis=0)
        # Normalization constant plus the quadratic form, all in log space.
        return -0.5 * (numrows*numcols*_LOG_2PI + numcols*log_det_rowcov
                       + numrows*log_det_colcov + maha)
    def logpdf(self, X, mean=None, rowcov=1, colcov=1):
        """Log of the matrix normal probability density function.

        Parameters
        ----------
        X : array_like
            Quantiles, with the last two axes of `X` denoting the components.
        %(_matnorm_doc_default_callparams)s

        Returns
        -------
        logpdf : ndarray
            Log of the probability density function evaluated at `X`

        Notes
        -----
        %(_matnorm_doc_callparams_note)s

        """
        dims, mean, rowcov, colcov = self._process_parameters(mean, rowcov,
                                                              colcov)
        X = self._process_quantiles(X, dims)
        # _PSD supplies a square root of the precision matrix (.U) and the
        # covariance log-determinant; singular covariances are rejected.
        rowpsd = _PSD(rowcov, allow_singular=False)
        colpsd = _PSD(colcov, allow_singular=False)
        out = self._logpdf(dims, X, mean, rowpsd.U, rowpsd.log_pdet, colpsd.U,
                           colpsd.log_pdet)
        return _squeeze_output(out)
def pdf(self, X, mean=None, rowcov=1, colcov=1):
"""Matrix normal probability density function.
Parameters
----------
X : array_like
Quantiles, with the last two axes of `X` denoting the components.
%(_matnorm_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `X`
Notes
-----
%(_matnorm_doc_callparams_note)s
"""
return np.exp(self.logpdf(X, mean, rowcov, colcov))
    def rvs(self, mean=None, rowcov=1, colcov=1, size=1, random_state=None):
        """Draw random samples from a matrix normal distribution.

        Parameters
        ----------
        %(_matnorm_doc_default_callparams)s
        size : integer, optional
            Number of samples to draw (default 1).
        %(_doc_random_state)s

        Returns
        -------
        rvs : ndarray or scalar
            Random variates of size (`size`, `dims`), where `dims` is the
            dimension of the random matrices.

        Notes
        -----
        %(_matnorm_doc_callparams_note)s

        """
        size = int(size)
        dims, mean, rowcov, colcov = self._process_parameters(mean, rowcov,
                                                              colcov)
        # Lower Cholesky factors: if Z has iid standard normal entries,
        # rowchol Z colchol' + mean has the requested matrix normal law.
        rowchol = scipy.linalg.cholesky(rowcov, lower=True)
        colchol = scipy.linalg.cholesky(colcov, lower=True)
        random_state = self._get_random_state(random_state)
        # Axes are laid out (cols, samples, rows) so np.dot/tensordot
        # contract the correct pairs; rollaxis restores (samples, rows, cols).
        std_norm = random_state.standard_normal(size=(dims[1], size, dims[0]))
        roll_rvs = np.tensordot(colchol, np.dot(std_norm, rowchol.T), 1)
        out = np.rollaxis(roll_rvs.T, axis=1, start=0) + mean[np.newaxis, :, :]
        if size == 1:
            # Single draw: drop the leading sample axis.
            out = out.reshape(mean.shape)
        return out
# Module-level instance exposing pdf/logpdf/rvs directly; calling it
# returns a frozen distribution (see matrix_normal_frozen).
matrix_normal = matrix_normal_gen()
class matrix_normal_frozen(multi_rv_frozen):
    """Create a frozen matrix normal distribution.

    Parameters
    ----------
    %(_matnorm_doc_default_callparams)s
    seed : {None, int, `numpy.random.Generator`,
            `numpy.random.RandomState`}, optional
        If `seed` is None (or `np.random`), the `numpy.random.RandomState`
        singleton is used.
        If `seed` is an int, a new ``RandomState`` instance is used,
        seeded with `seed`.
        If `seed` is already a ``Generator`` or ``RandomState`` instance
        then that instance is used.

    Examples
    --------
    >>> from scipy.stats import matrix_normal
    >>> distn = matrix_normal(mean=np.zeros((3,3)))
    >>> X = distn.rvs(); X
    array([[-0.02976962,  0.93339138, -0.09663178],
           [ 0.67405524,  0.28250467, -0.93308929],
           [-0.31144782,  0.74535536,  1.30412916]])
    >>> distn.pdf(X)
    2.5160642368346784e-05
    >>> distn.logpdf(X)
    -10.590229595124615
    """
    def __init__(self, mean=None, rowcov=1, colcov=1, seed=None):
        self._dist = matrix_normal_gen(seed)
        self.dims, self.mean, self.rowcov, self.colcov = \
            self._dist._process_parameters(mean, rowcov, colcov)
        # Precompute the PSD factorizations once so repeated pdf/logpdf
        # calls avoid redundant decompositions.
        self.rowpsd = _PSD(self.rowcov, allow_singular=False)
        self.colpsd = _PSD(self.colcov, allow_singular=False)
    def logpdf(self, X):
        X = self._dist._process_quantiles(X, self.dims)
        # Reuse the cached precision roots and log-determinants.
        out = self._dist._logpdf(self.dims, X, self.mean, self.rowpsd.U,
                                 self.rowpsd.log_pdet, self.colpsd.U,
                                 self.colpsd.log_pdet)
        return _squeeze_output(out)
    def pdf(self, X):
        return np.exp(self.logpdf(X))
    def rvs(self, size=1, random_state=None):
        return self._dist.rvs(self.mean, self.rowcov, self.colcov, size,
                              random_state)
# Set frozen generator docstrings from corresponding docstrings in
# matrix_normal_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs']:
    method = matrix_normal_gen.__dict__[name]
    method_frozen = matrix_normal_frozen.__dict__[name]
    method_frozen.__doc__ = doccer.docformat(method.__doc__,
                                             matnorm_docdict_noparams)
    method.__doc__ = doccer.docformat(method.__doc__, matnorm_docdict_params)
# Shared docstring fragments substituted by doccer.docformat into the
# Dirichlet docstrings; the "noparams" variant is used for frozen instances.
_dirichlet_doc_default_callparams = """\
alpha : array_like
    The concentration parameters. The number of entries determines the
    dimensionality of the distribution.
"""
_dirichlet_doc_frozen_callparams = ""
_dirichlet_doc_frozen_callparams_note = \
    """See class definition for a detailed description of parameters."""
dirichlet_docdict_params = {
    '_dirichlet_doc_default_callparams': _dirichlet_doc_default_callparams,
    '_doc_random_state': _doc_random_state
}
dirichlet_docdict_noparams = {
    '_dirichlet_doc_default_callparams': _dirichlet_doc_frozen_callparams,
    '_doc_random_state': _doc_random_state
}
def _dirichlet_check_parameters(alpha):
alpha = np.asarray(alpha)
if np.min(alpha) <= 0:
raise ValueError("All parameters must be greater than 0")
elif alpha.ndim != 1:
raise ValueError("Parameter vector 'a' must be one dimensional, "
"but a.shape = %s." % (alpha.shape, ))
return alpha
def _dirichlet_check_input(alpha, x):
    # Validate quantiles `x` against the concentration vector `alpha`,
    # reconstructing the final simplex coordinate when it was omitted.
    x = np.asarray(x)
    if x.shape[0] + 1 != alpha.shape[0] and x.shape[0] != alpha.shape[0]:
        raise ValueError("Vector 'x' must have either the same number "
                         "of entries as, or one entry fewer than, "
                         "parameter vector 'a', but alpha.shape = %s "
                         "and x.shape = %s." % (alpha.shape, x.shape))
    if x.shape[0] != alpha.shape[0]:
        # One entry fewer supplied: complete it from the simplex
        # constraint sum(x) == 1.
        xk = np.array([1 - np.sum(x, 0)])
        if xk.ndim == 1:
            x = np.append(x, xk)
        elif xk.ndim == 2:
            x = np.vstack((x, xk))
        else:
            raise ValueError("The input must be one dimensional or a two "
                             "dimensional matrix containing the entries.")
    if np.min(x) < 0:
        raise ValueError("Each entry in 'x' must be greater than or equal "
                         "to zero.")
    if np.max(x) > 1:
        raise ValueError("Each entry in 'x' must be smaller or equal one.")
    # Check x_i > 0 or alpha_i > 1
    # (the density diverges at x_i == 0 when alpha_i < 1)
    xeq0 = (x == 0)
    alphalt1 = (alpha < 1)
    if x.shape != alpha.shape:
        # Broadcast the per-component alpha mask across the sample axis.
        alphalt1 = np.repeat(alphalt1, x.shape[-1], axis=-1).reshape(x.shape)
    chk = np.logical_and(xeq0, alphalt1)
    if np.sum(chk):
        raise ValueError("Each entry in 'x' must be greater than zero if its "
                         "alpha is less than one.")
    # NOTE(review): the tolerance literal is 10e-10, i.e. 1e-9 (not 1e-10),
    # and the message reads "simplex. but" — both preserved as-is here.
    if (np.abs(np.sum(x, 0) - 1.0) > 10e-10).any():
        raise ValueError("The input vector 'x' must lie within the normal "
                         "simplex. but np.sum(x, 0) = %s." % np.sum(x, 0))
    return x
def _lnB(alpha):
r"""Internal helper function to compute the log of the useful quotient.
.. math::
B(\alpha) = \frac{\prod_{i=1}{K}\Gamma(\alpha_i)}
{\Gamma\left(\sum_{i=1}^{K} \alpha_i \right)}
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
B : scalar
Helper quotient, internal use only
"""
return np.sum(gammaln(alpha)) - gammaln(np.sum(alpha))
class dirichlet_gen(multi_rv_generic):
    r"""A Dirichlet random variable.

    The `alpha` keyword specifies the concentration parameters of the
    distribution.

    .. versionadded:: 0.15.0

    Methods
    -------
    ``pdf(x, alpha)``
        Probability density function.
    ``logpdf(x, alpha)``
        Log of the probability density function.
    ``rvs(alpha, size=1, random_state=None)``
        Draw random samples from a Dirichlet distribution.
    ``mean(alpha)``
        The mean of the Dirichlet distribution
    ``var(alpha)``
        The variance of the Dirichlet distribution
    ``entropy(alpha)``
        Compute the differential entropy of the Dirichlet distribution.

    Parameters
    ----------
    x : array_like
        Quantiles, with the last axis of `x` denoting the components.
    %(_dirichlet_doc_default_callparams)s
    %(_doc_random_state)s

    Alternatively, the object may be called (as a function) to fix
    concentration parameters, returning a "frozen" Dirichlet
    random variable:

    rv = dirichlet(alpha)
        - Frozen object with the same methods but holding the given
          concentration parameters fixed.

    Notes
    -----
    Each :math:`\alpha` entry must be positive. The distribution has only
    support on the simplex defined by

    .. math::
        \sum_{i=1}^{K} x_i = 1

    where 0 < x_i < 1.

    If the quantiles don't lie within the simplex, a ValueError is raised.

    The probability density function for `dirichlet` is

    .. math::
        f(x) = \frac{1}{\mathrm{B}(\boldsymbol\alpha)} \prod_{i=1}^K x_i^{\alpha_i - 1}

    where

    .. math::
        \mathrm{B}(\boldsymbol\alpha) = \frac{\prod_{i=1}^K \Gamma(\alpha_i)}
                                             {\Gamma\bigl(\sum_{i=1}^K \alpha_i\bigr)}

    and :math:`\boldsymbol\alpha=(\alpha_1,\ldots,\alpha_K)`, the
    concentration parameters and :math:`K` is the dimension of the space
    where :math:`x` takes values.

    Note that the dirichlet interface is somewhat inconsistent.
    The array returned by the rvs function is transposed
    with respect to the format expected by the pdf and logpdf.

    Examples
    --------
    >>> from scipy.stats import dirichlet

    Generate a dirichlet random variable

    >>> quantiles = np.array([0.2, 0.2, 0.6]) # specify quantiles
    >>> alpha = np.array([0.4, 5, 15]) # specify concentration parameters
    >>> dirichlet.pdf(quantiles, alpha)
    0.2843831684937255

    The same PDF but following a log scale

    >>> dirichlet.logpdf(quantiles, alpha)
    -1.2574327653159187

    Once we specify the dirichlet distribution
    we can then calculate quantities of interest

    >>> dirichlet.mean(alpha) # get the mean of the distribution
    array([0.01960784, 0.24509804, 0.73529412])
    >>> dirichlet.var(alpha) # get variance
    array([0.00089829, 0.00864603, 0.00909517])
    >>> dirichlet.entropy(alpha) # calculate the differential entropy
    -4.3280162474082715

    We can also return random samples from the distribution

    >>> dirichlet.rvs(alpha, size=1, random_state=1)
    array([[0.00766178, 0.24670518, 0.74563305]])
    >>> dirichlet.rvs(alpha, size=2, random_state=2)
    array([[0.01639427, 0.1292273 , 0.85437844],
           [0.00156917, 0.19033695, 0.80809388]])

    """
    def __init__(self, seed=None):
        super().__init__(seed)
        # Substitute the shared parameter fragments into the class docstring.
        self.__doc__ = doccer.docformat(self.__doc__, dirichlet_docdict_params)
    def __call__(self, alpha, seed=None):
        # Freeze the distribution at the given concentration parameters.
        return dirichlet_frozen(alpha, seed=seed)
    def _logpdf(self, x, alpha):
        """Log of the Dirichlet probability density function.

        Parameters
        ----------
        x : ndarray
            Points at which to evaluate the log of the probability
            density function
        %(_dirichlet_doc_default_callparams)s

        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'logpdf' instead.

        """
        lnB = _lnB(alpha)
        # xlogy evaluates 0 * log(0) as 0, so simplex-boundary points with
        # alpha_i == 1 contribute nothing instead of producing NaN.
        return - lnB + np.sum((xlogy(alpha - 1, x.T)).T, 0)
    def logpdf(self, x, alpha):
        """Log of the Dirichlet probability density function.

        Parameters
        ----------
        x : array_like
            Quantiles, with the last axis of `x` denoting the components.
        %(_dirichlet_doc_default_callparams)s

        Returns
        -------
        pdf : ndarray or scalar
            Log of the probability density function evaluated at `x`.

        """
        alpha = _dirichlet_check_parameters(alpha)
        x = _dirichlet_check_input(alpha, x)
        out = self._logpdf(x, alpha)
        return _squeeze_output(out)
    def pdf(self, x, alpha):
        """The Dirichlet probability density function.

        Parameters
        ----------
        x : array_like
            Quantiles, with the last axis of `x` denoting the components.
        %(_dirichlet_doc_default_callparams)s

        Returns
        -------
        pdf : ndarray or scalar
            The probability density function evaluated at `x`.

        """
        alpha = _dirichlet_check_parameters(alpha)
        x = _dirichlet_check_input(alpha, x)
        out = np.exp(self._logpdf(x, alpha))
        return _squeeze_output(out)
    def mean(self, alpha):
        """Compute the mean of the dirichlet distribution.

        Parameters
        ----------
        %(_dirichlet_doc_default_callparams)s

        Returns
        -------
        mu : ndarray or scalar
            Mean of the Dirichlet distribution.

        """
        alpha = _dirichlet_check_parameters(alpha)
        # E[x_i] = alpha_i / alpha0
        out = alpha / (np.sum(alpha))
        return _squeeze_output(out)
    def var(self, alpha):
        """Compute the variance of the dirichlet distribution.

        Parameters
        ----------
        %(_dirichlet_doc_default_callparams)s

        Returns
        -------
        v : ndarray or scalar
            Variance of the Dirichlet distribution.

        """
        alpha = _dirichlet_check_parameters(alpha)
        alpha0 = np.sum(alpha)
        # Var[x_i] = alpha_i (alpha0 - alpha_i) / (alpha0^2 (alpha0 + 1))
        out = (alpha * (alpha0 - alpha)) / ((alpha0 * alpha0) * (alpha0 + 1))
        return _squeeze_output(out)
    def entropy(self, alpha):
        """Compute the differential entropy of the dirichlet distribution.

        Parameters
        ----------
        %(_dirichlet_doc_default_callparams)s

        Returns
        -------
        h : scalar
            Entropy of the Dirichlet distribution

        """
        alpha = _dirichlet_check_parameters(alpha)
        alpha0 = np.sum(alpha)
        lnB = _lnB(alpha)
        K = alpha.shape[0]
        # h = lnB + (alpha0 - K) psi(alpha0) - sum_i (alpha_i - 1) psi(alpha_i)
        out = lnB + (alpha0 - K) * scipy.special.psi(alpha0) - np.sum(
            (alpha - 1) * scipy.special.psi(alpha))
        return _squeeze_output(out)
    def rvs(self, alpha, size=1, random_state=None):
        """Draw random samples from a Dirichlet distribution.

        Parameters
        ----------
        %(_dirichlet_doc_default_callparams)s
        size : int, optional
            Number of samples to draw (default 1).
        %(_doc_random_state)s

        Returns
        -------
        rvs : ndarray or scalar
            Random variates of size (`size`, `N`), where `N` is the
            dimension of the random variable.

        """
        alpha = _dirichlet_check_parameters(alpha)
        random_state = self._get_random_state(random_state)
        # Delegate the actual sampling to NumPy's Dirichlet generator.
        return random_state.dirichlet(alpha, size=size)
# Module-level instance; call it to obtain a frozen distribution.
dirichlet = dirichlet_gen()
class dirichlet_frozen(multi_rv_frozen):
    # Dirichlet distribution with the concentration vector `alpha` fixed;
    # each method simply delegates to a dirichlet_gen instance. Method
    # docstrings are filled in by the docformat loop below this class.
    def __init__(self, alpha, seed=None):
        self.alpha = _dirichlet_check_parameters(alpha)
        self._dist = dirichlet_gen(seed)
    def logpdf(self, x):
        return self._dist.logpdf(x, self.alpha)
    def pdf(self, x):
        return self._dist.pdf(x, self.alpha)
    def mean(self):
        return self._dist.mean(self.alpha)
    def var(self):
        return self._dist.var(self.alpha)
    def entropy(self):
        return self._dist.entropy(self.alpha)
    def rvs(self, size=1, random_state=None):
        return self._dist.rvs(self.alpha, size, random_state)
# Set frozen generator docstrings from corresponding docstrings in
# dirichlet_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs', 'mean', 'var', 'entropy']:
    method = dirichlet_gen.__dict__[name]
    method_frozen = dirichlet_frozen.__dict__[name]
    method_frozen.__doc__ = doccer.docformat(
        method.__doc__, dirichlet_docdict_noparams)
    method.__doc__ = doccer.docformat(method.__doc__, dirichlet_docdict_params)
# Shared docstring fragments substituted by doccer.docformat into the
# Wishart-family docstrings below.
_wishart_doc_default_callparams = """\
df : int
    Degrees of freedom, must be greater than or equal to dimension of the
    scale matrix
scale : array_like
    Symmetric positive definite scale matrix of the distribution
"""
_wishart_doc_callparams_note = ""
_wishart_doc_frozen_callparams = ""
_wishart_doc_frozen_callparams_note = \
    """See class definition for a detailed description of parameters."""
wishart_docdict_params = {
    '_doc_default_callparams': _wishart_doc_default_callparams,
    '_doc_callparams_note': _wishart_doc_callparams_note,
    '_doc_random_state': _doc_random_state
}
wishart_docdict_noparams = {
    '_doc_default_callparams': _wishart_doc_frozen_callparams,
    '_doc_callparams_note': _wishart_doc_frozen_callparams_note,
    '_doc_random_state': _doc_random_state
}
class wishart_gen(multi_rv_generic):
r"""A Wishart random variable.
The `df` keyword specifies the degrees of freedom. The `scale` keyword
specifies the scale matrix, which must be symmetric and positive definite.
In this context, the scale matrix is often interpreted in terms of a
multivariate normal precision matrix (the inverse of the covariance
matrix). These arguments must satisfy the relationship
``df > scale.ndim - 1``, but see notes on using the `rvs` method with
``df < scale.ndim``.
Methods
-------
``pdf(x, df, scale)``
Probability density function.
``logpdf(x, df, scale)``
Log of the probability density function.
``rvs(df, scale, size=1, random_state=None)``
Draw random samples from a Wishart distribution.
``entropy()``
Compute the differential entropy of the Wishart distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the degrees
of freedom and scale parameters, returning a "frozen" Wishart random
variable:
rv = wishart(df=1, scale=1)
- Frozen object with the same methods but holding the given
degrees of freedom and scale fixed.
See Also
--------
invwishart, chi2
Notes
-----
%(_doc_callparams_note)s
The scale matrix `scale` must be a symmetric positive definite
matrix. Singular matrices, including the symmetric positive semi-definite
case, are not supported.
The Wishart distribution is often denoted
.. math::
W_p(\nu, \Sigma)
where :math:`\nu` is the degrees of freedom and :math:`\Sigma` is the
:math:`p \times p` scale matrix.
The probability density function for `wishart` has support over positive
definite matrices :math:`S`; if :math:`S \sim W_p(\nu, \Sigma)`, then
its PDF is given by:
.. math::
f(S) = \frac{|S|^{\frac{\nu - p - 1}{2}}}{2^{ \frac{\nu p}{2} }
|\Sigma|^\frac{\nu}{2} \Gamma_p \left ( \frac{\nu}{2} \right )}
\exp\left( -tr(\Sigma^{-1} S) / 2 \right)
If :math:`S \sim W_p(\nu, \Sigma)` (Wishart) then
:math:`S^{-1} \sim W_p^{-1}(\nu, \Sigma^{-1})` (inverse Wishart).
If the scale matrix is 1-dimensional and equal to one, then the Wishart
distribution :math:`W_1(\nu, 1)` collapses to the :math:`\chi^2(\nu)`
distribution.
The algorithm [2]_ implemented by the `rvs` method may
produce numerically singular matrices with :math:`p - 1 < \nu < p`; the
user may wish to check for this condition and generate replacement samples
as necessary.
.. versionadded:: 0.16.0
References
----------
.. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach",
Wiley, 1983.
.. [2] W.B. Smith and R.R. Hocking, "Algorithm AS 53: Wishart Variate
Generator", Applied Statistics, vol. 21, pp. 341-345, 1972.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import wishart, chi2
>>> x = np.linspace(1e-5, 8, 100)
>>> w = wishart.pdf(x, df=3, scale=1); w[:5]
array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ])
>>> c = chi2.pdf(x, 3); c[:5]
array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ])
>>> plt.plot(x, w)
The input quantiles can be any shape of array, as long as the last
axis labels the components.
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params)
def __call__(self, df=None, scale=None, seed=None):
"""Create a frozen Wishart distribution.
See `wishart_frozen` for more information.
"""
return wishart_frozen(df, scale, seed)
def _process_parameters(self, df, scale):
if scale is None:
scale = 1.0
scale = np.asarray(scale, dtype=float)
if scale.ndim == 0:
scale = scale[np.newaxis, np.newaxis]
elif scale.ndim == 1:
scale = np.diag(scale)
elif scale.ndim == 2 and not scale.shape[0] == scale.shape[1]:
raise ValueError("Array 'scale' must be square if it is two"
" dimensional, but scale.scale = %s."
% str(scale.shape))
elif scale.ndim > 2:
raise ValueError("Array 'scale' must be at most two-dimensional,"
" but scale.ndim = %d" % scale.ndim)
dim = scale.shape[0]
if df is None:
df = dim
elif not np.isscalar(df):
raise ValueError("Degrees of freedom must be a scalar.")
elif df <= dim - 1:
raise ValueError("Degrees of freedom must be greater than the "
"dimension of scale matrix minus 1.")
return dim, df, scale
def _process_quantiles(self, x, dim):
"""
Adjust quantiles array so that last axis labels the components of
each data point.
"""
x = np.asarray(x, dtype=float)
if x.ndim == 0:
x = x * np.eye(dim)[:, :, np.newaxis]
if x.ndim == 1:
if dim == 1:
x = x[np.newaxis, np.newaxis, :]
else:
x = np.diag(x)[:, :, np.newaxis]
elif x.ndim == 2:
if not x.shape[0] == x.shape[1]:
raise ValueError("Quantiles must be square if they are two"
" dimensional, but x.shape = %s."
% str(x.shape))
x = x[:, :, np.newaxis]
elif x.ndim == 3:
if not x.shape[0] == x.shape[1]:
raise ValueError("Quantiles must be square in the first two"
" dimensions if they are three dimensional"
", but x.shape = %s." % str(x.shape))
elif x.ndim > 3:
raise ValueError("Quantiles must be at most two-dimensional with"
" an additional dimension for multiple"
"components, but x.ndim = %d" % x.ndim)
# Now we have 3-dim array; should have shape [dim, dim, *]
if not x.shape[0:2] == (dim, dim):
raise ValueError('Quantiles have incompatible dimensions: should'
' be %s, got %s.' % ((dim, dim), x.shape[0:2]))
return x
def _process_size(self, size):
size = np.asarray(size)
if size.ndim == 0:
size = size[np.newaxis]
elif size.ndim > 1:
raise ValueError('Size must be an integer or tuple of integers;'
' thus must have dimension <= 1.'
' Got size.ndim = %s' % str(tuple(size)))
n = size.prod()
shape = tuple(size)
return n, shape
def _logpdf(self, x, dim, df, scale, log_det_scale, C):
"""Log of the Wishart probability density function.
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
scale : ndarray
Scale matrix
log_det_scale : float
Logarithm of the determinant of the scale matrix
C : ndarray
Cholesky factorization of the scale matrix, lower triagular.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
# log determinant of x
# Note: x has components along the last axis, so that x.T has
# components alone the 0-th axis. Then since det(A) = det(A'), this
# gives us a 1-dim vector of determinants
# Retrieve tr(scale^{-1} x)
log_det_x = np.empty(x.shape[-1])
scale_inv_x = np.empty(x.shape)
tr_scale_inv_x = np.empty(x.shape[-1])
for i in range(x.shape[-1]):
_, log_det_x[i] = self._cholesky_logdet(x[:, :, i])
scale_inv_x[:, :, i] = scipy.linalg.cho_solve((C, True), x[:, :, i])
tr_scale_inv_x[i] = scale_inv_x[:, :, i].trace()
# Log PDF
out = ((0.5 * (df - dim - 1) * log_det_x - 0.5 * tr_scale_inv_x) -
(0.5 * df * dim * _LOG_2 + 0.5 * df * log_det_scale +
multigammaln(0.5*df, dim)))
return out
def logpdf(self, x, df, scale):
"""Log of the Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Log of the probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
dim, df, scale = self._process_parameters(df, scale)
x = self._process_quantiles(x, dim)
# Cholesky decomposition of scale, get log(det(scale))
C, log_det_scale = self._cholesky_logdet(scale)
out = self._logpdf(x, dim, df, scale, log_det_scale, C)
return _squeeze_output(out)
def pdf(self, x, df, scale):
"""Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
return np.exp(self.logpdf(x, df, scale))
def _mean(self, dim, df, scale):
"""Mean of the Wishart distribution.
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mean' instead.
"""
return df * scale
def mean(self, df, scale):
"""Mean of the Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mean : float
The mean of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mean(dim, df, scale)
return _squeeze_output(out)
def _mode(self, dim, df, scale):
"""Mode of the Wishart distribution.
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mode' instead.
"""
if df >= dim + 1:
out = (df-dim-1) * scale
else:
out = None
return out
def mode(self, df, scale):
"""Mode of the Wishart distribution
Only valid if the degrees of freedom are greater than the dimension of
the scale matrix.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mode : float or None
The Mode of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mode(dim, df, scale)
return _squeeze_output(out) if out is not None else out
def _var(self, dim, df, scale):
"""Variance of the Wishart distribution.
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'var' instead.
"""
var = scale**2
diag = scale.diagonal() # 1 x dim array
var += np.outer(diag, diag)
var *= df
return var
def var(self, df, scale):
"""Variance of the Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
var : float
The variance of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._var(dim, df, scale)
return _squeeze_output(out)
def _standard_rvs(self, n, shape, dim, df, random_state):
"""
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
random_state : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance
then that instance is used.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
# Random normal variates for off-diagonal elements
n_tril = dim * (dim-1) // 2
covariances = random_state.normal(
size=n*n_tril).reshape(shape+(n_tril,))
# Random chi-square variates for diagonal elements
variances = (np.r_[[random_state.chisquare(df-(i+1)+1, size=n)**0.5
for i in range(dim)]].reshape((dim,) +
shape[::-1]).T)
# Create the A matri(ces) - lower triangular
A = np.zeros(shape + (dim, dim))
# Input the covariances
size_idx = tuple([slice(None, None, None)]*len(shape))
tril_idx = np.tril_indices(dim, k=-1)
A[size_idx + tril_idx] = covariances
# Input the variances
diag_idx = np.diag_indices(dim)
A[size_idx + diag_idx] = variances
return A
def _rvs(self, n, shape, dim, df, C, random_state):
"""Draw random samples from a Wishart distribution.
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
C : ndarray
Cholesky factorization of the scale matrix, lower triangular.
%(_doc_random_state)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
random_state = self._get_random_state(random_state)
# Calculate the matrices A, which are actually lower triangular
# Cholesky factorizations of a matrix B such that B ~ W(df, I)
A = self._standard_rvs(n, shape, dim, df, random_state)
# Calculate SA = C A A' C', where SA ~ W(df, scale)
# Note: this is the product of a (lower) (lower) (lower)' (lower)'
# or, denoting B = AA', it is C B C' where C is the lower
# triangular Cholesky factorization of the scale matrix.
# this appears to conflict with the instructions in [1]_, which
# suggest that it should be D' B D where D is the lower
# triangular factorization of the scale matrix. However, it is
# meant to refer to the Bartlett (1933) representation of a
# Wishart random variate as L A A' L' where L is lower triangular
# so it appears that understanding D' to be upper triangular
# is either a typo in or misreading of [1]_.
for index in np.ndindex(shape):
CA = np.dot(C, A[index])
A[index] = np.dot(CA, CA.T)
return A
def rvs(self, df, scale, size=1, random_state=None):
"""Draw random samples from a Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer or iterable of integers, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray
Random variates of shape (`size`) + (`dim`, `dim), where `dim` is
the dimension of the scale matrix.
Notes
-----
%(_doc_callparams_note)s
"""
n, shape = self._process_size(size)
dim, df, scale = self._process_parameters(df, scale)
# Cholesky decomposition of scale
C = scipy.linalg.cholesky(scale, lower=True)
out = self._rvs(n, shape, dim, df, C, random_state)
return _squeeze_output(out)
def _entropy(self, dim, df, log_det_scale):
"""Compute the differential entropy of the Wishart.
Parameters
----------
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
log_det_scale : float
Logarithm of the determinant of the scale matrix
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'entropy' instead.
"""
return (
0.5 * (dim+1) * log_det_scale +
0.5 * dim * (dim+1) * _LOG_2 +
multigammaln(0.5*df, dim) -
0.5 * (df - dim - 1) * np.sum(
[psi(0.5*(df + 1 - (i+1))) for i in range(dim)]
) +
0.5 * df * dim
)
    def entropy(self, df, scale):
        """Compute the differential entropy of the Wishart.

        Parameters
        ----------
        %(_doc_default_callparams)s

        Returns
        -------
        h : scalar
            Entropy of the Wishart distribution

        Notes
        -----
        %(_doc_callparams_note)s
        """
        dim, df, scale = self._process_parameters(df, scale)
        # Only the log-determinant is needed here; the Cholesky factor
        # returned alongside it is discarded.
        _, log_det_scale = self._cholesky_logdet(scale)
        return self._entropy(dim, df, log_det_scale)
def _cholesky_logdet(self, scale):
"""Compute Cholesky decomposition and determine (log(det(scale)).
Parameters
----------
scale : ndarray
Scale matrix.
Returns
-------
c_decomp : ndarray
The Cholesky decomposition of `scale`.
logdet : scalar
The log of the determinant of `scale`.
Notes
-----
This computation of ``logdet`` is equivalent to
``np.linalg.slogdet(scale)``. It is ~2x faster though.
"""
c_decomp = scipy.linalg.cholesky(scale, lower=True)
logdet = 2 * np.sum(np.log(c_decomp.diagonal()))
return c_decomp, logdet
# Module-level instance providing the public API; calling it (e.g.
# ``wishart(df, scale)``) returns a frozen distribution.
wishart = wishart_gen()
class wishart_frozen(multi_rv_frozen):
    """Create a frozen Wishart distribution.

    Parameters
    ----------
    df : array_like
        Degrees of freedom of the distribution
    scale : array_like
        Scale matrix of the distribution
    seed : {None, int, `numpy.random.Generator`,
            `numpy.random.RandomState`}, optional
        If `seed` is None (or `np.random`), the `numpy.random.RandomState`
        singleton is used.
        If `seed` is an int, a new ``RandomState`` instance is used,
        seeded with `seed`.
        If `seed` is already a ``Generator`` or ``RandomState`` instance then
        that instance is used.
    """

    def __init__(self, df, scale, seed=None):
        self._dist = wishart_gen(seed)
        # Validate the parameters once, up front, so every method can
        # reuse them without re-checking.
        self.dim, self.df, self.scale = self._dist._process_parameters(
            df, scale)
        # Cache the Cholesky factor and the log-determinant of the scale.
        self.C, self.log_det_scale = self._dist._cholesky_logdet(self.scale)

    def logpdf(self, x):
        quantiles = self._dist._process_quantiles(x, self.dim)
        result = self._dist._logpdf(quantiles, self.dim, self.df, self.scale,
                                    self.log_det_scale, self.C)
        return _squeeze_output(result)

    def pdf(self, x):
        return np.exp(self.logpdf(x))

    def mean(self):
        return _squeeze_output(self._dist._mean(self.dim, self.df, self.scale))

    def mode(self):
        result = self._dist._mode(self.dim, self.df, self.scale)
        # The mode exists only for df >= dim + 1; propagate None otherwise.
        return None if result is None else _squeeze_output(result)

    def var(self):
        return _squeeze_output(self._dist._var(self.dim, self.df, self.scale))

    def rvs(self, size=1, random_state=None):
        n_samples, sample_shape = self._dist._process_size(size)
        draws = self._dist._rvs(n_samples, sample_shape, self.dim, self.df,
                                self.C, random_state)
        return _squeeze_output(draws)

    def entropy(self):
        return self._dist._entropy(self.dim, self.df, self.log_det_scale)
# Set frozen generator docstrings from corresponding docstrings in
# Wishart and fill in default strings in class docstrings
for _method_name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs',
                     'entropy']:
    _gen_method = wishart_gen.__dict__[_method_name]
    _frozen_method = wishart_frozen.__dict__[_method_name]
    # The frozen variant documents no distribution parameters.
    _frozen_method.__doc__ = doccer.docformat(_gen_method.__doc__,
                                              wishart_docdict_noparams)
    _gen_method.__doc__ = doccer.docformat(_gen_method.__doc__,
                                           wishart_docdict_params)
def _cho_inv_batch(a, check_finite=True):
"""
Invert the matrices a_i, using a Cholesky factorization of A, where
a_i resides in the last two dimensions of a and the other indices describe
the index i.
Overwrites the data in a.
Parameters
----------
a : array
Array of matrices to invert, where the matrices themselves are stored
in the last two dimensions.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
x : array
Array of inverses of the matrices ``a_i``.
See Also
--------
scipy.linalg.cholesky : Cholesky factorization of a matrix
"""
if check_finite:
a1 = asarray_chkfinite(a)
else:
a1 = asarray(a)
if len(a1.shape) < 2 or a1.shape[-2] != a1.shape[-1]:
raise ValueError('expected square matrix in last two dimensions')
potrf, potri = get_lapack_funcs(('potrf', 'potri'), (a1,))
triu_rows, triu_cols = np.triu_indices(a.shape[-2], k=1)
for index in np.ndindex(a1.shape[:-2]):
# Cholesky decomposition
a1[index], info = potrf(a1[index], lower=True, overwrite_a=False,
clean=False)
if info > 0:
raise LinAlgError("%d-th leading minor not positive definite"
% info)
if info < 0:
raise ValueError('illegal value in %d-th argument of internal'
' potrf' % -info)
# Inversion
a1[index], info = potri(a1[index], lower=True, overwrite_c=False)
if info > 0:
raise LinAlgError("the inverse could not be computed")
if info < 0:
raise ValueError('illegal value in %d-th argument of internal'
' potrf' % -info)
# Make symmetric (dpotri only fills in the lower triangle)
a1[index][triu_rows, triu_cols] = a1[index][triu_cols, triu_rows]
return a1
class invwishart_gen(wishart_gen):
    r"""An inverse Wishart random variable.
    The `df` keyword specifies the degrees of freedom. The `scale` keyword
    specifies the scale matrix, which must be symmetric and positive definite.
    In this context, the scale matrix is often interpreted in terms of a
    multivariate normal covariance matrix.
    Methods
    -------
    ``pdf(x, df, scale)``
        Probability density function.
    ``logpdf(x, df, scale)``
        Log of the probability density function.
    ``rvs(df, scale, size=1, random_state=None)``
        Draw random samples from an inverse Wishart distribution.
    Parameters
    ----------
    x : array_like
        Quantiles, with the last axis of `x` denoting the components.
    %(_doc_default_callparams)s
    %(_doc_random_state)s
    Alternatively, the object may be called (as a function) to fix the degrees
    of freedom and scale parameters, returning a "frozen" inverse Wishart
    random variable:
    rv = invwishart(df=1, scale=1)
    - Frozen object with the same methods but holding the given
      degrees of freedom and scale fixed.
    See Also
    --------
    wishart
    Notes
    -----
    %(_doc_callparams_note)s
    The scale matrix `scale` must be a symmetric positive definite
    matrix. Singular matrices, including the symmetric positive semi-definite
    case, are not supported.
    The inverse Wishart distribution is often denoted
    .. math::
        W_p^{-1}(\nu, \Psi)
    where :math:`\nu` is the degrees of freedom and :math:`\Psi` is the
    :math:`p \times p` scale matrix.
    The probability density function for `invwishart` has support over positive
    definite matrices :math:`S`; if :math:`S \sim W^{-1}_p(\nu, \Sigma)`,
    then its PDF is given by:
    .. math::
        f(S) = \frac{|\Sigma|^\frac{\nu}{2}}{2^{ \frac{\nu p}{2} }
               |S|^{\frac{\nu + p + 1}{2}} \Gamma_p \left(\frac{\nu}{2} \right)}
               \exp\left( -tr(\Sigma S^{-1}) / 2 \right)
    If :math:`S \sim W_p^{-1}(\nu, \Psi)` (inverse Wishart) then
    :math:`S^{-1} \sim W_p(\nu, \Psi^{-1})` (Wishart).
    If the scale matrix is 1-dimensional and equal to one, then the inverse
    Wishart distribution :math:`W_1(\nu, 1)` collapses to the
    inverse Gamma distribution with parameters shape = :math:`\frac{\nu}{2}`
    and scale = :math:`\frac{1}{2}`.
    .. versionadded:: 0.16.0
    References
    ----------
    .. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach",
           Wiley, 1983.
    .. [2] M.C. Jones, "Generating Inverse Wishart Matrices", Communications
           in Statistics - Simulation and Computation, vol. 14.2, pp.511-514,
           1985.
    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> from scipy.stats import invwishart, invgamma
    >>> x = np.linspace(0.01, 1, 100)
    >>> iw = invwishart.pdf(x, df=6, scale=1)
    >>> iw[:3]
    array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03])
    >>> ig = invgamma.pdf(x, 6/2., scale=1./2)
    >>> ig[:3]
    array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03])
    >>> plt.plot(x, iw)
    The input quantiles can be any shape of array, as long as the last
    axis labels the components.
    """
    def __init__(self, seed=None):
        super().__init__(seed)
        # Expand the shared doccer templates in the class docstring.
        self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params)
    def __call__(self, df=None, scale=None, seed=None):
        """Create a frozen inverse Wishart distribution.
        See `invwishart_frozen` for more information.
        """
        return invwishart_frozen(df, scale, seed)
    def _logpdf(self, x, dim, df, scale, log_det_scale):
        """Log of the inverse Wishart probability density function.
        Parameters
        ----------
        x : ndarray
            Points at which to evaluate the log of the probability
            density function. Laid out as (dim, dim, n): individual
            quantile matrices are stacked along the last axis.
        dim : int
            Dimension of the scale matrix
        df : int
            Degrees of freedom
        scale : ndarray
            Scale matrix
        log_det_scale : float
            Logarithm of the determinant of the scale matrix
        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'logpdf' instead.
        """
        log_det_x = np.empty(x.shape[-1])
        # Transposing gives shape (n, dim, dim), so each quantile can be
        # inverted in place by _cho_inv_batch; x_inv[i] is then the inverse
        # of x[:, :, i].
        x_inv = np.copy(x).T
        if dim > 1:
            _cho_inv_batch(x_inv)  # works in-place
        else:
            x_inv = 1./x_inv
        tr_scale_x_inv = np.empty(x.shape[-1])
        for i in range(x.shape[-1]):
            # log(det(x_i)) from the Cholesky factor of the i-th quantile.
            C, lower = scipy.linalg.cho_factor(x[:, :, i], lower=True)
            log_det_x[i] = 2 * np.sum(np.log(C.diagonal()))
            # tr(scale x_i^{-1}), the exponent term of the PDF.
            tr_scale_x_inv[i] = np.dot(scale, x_inv[i]).trace()
        # Log PDF
        out = ((0.5 * df * log_det_scale - 0.5 * tr_scale_x_inv) -
               (0.5 * df * dim * _LOG_2 + 0.5 * (df + dim + 1) * log_det_x) -
               multigammaln(0.5*df, dim))
        return out
    def logpdf(self, x, df, scale):
        """Log of the inverse Wishart probability density function.
        Parameters
        ----------
        x : array_like
            Quantiles, with the last axis of `x` denoting the components.
            Each quantile must be a symmetric positive definite matrix.
        %(_doc_default_callparams)s
        Returns
        -------
        pdf : ndarray
            Log of the probability density function evaluated at `x`
        Notes
        -----
        %(_doc_callparams_note)s
        """
        dim, df, scale = self._process_parameters(df, scale)
        x = self._process_quantiles(x, dim)
        # Only the log-determinant is needed; the factor is discarded.
        _, log_det_scale = self._cholesky_logdet(scale)
        out = self._logpdf(x, dim, df, scale, log_det_scale)
        return _squeeze_output(out)
    def pdf(self, x, df, scale):
        """Inverse Wishart probability density function.
        Parameters
        ----------
        x : array_like
            Quantiles, with the last axis of `x` denoting the components.
            Each quantile must be a symmetric positive definite matrix.
        %(_doc_default_callparams)s
        Returns
        -------
        pdf : ndarray
            Probability density function evaluated at `x`
        Notes
        -----
        %(_doc_callparams_note)s
        """
        return np.exp(self.logpdf(x, df, scale))
    def _mean(self, dim, df, scale):
        """Mean of the inverse Wishart distribution.
        Parameters
        ----------
        dim : int
            Dimension of the scale matrix
        %(_doc_default_callparams)s
        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'mean' instead.
        """
        # The mean exists only for df > dim + 1; otherwise None is returned.
        if df > dim + 1:
            out = scale / (df - dim - 1)
        else:
            out = None
        return out
    def mean(self, df, scale):
        """Mean of the inverse Wishart distribution.
        Only valid if the degrees of freedom are greater than the dimension of
        the scale matrix plus one.
        Parameters
        ----------
        %(_doc_default_callparams)s
        Returns
        -------
        mean : float or None
            The mean of the distribution
        """
        dim, df, scale = self._process_parameters(df, scale)
        out = self._mean(dim, df, scale)
        return _squeeze_output(out) if out is not None else out
    def _mode(self, dim, df, scale):
        """Mode of the inverse Wishart distribution.
        Parameters
        ----------
        dim : int
            Dimension of the scale matrix
        %(_doc_default_callparams)s
        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'mode' instead.
        """
        return scale / (df + dim + 1)
    def mode(self, df, scale):
        """Mode of the inverse Wishart distribution.
        Parameters
        ----------
        %(_doc_default_callparams)s
        Returns
        -------
        mode : float
            The Mode of the distribution
        """
        dim, df, scale = self._process_parameters(df, scale)
        out = self._mode(dim, df, scale)
        return _squeeze_output(out)
    def _var(self, dim, df, scale):
        """Variance of the inverse Wishart distribution.
        Parameters
        ----------
        dim : int
            Dimension of the scale matrix
        %(_doc_default_callparams)s
        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'var' instead.
        """
        # The variance exists only for df > dim + 3; otherwise None.
        if df > dim + 3:
            var = (df - dim + 1) * scale**2
            diag = scale.diagonal()  # 1 x dim array
            var += (df - dim - 1) * np.outer(diag, diag)
            var /= (df - dim) * (df - dim - 1)**2 * (df - dim - 3)
        else:
            var = None
        return var
    def var(self, df, scale):
        """Variance of the inverse Wishart distribution.
        Only valid if the degrees of freedom are greater than the dimension of
        the scale matrix plus three.
        Parameters
        ----------
        %(_doc_default_callparams)s
        Returns
        -------
        var : float
            The variance of the distribution
        """
        dim, df, scale = self._process_parameters(df, scale)
        out = self._var(dim, df, scale)
        return _squeeze_output(out) if out is not None else out
    def _rvs(self, n, shape, dim, df, C, random_state):
        """Draw random samples from an inverse Wishart distribution.
        Parameters
        ----------
        n : integer
            Number of variates to generate
        shape : iterable
            Shape of the variates to generate
        dim : int
            Dimension of the scale matrix
        df : int
            Degrees of freedom
        C : ndarray
            Cholesky factorization of the INVERTED scale matrix, lower
            triangular (see `rvs`, which inverts the scale before
            calling this).
        %(_doc_random_state)s
        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'rvs' instead.
        """
        random_state = self._get_random_state(random_state)
        # Get random draws A such that A ~ W(df, I)
        A = super()._standard_rvs(n, shape, dim, df, random_state)
        # Calculate SA = (CA)'^{-1} (CA)^{-1} ~ iW(df, scale)
        eye = np.eye(dim)
        # NOTE: ('trtrs') is just the string 'trtrs', so a single function
        # (not a tuple) is returned here.
        trtrs = get_lapack_funcs(('trtrs'), (A,))
        for index in np.ndindex(A.shape[:-2]):
            # Calculate CA
            CA = np.dot(C, A[index])
            # Get (C A)^{-1} via triangular solver
            if dim > 1:
                CA, info = trtrs(CA, eye, lower=True)
                if info > 0:
                    raise LinAlgError("Singular matrix.")
                if info < 0:
                    raise ValueError('Illegal value in %d-th argument of'
                                     ' internal trtrs' % -info)
            else:
                CA = 1. / CA
            # Get SA
            A[index] = np.dot(CA.T, CA)
        return A
    def rvs(self, df, scale, size=1, random_state=None):
        """Draw random samples from an inverse Wishart distribution.
        Parameters
        ----------
        %(_doc_default_callparams)s
        size : integer or iterable of integers, optional
            Number of samples to draw (default 1).
        %(_doc_random_state)s
        Returns
        -------
        rvs : ndarray
            Random variates of shape (`size`) + (`dim`, `dim), where `dim` is
            the dimension of the scale matrix.
        Notes
        -----
        %(_doc_callparams_note)s
        """
        n, shape = self._process_size(size)
        dim, df, scale = self._process_parameters(df, scale)
        # Invert the scale: if X ~ iW(df, scale) then X^{-1} ~ W(df,
        # scale^{-1}), so the Wishart-style sampler works on the inverse.
        eye = np.eye(dim)
        L, lower = scipy.linalg.cho_factor(scale, lower=True)
        inv_scale = scipy.linalg.cho_solve((L, lower), eye)
        # Cholesky decomposition of inverted scale
        C = scipy.linalg.cholesky(inv_scale, lower=True)
        out = self._rvs(n, shape, dim, df, C, random_state)
        return _squeeze_output(out)
    def entropy(self):
        # Need to find reference for inverse Wishart entropy
        raise AttributeError
# Module-level instance providing the public API; calling it (e.g.
# ``invwishart(df, scale)``) returns a frozen distribution.
invwishart = invwishart_gen()
class invwishart_frozen(multi_rv_frozen):
    def __init__(self, df, scale, seed=None):
        """Create a frozen inverse Wishart distribution.

        Parameters
        ----------
        df : array_like
            Degrees of freedom of the distribution
        scale : array_like
            Scale matrix of the distribution
        seed : {None, int, `numpy.random.Generator`}, optional
            If `seed` is None the `numpy.random.Generator` singleton is used.
            If `seed` is an int, a new ``Generator`` instance is used,
            seeded with `seed`.
            If `seed` is already a ``Generator`` instance then that instance
            is used.
        """
        self._dist = invwishart_gen(seed)
        self.dim, self.df, self.scale = self._dist._process_parameters(
            df, scale
        )
        # Cache log(det(scale)) via a Cholesky factorization of the scale.
        factor, lower = scipy.linalg.cho_factor(self.scale, lower=True)
        self.log_det_scale = 2 * np.sum(np.log(factor.diagonal()))
        # Invert the scale with the same factorization ...
        identity = np.eye(self.dim)
        self.inv_scale = scipy.linalg.cho_solve((factor, lower), identity)
        # ... and cache the lower Cholesky factor of the inverse, which is
        # what the sampler consumes.
        self.C = scipy.linalg.cholesky(self.inv_scale, lower=True)

    def logpdf(self, x):
        quantiles = self._dist._process_quantiles(x, self.dim)
        result = self._dist._logpdf(quantiles, self.dim, self.df, self.scale,
                                    self.log_det_scale)
        return _squeeze_output(result)

    def pdf(self, x):
        return np.exp(self.logpdf(x))

    def mean(self):
        # The mean exists only for df > dim + 1; propagate None otherwise.
        result = self._dist._mean(self.dim, self.df, self.scale)
        return None if result is None else _squeeze_output(result)

    def mode(self):
        return _squeeze_output(self._dist._mode(self.dim, self.df,
                                                self.scale))

    def var(self):
        # The variance exists only for df > dim + 3; propagate None otherwise.
        result = self._dist._var(self.dim, self.df, self.scale)
        return None if result is None else _squeeze_output(result)

    def rvs(self, size=1, random_state=None):
        n_samples, sample_shape = self._dist._process_size(size)
        draws = self._dist._rvs(n_samples, sample_shape, self.dim, self.df,
                                self.C, random_state)
        return _squeeze_output(draws)

    def entropy(self):
        # Need to find reference for inverse Wishart entropy
        raise AttributeError
# Set frozen generator docstrings from corresponding docstrings in
# inverse Wishart and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs']:
    method = invwishart_gen.__dict__[name]
    # BUG FIX: this previously read ``wishart_frozen.__dict__[name]``, which
    # overwrote the (already-formatted) Wishart frozen docstrings with
    # inverse-Wishart text and left invwishart_frozen's methods
    # undocumented. The frozen method must come from invwishart_frozen.
    method_frozen = invwishart_frozen.__dict__[name]
    method_frozen.__doc__ = doccer.docformat(
        method.__doc__, wishart_docdict_noparams)
    method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params)
_multinomial_doc_default_callparams = """\
n : int
Number of trials
p : array_like
Probability of a trial falling into each category; should sum to 1
"""
_multinomial_doc_callparams_note = \
"""`n` should be a positive integer. Each element of `p` should be in the
interval :math:`[0,1]` and the elements should sum to 1. If they do not sum to
1, the last element of the `p` array is not used and is replaced with the
remaining probability left over from the earlier elements.
"""
_multinomial_doc_frozen_callparams = ""
_multinomial_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
multinomial_docdict_params = {
'_doc_default_callparams': _multinomial_doc_default_callparams,
'_doc_callparams_note': _multinomial_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
multinomial_docdict_noparams = {
'_doc_default_callparams': _multinomial_doc_frozen_callparams,
'_doc_callparams_note': _multinomial_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class multinomial_gen(multi_rv_generic):
    r"""A multinomial random variable.
    Methods
    -------
    ``pmf(x, n, p)``
        Probability mass function.
    ``logpmf(x, n, p)``
        Log of the probability mass function.
    ``rvs(n, p, size=1, random_state=None)``
        Draw random samples from a multinomial distribution.
    ``entropy(n, p)``
        Compute the entropy of the multinomial distribution.
    ``cov(n, p)``
        Compute the covariance matrix of the multinomial distribution.
    Parameters
    ----------
    x : array_like
        Quantiles, with the last axis of `x` denoting the components.
    %(_doc_default_callparams)s
    %(_doc_random_state)s
    Notes
    -----
    %(_doc_callparams_note)s
    Alternatively, the object may be called (as a function) to fix the `n` and
    `p` parameters, returning a "frozen" multinomial random variable:
    The probability mass function for `multinomial` is
    .. math::
        f(x) = \frac{n!}{x_1! \cdots x_k!} p_1^{x_1} \cdots p_k^{x_k},
    supported on :math:`x=(x_1, \ldots, x_k)` where each :math:`x_i` is a
    nonnegative integer and their sum is :math:`n`.
    .. versionadded:: 0.19.0
    Examples
    --------
    >>> from scipy.stats import multinomial
    >>> rv = multinomial(8, [0.3, 0.2, 0.5])
    >>> rv.pmf([1, 3, 4])
    0.042000000000000072
    The multinomial distribution for :math:`k=2` is identical to the
    corresponding binomial distribution (tiny numerical differences
    notwithstanding):
    >>> from scipy.stats import binom
    >>> multinomial.pmf([3, 4], n=7, p=[0.4, 0.6])
    0.29030399999999973
    >>> binom.pmf(3, 7, 0.4)
    0.29030400000000012
    The functions ``pmf``, ``logpmf``, ``entropy``, and ``cov`` support
    broadcasting, under the convention that the vector parameters (``x`` and
    ``p``) are interpreted as if each row along the last axis is a single
    object. For instance:
    >>> multinomial.pmf([[3, 4], [3, 5]], n=[7, 8], p=[.3, .7])
    array([0.2268945, 0.25412184])
    Here, ``x.shape == (2, 2)``, ``n.shape == (2,)``, and ``p.shape == (2,)``,
    but following the rules mentioned above they behave as if the rows
    ``[3, 4]`` and ``[3, 5]`` in ``x`` and ``[.3, .7]`` in ``p`` were a single
    object, and as if we had ``x.shape = (2,)``, ``n.shape = (2,)``, and
    ``p.shape = ()``. To obtain the individual elements without broadcasting,
    we would do this:
    >>> multinomial.pmf([3, 4], n=7, p=[.3, .7])
    0.2268945
    >>> multinomial.pmf([3, 5], 8, p=[.3, .7])
    0.25412184
    This broadcasting also works for ``cov``, where the output objects are
    square matrices of size ``p.shape[-1]``. For example:
    >>> multinomial.cov([4, 5], [[.3, .7], [.4, .6]])
    array([[[ 0.84, -0.84],
            [-0.84,  0.84]],
           [[ 1.2 , -1.2 ],
            [-1.2 ,  1.2 ]]])
    In this example, ``n.shape == (2,)`` and ``p.shape == (2, 2)``, and
    following the rules above, these broadcast as if ``p.shape == (2,)``.
    Thus the result should also be of shape ``(2,)``, but since each output is
    a :math:`2 \times 2` matrix, the result in fact has shape ``(2, 2, 2)``,
    where ``result[0]`` is equal to ``multinomial.cov(n=4, p=[.3, .7])`` and
    ``result[1]`` is equal to ``multinomial.cov(n=5, p=[.4, .6])``.
    See also
    --------
    scipy.stats.binom : The binomial distribution.
    numpy.random.Generator.multinomial : Sampling from the multinomial distribution.
    scipy.stats.multivariate_hypergeom :
        The multivariate hypergeometric distribution.
    """  # noqa: E501
    def __init__(self, seed=None):
        super().__init__(seed)
        # Expand the shared doccer templates in the class docstring.
        self.__doc__ = \
            doccer.docformat(self.__doc__, multinomial_docdict_params)
    def __call__(self, n, p, seed=None):
        """Create a frozen multinomial distribution.
        See `multinomial_frozen` for more information.
        """
        return multinomial_frozen(n, p, seed)
    def _process_parameters(self, n, p):
        """Returns: n_, p_, npcond.
        n_ and p_ are arrays of the correct shape; npcond is a boolean array
        flagging values out of the domain.
        """
        p = np.array(p, dtype=np.float64, copy=True)
        # The last probability is always recomputed as the leftover mass,
        # so `p` need only specify the first k-1 categories consistently.
        p[..., -1] = 1. - p[..., :-1].sum(axis=-1)
        # true for bad p
        pcond = np.any(p < 0, axis=-1)
        pcond |= np.any(p > 1, axis=-1)
        n = np.array(n, dtype=np.int_, copy=True)
        # true for bad n
        ncond = n <= 0
        return n, p, ncond | pcond
    def _process_quantiles(self, x, n, p):
        """Returns: x_, xcond.
        x_ is an int array; xcond is a boolean array flagging values out of the
        domain.
        """
        xx = np.asarray(x, dtype=np.int_)
        if xx.ndim == 0:
            raise ValueError("x must be an array.")
        if xx.size != 0 and not xx.shape[-1] == p.shape[-1]:
            raise ValueError("Size of each quantile should be size of p: "
                             "received %d, but expected %d." %
                             (xx.shape[-1], p.shape[-1]))
        # true for x out of the domain: non-integral entries, negative
        # counts, or counts that do not sum to n.
        cond = np.any(xx != x, axis=-1)
        cond |= np.any(xx < 0, axis=-1)
        cond = cond | (np.sum(xx, axis=-1) != n)
        return xx, cond
    def _checkresult(self, result, cond, bad_value):
        # Replace entries flagged by `cond` with `bad_value`, handling both
        # scalar and array results.
        result = np.asarray(result)
        if cond.ndim != 0:
            result[cond] = bad_value
        elif cond:
            if result.ndim == 0:
                return bad_value
            result[...] = bad_value
        return result
    def _logpmf(self, x, n, p):
        # log pmf = log(n!) + sum_i [x_i log p_i - log(x_i!)]; xlogy handles
        # the x_i = 0, p_i = 0 corner as 0.
        return gammaln(n+1) + np.sum(xlogy(x, p) - gammaln(x+1), axis=-1)
    def logpmf(self, x, n, p):
        """Log of the Multinomial probability mass function.
        Parameters
        ----------
        x : array_like
            Quantiles, with the last axis of `x` denoting the components.
        %(_doc_default_callparams)s
        Returns
        -------
        logpmf : ndarray or scalar
            Log of the probability mass function evaluated at `x`
        Notes
        -----
        %(_doc_callparams_note)s
        """
        n, p, npcond = self._process_parameters(n, p)
        x, xcond = self._process_quantiles(x, n, p)
        result = self._logpmf(x, n, p)
        # replace values for which x was out of the domain; broadcast
        # xcond to the right shape
        xcond_ = xcond | np.zeros(npcond.shape, dtype=np.bool_)
        # BUG FIX (compat): np.NINF/np.NAN were removed in NumPy 2.0;
        # use the canonical spellings, consistent with np.nan elsewhere
        # in this class.
        result = self._checkresult(result, xcond_, -np.inf)
        # replace values bad for n or p; broadcast npcond to the right shape
        npcond_ = npcond | np.zeros(xcond.shape, dtype=np.bool_)
        return self._checkresult(result, npcond_, np.nan)
    def pmf(self, x, n, p):
        """Multinomial probability mass function.
        Parameters
        ----------
        x : array_like
            Quantiles, with the last axis of `x` denoting the components.
        %(_doc_default_callparams)s
        Returns
        -------
        pmf : ndarray or scalar
            Probability density function evaluated at `x`
        Notes
        -----
        %(_doc_callparams_note)s
        """
        return np.exp(self.logpmf(x, n, p))
    def mean(self, n, p):
        """Mean of the Multinomial distribution.
        Parameters
        ----------
        %(_doc_default_callparams)s
        Returns
        -------
        mean : float
            The mean of the distribution
        """
        n, p, npcond = self._process_parameters(n, p)
        result = n[..., np.newaxis]*p
        return self._checkresult(result, npcond, np.nan)
    def cov(self, n, p):
        """Covariance matrix of the multinomial distribution.
        Parameters
        ----------
        %(_doc_default_callparams)s
        Returns
        -------
        cov : ndarray
            The covariance matrix of the distribution
        """
        n, p, npcond = self._process_parameters(n, p)
        nn = n[..., np.newaxis, np.newaxis]
        # Off-diagonal: cov(x_j, x_k) = -n p_j p_k.
        result = nn * np.einsum('...j,...k->...jk', -p, p)
        # change the diagonal: var(x_i) = n p_i (1 - p_i)
        for i in range(p.shape[-1]):
            result[..., i, i] += n*p[..., i]
        return self._checkresult(result, npcond, np.nan)
    def entropy(self, n, p):
        r"""Compute the entropy of the multinomial distribution.
        The entropy is computed using this expression:
        .. math::
            f(x) = - \log n! - n\sum_{i=1}^k p_i \log p_i +
            \sum_{i=1}^k \sum_{x=0}^n \binom n x p_i^x(1-p_i)^{n-x} \log x!
        Parameters
        ----------
        %(_doc_default_callparams)s
        Returns
        -------
        h : scalar
            Entropy of the Multinomial distribution
        Notes
        -----
        %(_doc_callparams_note)s
        """
        n, p, npcond = self._process_parameters(n, p)
        x = np.r_[1:np.max(n)+1]
        term1 = n*np.sum(entr(p), axis=-1)
        term1 -= gammaln(n+1)
        n = n[..., np.newaxis]
        # Give x enough trailing singleton axes to broadcast against n and p
        # while keeping its own values on a dedicated leading axis.
        new_axes_needed = max(p.ndim, n.ndim) - x.ndim + 1
        x.shape += (1,)*new_axes_needed
        term2 = np.sum(binom.pmf(x, n, p)*gammaln(x+1),
                       axis=(-1, -1-new_axes_needed))
        return self._checkresult(term1 + term2, npcond, np.nan)
    def rvs(self, n, p, size=None, random_state=None):
        """Draw random samples from a Multinomial distribution.
        Parameters
        ----------
        %(_doc_default_callparams)s
        size : integer or iterable of integers, optional
            Number of samples to draw. Default is ``None``, in which case a
            single variate is returned.
        %(_doc_random_state)s
        Returns
        -------
        rvs : ndarray or scalar
            Random variates of shape (`size`, `len(p)`)
        Notes
        -----
        %(_doc_callparams_note)s
        """
        n, p, npcond = self._process_parameters(n, p)
        random_state = self._get_random_state(random_state)
        return random_state.multinomial(n, p, size)
# Module-level instance providing the public API; calling it (e.g.
# ``multinomial(n, p)``) returns a frozen distribution.
multinomial = multinomial_gen()
class multinomial_frozen(multi_rv_frozen):
    r"""Create a frozen Multinomial distribution.

    Parameters
    ----------
    n : int
        number of trials
    p: array_like
        probability of a trial falling into each category; should sum to 1
    seed : {None, int, `numpy.random.Generator`,
            `numpy.random.RandomState`}, optional
        If `seed` is None (or `np.random`), the `numpy.random.RandomState`
        singleton is used.
        If `seed` is an int, a new ``RandomState`` instance is used,
        seeded with `seed`.
        If `seed` is already a ``Generator`` or ``RandomState`` instance then
        that instance is used.
    """

    def __init__(self, n, p, seed=None):
        self._dist = multinomial_gen(seed)
        self.n, self.p, self.npcond = self._dist._process_parameters(n, p)
        # Monkey patch the private generator so subsequent calls reuse the
        # parameters validated once above instead of re-validating them.
        self._dist._process_parameters = lambda n, p: (self.n, self.p,
                                                       self.npcond)

    def logpmf(self, x):
        return self._dist.logpmf(x, self.n, self.p)

    def pmf(self, x):
        return self._dist.pmf(x, self.n, self.p)

    def mean(self):
        return self._dist.mean(self.n, self.p)

    def cov(self):
        return self._dist.cov(self.n, self.p)

    def entropy(self):
        return self._dist.entropy(self.n, self.p)

    def rvs(self, size=1, random_state=None):
        return self._dist.rvs(self.n, self.p, size, random_state)
# Set frozen generator docstrings from corresponding docstrings in
# multinomial and fill in default strings in class docstrings
for _method_name in ['logpmf', 'pmf', 'mean', 'cov', 'rvs']:
    _gen_method = multinomial_gen.__dict__[_method_name]
    _frozen_method = multinomial_frozen.__dict__[_method_name]
    # The frozen variant documents no distribution parameters.
    _frozen_method.__doc__ = doccer.docformat(_gen_method.__doc__,
                                              multinomial_docdict_noparams)
    _gen_method.__doc__ = doccer.docformat(_gen_method.__doc__,
                                           multinomial_docdict_params)
class special_ortho_group_gen(multi_rv_generic):
    r"""A matrix-valued SO(N) random variable.
    Return a random rotation matrix, drawn from the Haar distribution
    (the only uniform distribution on SO(n)).
    The `dim` keyword specifies the dimension N.
    Methods
    -------
    ``rvs(dim=None, size=1, random_state=None)``
        Draw random samples from SO(N).
    Parameters
    ----------
    dim : scalar
        Dimension of matrices
    Notes
    -----
    This class is wrapping the random_rot code from the MDP Toolkit,
    https://github.com/mdp-toolkit/mdp-toolkit
    Return a random rotation matrix, drawn from the Haar distribution
    (the only uniform distribution on SO(n)).
    The algorithm is described in the paper
    Stewart, G.W., "The efficient generation of random orthogonal
    matrices with an application to condition estimators", SIAM Journal
    on Numerical Analysis, 17(3), pp. 403-409, 1980.
    For more information see
    https://en.wikipedia.org/wiki/Orthogonal_matrix#Randomization
    See also the similar `ortho_group`. For a random rotation in three
    dimensions, see `scipy.spatial.transform.Rotation.random`.
    Examples
    --------
    >>> from scipy.stats import special_ortho_group
    >>> x = special_ortho_group.rvs(3)
    >>> np.dot(x, x.T)
    array([[  1.00000000e+00,   1.13231364e-17,  -2.86852790e-16],
           [  1.13231364e-17,   1.00000000e+00,  -1.46845020e-16],
           [ -2.86852790e-16,  -1.46845020e-16,   1.00000000e+00]])
    >>> import scipy.linalg
    >>> scipy.linalg.det(x)
    1.0
    This generates one random matrix from SO(3). It is orthogonal and
    has a determinant of 1.
    See Also
    --------
    ortho_group, scipy.spatial.transform.Rotation.random
    """
    def __init__(self, seed=None):
        super().__init__(seed)
        # Expand the doccer templates in the class docstring.
        self.__doc__ = doccer.docformat(self.__doc__)
    def __call__(self, dim=None, seed=None):
        """Create a frozen SO(N) distribution.
        See `special_ortho_group_frozen` for more information.
        """
        return special_ortho_group_frozen(dim, seed=seed)
    def _process_parameters(self, dim):
        """Dimension N must be specified; it cannot be inferred."""
        if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim):
            raise ValueError("""Dimension of rotation must be specified,
                                and must be a scalar greater than 1.""")
        return dim
    def rvs(self, dim, size=1, random_state=None):
        """Draw random samples from SO(N).
        Parameters
        ----------
        dim : integer
            Dimension of rotation space (N).
        size : integer, optional
            Number of samples to draw (default 1).
        Returns
        -------
        rvs : ndarray or scalar
            Random size N-dimensional matrices, dimension (size, dim, dim)
        """
        random_state = self._get_random_state(random_state)
        size = int(size)
        # For size > 1, draw each matrix independently and stack them.
        if size > 1:
            return np.array([self.rvs(dim, size=1, random_state=random_state)
                             for i in range(size)])
        dim = self._process_parameters(dim)
        # H accumulates the product of Householder reflections; D records the
        # sign chosen at each step so the determinant can be fixed at the end.
        H = np.eye(dim)
        D = np.empty((dim,))
        for n in range(dim-1):
            # Random Gaussian vector defining the n-th Householder reflector.
            x = random_state.normal(size=(dim-n,))
            norm2 = np.dot(x, x)
            # Keep the original first component; it is needed to renormalize
            # after x[0] is shifted below.
            x0 = x[0].item()
            # Shift x[0] away from zero in the direction of its sign
            # (0 treated as +1), which avoids cancellation.
            D[n] = np.sign(x[0]) if x[0] != 0 else 1
            x[0] += D[n]*np.sqrt(norm2)
            x /= np.sqrt((norm2 - x0**2 + x[0]**2) / 2.)
            # Householder transformation
            H[:, n:] -= np.outer(np.dot(H[:, n:], x), x)
        # Choose the last sign so the overall determinant is +1 (SO(N)).
        D[-1] = (-1)**(dim-1)*D[:-1].prod()
        # Equivalent to np.dot(np.diag(D), H) but faster, apparently
        H = (D*H.T).T
        return H
# Module-level instance providing the public API; calling it (e.g.
# ``special_ortho_group(dim)``) returns a frozen distribution.
special_ortho_group = special_ortho_group_gen()
class special_ortho_group_frozen(multi_rv_frozen):
    def __init__(self, dim=None, seed=None):
        """Create a frozen SO(N) distribution.
        Parameters
        ----------
        dim : scalar
            Dimension of matrices
        seed : {None, int, `numpy.random.Generator`,
                `numpy.random.RandomState`}, optional
            If `seed` is None (or `np.random`), the `numpy.random.RandomState`
            singleton is used.
            If `seed` is an int, a new ``RandomState`` instance is used,
            seeded with `seed`.
            If `seed` is already a ``Generator`` or ``RandomState`` instance
            then that instance is used.
        Examples
        --------
        >>> from scipy.stats import special_ortho_group
        >>> g = special_ortho_group(5)
        >>> x = g.rvs()
        """
        self._dist = special_ortho_group_gen(seed)
        # Validate `dim` once here so each `rvs` call can reuse it.
        self.dim = self._dist._process_parameters(dim)
    def rvs(self, size=1, random_state=None):
        # Delegate to the generator with the frozen dimension.
        return self._dist.rvs(self.dim, size, random_state)
class ortho_group_gen(multi_rv_generic):
    r"""A matrix-valued O(N) random variable.

    Return a random orthogonal matrix, drawn from the O(N) Haar
    distribution (the only uniform distribution on O(N)).

    The `dim` keyword specifies the dimension N.

    Methods
    -------
    ``rvs(dim=None, size=1, random_state=None)``
        Draw random samples from O(N).

    Parameters
    ----------
    dim : scalar
        Dimension of matrices

    Notes
    -----
    This class is closely related to `special_ortho_group`.

    Some care is taken to avoid numerical error, as per the paper by Mezzadri.

    References
    ----------
    .. [1] F. Mezzadri, "How to generate random matrices from the classical
           compact groups", :arXiv:`math-ph/0609050v2`.

    Examples
    --------
    >>> from scipy.stats import ortho_group
    >>> x = ortho_group.rvs(3)

    >>> np.dot(x, x.T)
    array([[  1.00000000e+00,   1.13231364e-17,  -2.86852790e-16],
           [  1.13231364e-17,   1.00000000e+00,  -1.46845020e-16],
           [ -2.86852790e-16,  -1.46845020e-16,   1.00000000e+00]])

    >>> import scipy.linalg
    >>> np.fabs(scipy.linalg.det(x))
    1.0

    This generates one random matrix from O(3). It is orthogonal and
    has a determinant of +1 or -1.

    """

    def __init__(self, seed=None):
        super().__init__(seed)
        self.__doc__ = doccer.docformat(self.__doc__)

    def _process_parameters(self, dim):
        """Dimension N must be specified; it cannot be inferred."""
        if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim):
            # BUG FIX: the two message fragments were previously concatenated
            # without a separating space ("...specified,and must...").
            raise ValueError("Dimension of rotation must be specified, "
                             "and must be a scalar greater than 1.")
        return dim

    def rvs(self, dim, size=1, random_state=None):
        """Draw random samples from O(N).

        Parameters
        ----------
        dim : integer
            Dimension of rotation space (N).
        size : integer, optional
            Number of samples to draw (default 1).

        Returns
        -------
        rvs : ndarray or scalar
            Random size N-dimensional matrices, dimension (size, dim, dim)

        """
        random_state = self._get_random_state(random_state)

        size = int(size)
        # For size > 1, draw one sample at a time from the shared stream.
        if size > 1:
            return np.array([self.rvs(dim, size=1, random_state=random_state)
                             for i in range(size)])

        dim = self._process_parameters(dim)

        # Product of Householder reflections; unlike SO(N), the determinant
        # is not corrected, so it may be +1 or -1 (uniform over O(N)).
        H = np.eye(dim)
        for n in range(dim):
            x = random_state.normal(size=(dim-n,))
            norm2 = np.dot(x, x)
            x0 = x[0].item()
            # random sign, 50/50, but chosen carefully to avoid roundoff error
            D = np.sign(x[0]) if x[0] != 0 else 1
            x[0] += D * np.sqrt(norm2)
            x /= np.sqrt((norm2 - x0**2 + x[0]**2) / 2.)
            # Householder transformation
            H[:, n:] = -D * (H[:, n:] - np.outer(np.dot(H[:, n:], x), x))
        return H
# Module-level singleton instance of ortho_group_gen.
ortho_group = ortho_group_gen()
class random_correlation_gen(multi_rv_generic):
    r"""A random correlation matrix.

    Return a random correlation matrix, given a vector of eigenvalues.

    The `eigs` keyword specifies the eigenvalues of the correlation matrix,
    and implies the dimension.

    Methods
    -------
    ``rvs(eigs=None, random_state=None)``
        Draw random correlation matrices, all with eigenvalues eigs.

    Parameters
    ----------
    eigs : 1d ndarray
        Eigenvalues of correlation matrix.

    Notes
    -----
    Generates a random correlation matrix following a numerically stable
    algorithm spelled out by Davies & Higham. This algorithm uses a single O(N)
    similarity transformation to construct a symmetric positive semi-definite
    matrix, and applies a series of Givens rotations to scale it to have ones
    on the diagonal.

    References
    ----------
    .. [1] Davies, Philip I; Higham, Nicholas J; "Numerically stable generation
           of correlation matrices and their factors", BIT 2000, Vol. 40,
           No. 4, pp. 640-651

    Examples
    --------
    >>> from scipy.stats import random_correlation
    >>> rng = np.random.default_rng()
    >>> x = random_correlation.rvs((.5, .8, 1.2, 1.5), random_state=rng)
    >>> x
    array([[ 1.        , -0.07198934, -0.20411041, -0.24385796],
           [-0.07198934,  1.        ,  0.12968613, -0.29471382],
           [-0.20411041,  0.12968613,  1.        ,  0.2828693 ],
           [-0.24385796, -0.29471382,  0.2828693 ,  1.        ]])
    >>> import scipy.linalg
    >>> e, v = scipy.linalg.eigh(x)
    >>> e
    array([ 0.5,  0.8,  1.2,  1.5])

    """

    def __init__(self, seed=None):
        super().__init__(seed)
        self.__doc__ = doccer.docformat(self.__doc__)

    def _process_parameters(self, eigs, tol):
        # Validate that eigs is a 1-d vector of length > 1, sums to the
        # dimensionality (within tol), and has no eigenvalue below -tol.
        eigs = np.asarray(eigs, dtype=float)
        dim = eigs.size

        if eigs.ndim != 1 or eigs.shape[0] != dim or dim <= 1:
            raise ValueError("Array 'eigs' must be a vector of length "
                             "greater than 1.")

        if np.fabs(np.sum(eigs) - dim) > tol:
            raise ValueError("Sum of eigenvalues must equal dimensionality.")

        for x in eigs:
            if x < -tol:
                raise ValueError("All eigenvalues must be non-negative.")

        return dim, eigs

    def _givens_to_1(self, aii, ajj, aij):
        """Computes a 2x2 Givens matrix to put 1's on the diagonal.

        The input matrix is a 2x2 symmetric matrix M = [ aii aij ; aij ajj ].

        The output matrix g is a 2x2 anti-symmetric matrix of the form
        [ c s ; -s c ];  the elements c and s are returned.

        Applying the output matrix to the input matrix (as b=g.T M g)
        results in a matrix with bii=1, provided tr(M) - det(M) >= 1
        and floating point issues do not occur. Otherwise, some other
        valid rotation is returned. When tr(M)==2, also bjj=1.

        """
        aiid = aii - 1.
        ajjd = ajj - 1.

        if ajjd == 0:
            # ajj==1, so swap aii and ajj to avoid division by zero
            return 0., 1.

        dd = math.sqrt(max(aij**2 - aiid*ajjd, 0))

        # The choice of t should be chosen to avoid cancellation [1]
        t = (aij + math.copysign(dd, aij)) / ajjd
        c = 1. / math.sqrt(1. + t*t)
        if c == 0:
            # Underflow
            s = 1.0
        else:
            s = c*t
        return c, s

    def _to_corr(self, m):
        """
        Given a psd matrix m, rotate to put one's on the diagonal, turning it
        into a correlation matrix.  This also requires the trace equal the
        dimensionality. Note: modifies input matrix
        """
        # Check requirements for in-place Givens
        if not (m.flags.c_contiguous and m.dtype == np.float64 and
                m.shape[0] == m.shape[1]):
            raise ValueError()

        d = m.shape[0]
        for i in range(d-1):
            if m[i, i] == 1:
                continue
            elif m[i, i] > 1:
                # Pair this over-unit diagonal entry with a later under-unit
                # one; a Givens rotation can then move both toward 1.
                for j in range(i+1, d):
                    if m[j, j] < 1:
                        break
            else:
                for j in range(i+1, d):
                    if m[j, j] > 1:
                        break

            c, s = self._givens_to_1(m[i, i], m[j, j], m[i, j])

            # Use BLAS to apply Givens rotations in-place. Equivalent to:
            # g = np.eye(d)
            # g[i, i] = g[j,j] = c
            # g[j, i] = -s; g[i, j] = s
            # m = np.dot(g.T, np.dot(m, g))
            mv = m.ravel()
            drot(mv, mv, c, -s, n=d,
                 offx=i*d, incx=1, offy=j*d, incy=1,
                 overwrite_x=True, overwrite_y=True)
            drot(mv, mv, c, -s, n=d,
                 offx=i, incx=d, offy=j, incy=d,
                 overwrite_x=True, overwrite_y=True)

        return m

    def rvs(self, eigs, random_state=None, tol=1e-13, diag_tol=1e-7):
        """Draw random correlation matrices.

        Parameters
        ----------
        eigs : 1d ndarray
            Eigenvalues of correlation matrix
        tol : float, optional
            Tolerance for input parameter checks
        diag_tol : float, optional
            Tolerance for deviation of the diagonal of the resulting
            matrix. Default: 1e-7

        Raises
        ------
        RuntimeError
            Floating point error prevented generating a valid correlation
            matrix.

        Returns
        -------
        rvs : ndarray or scalar
            Random size N-dimensional matrices, dimension (size, dim, dim),
            each having eigenvalues eigs.

        """
        dim, eigs = self._process_parameters(eigs, tol=tol)

        random_state = self._get_random_state(random_state)

        m = ortho_group.rvs(dim, random_state=random_state)
        m = np.dot(np.dot(m, np.diag(eigs)), m.T)  # Set the trace of m
        m = self._to_corr(m)  # Carefully rotate to unit diagonal

        # Check diagonal
        if abs(m.diagonal() - 1).max() > diag_tol:
            raise RuntimeError("Failed to generate a valid correlation matrix")

        return m
# Module-level singleton instance of random_correlation_gen.
random_correlation = random_correlation_gen()
class unitary_group_gen(multi_rv_generic):
    r"""A matrix-valued U(N) random variable.

    Return a random unitary matrix.

    The `dim` keyword specifies the dimension N.

    Methods
    -------
    ``rvs(dim=None, size=1, random_state=None)``
        Draw random samples from U(N).

    Parameters
    ----------
    dim : scalar
        Dimension of matrices

    Notes
    -----
    This class is similar to `ortho_group`.

    References
    ----------
    .. [1] F. Mezzadri, "How to generate random matrices from the classical
           compact groups", :arXiv:`math-ph/0609050v2`.

    Examples
    --------
    >>> from scipy.stats import unitary_group
    >>> x = unitary_group.rvs(3)

    >>> np.dot(x, x.conj().T)
    array([[  1.00000000e+00,   1.13231364e-17,  -2.86852790e-16],
           [  1.13231364e-17,   1.00000000e+00,  -1.46845020e-16],
           [ -2.86852790e-16,  -1.46845020e-16,   1.00000000e+00]])

    This generates one random matrix from U(3). The dot product confirms that
    it is unitary up to machine precision.

    """

    def __init__(self, seed=None):
        super().__init__(seed)
        self.__doc__ = doccer.docformat(self.__doc__)

    def _process_parameters(self, dim):
        """Dimension N must be specified; it cannot be inferred."""
        if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim):
            # BUG FIX: the two message fragments were previously concatenated
            # without a separating space ("...specified,and must...").
            raise ValueError("Dimension of rotation must be specified, "
                             "and must be a scalar greater than 1.")
        return dim

    def rvs(self, dim, size=1, random_state=None):
        """Draw random samples from U(N).

        Parameters
        ----------
        dim : integer
            Dimension of space (N).
        size : integer, optional
            Number of samples to draw (default 1).

        Returns
        -------
        rvs : ndarray or scalar
            Random size N-dimensional matrices, dimension (size, dim, dim)

        """
        random_state = self._get_random_state(random_state)

        size = int(size)
        # For size > 1, draw one sample at a time from the shared stream.
        if size > 1:
            return np.array([self.rvs(dim, size=1, random_state=random_state)
                             for i in range(size)])

        dim = self._process_parameters(dim)

        # QR of a complex Ginibre matrix; rescaling each column of Q by the
        # phase of R's diagonal makes the result Haar-distributed (Mezzadri).
        z = 1/math.sqrt(2)*(random_state.normal(size=(dim, dim)) +
                            1j*random_state.normal(size=(dim, dim)))
        q, r = scipy.linalg.qr(z)
        d = r.diagonal()
        q *= d/abs(d)
        return q
# Module-level singleton instance of unitary_group_gen.
unitary_group = unitary_group_gen()
_mvt_doc_default_callparams = \
"""
loc : array_like, optional
Location of the distribution. (default ``0``)
shape : array_like, optional
Positive semidefinite matrix of the distribution. (default ``1``)
df : float, optional
Degrees of freedom of the distribution; must be greater than zero.
If ``np.inf`` then results are multivariate normal. The default is ``1``.
allow_singular : bool, optional
Whether to allow a singular matrix. (default ``False``)
"""
_mvt_doc_callparams_note = \
"""Setting the parameter `loc` to ``None`` is equivalent to having `loc`
be the zero-vector. The parameter `shape` can be a scalar, in which case
the shape matrix is the identity times that value, a vector of
diagonal entries for the shape matrix, or a two-dimensional array_like.
"""
_mvt_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
mvt_docdict_params = {
'_mvt_doc_default_callparams': _mvt_doc_default_callparams,
'_mvt_doc_callparams_note': _mvt_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
mvt_docdict_noparams = {
'_mvt_doc_default_callparams': "",
'_mvt_doc_callparams_note': _mvt_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class multivariate_t_gen(multi_rv_generic):
    r"""A multivariate t-distributed random variable.

    The `loc` parameter specifies the location. The `shape` parameter specifies
    the positive semidefinite shape matrix. The `df` parameter specifies the
    degrees of freedom.

    In addition to calling the methods below, the object itself may be called
    as a function to fix the location, shape matrix, and degrees of freedom
    parameters, returning a "frozen" multivariate t-distribution random.

    Methods
    -------
    ``pdf(x, loc=None, shape=1, df=1, allow_singular=False)``
        Probability density function.
    ``logpdf(x, loc=None, shape=1, df=1, allow_singular=False)``
        Log of the probability density function.
    ``rvs(loc=None, shape=1, df=1, size=1, random_state=None)``
        Draw random samples from a multivariate t-distribution.

    Parameters
    ----------
    x : array_like
        Quantiles, with the last axis of `x` denoting the components.
    %(_mvt_doc_default_callparams)s
    %(_doc_random_state)s

    Notes
    -----
    %(_mvt_doc_callparams_note)s
    The matrix `shape` must be a (symmetric) positive semidefinite matrix. The
    determinant and inverse of `shape` are computed as the pseudo-determinant
    and pseudo-inverse, respectively, so that `shape` does not need to have
    full rank.

    The probability density function for `multivariate_t` is

    .. math::

        f(x) = \frac{\Gamma((\nu + p)/2)}{\Gamma(\nu/2)\nu^{p/2}\pi^{p/2}|\Sigma|^{1/2}}
               \left[1 + \frac{1}{\nu} (\mathbf{x} - \boldsymbol{\mu})^{\top}
               \boldsymbol{\Sigma}^{-1}
               (\mathbf{x} - \boldsymbol{\mu}) \right]^{-(\nu + p)/2},

    where :math:`p` is the dimension of :math:`\mathbf{x}`,
    :math:`\boldsymbol{\mu}` is the :math:`p`-dimensional location,
    :math:`\boldsymbol{\Sigma}` the :math:`p \times p`-dimensional shape
    matrix, and :math:`\nu` is the degrees of freedom.

    .. versionadded:: 1.6.0

    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> from scipy.stats import multivariate_t
    >>> x, y = np.mgrid[-1:3:.01, -2:1.5:.01]
    >>> pos = np.dstack((x, y))
    >>> rv = multivariate_t([1.0, -0.5], [[2.1, 0.3], [0.3, 1.5]], df=2)
    >>> fig, ax = plt.subplots(1, 1)
    >>> ax.set_aspect('equal')
    >>> plt.contourf(x, y, rv.pdf(pos))

    """

    def __init__(self, seed=None):
        """Initialize a multivariate t-distributed random variable.

        Parameters
        ----------
        seed : Random state.

        """
        super().__init__(seed)
        self.__doc__ = doccer.docformat(self.__doc__, mvt_docdict_params)
        self._random_state = check_random_state(seed)

    def __call__(self, loc=None, shape=1, df=1, allow_singular=False,
                 seed=None):
        """Create a frozen multivariate t-distribution.

        See `multivariate_t_frozen` for parameters.
        """
        # With infinite df the t distribution coincides with the normal,
        # so delegate to the (cheaper) frozen multivariate normal.
        if df == np.inf:
            return multivariate_normal_frozen(mean=loc, cov=shape,
                                              allow_singular=allow_singular,
                                              seed=seed)
        return multivariate_t_frozen(loc=loc, shape=shape, df=df,
                                     allow_singular=allow_singular, seed=seed)

    def pdf(self, x, loc=None, shape=1, df=1, allow_singular=False):
        """Multivariate t-distribution probability density function.

        Parameters
        ----------
        x : array_like
            Points at which to evaluate the probability density function.
        %(_mvt_doc_default_callparams)s

        Returns
        -------
        pdf : Probability density function evaluated at `x`.

        Examples
        --------
        >>> from scipy.stats import multivariate_t
        >>> x = [0.4, 5]
        >>> loc = [0, 1]
        >>> shape = [[1, 0.1], [0.1, 1]]
        >>> df = 7
        >>> multivariate_t.pdf(x, loc, shape, df)
        array([0.00075713])

        """
        dim, loc, shape, df = self._process_parameters(loc, shape, df)
        x = self._process_quantiles(x, dim)
        shape_info = _PSD(shape, allow_singular=allow_singular)
        logpdf = self._logpdf(x, loc, shape_info.U, shape_info.log_pdet, df,
                              dim, shape_info.rank)
        return np.exp(logpdf)

    def logpdf(self, x, loc=None, shape=1, df=1):
        """Log of the multivariate t-distribution probability density function.

        Parameters
        ----------
        x : array_like
            Points at which to evaluate the log of the probability density
            function.
        %(_mvt_doc_default_callparams)s

        Returns
        -------
        logpdf : Log of the probability density function evaluated at `x`.

        Examples
        --------
        >>> from scipy.stats import multivariate_t
        >>> x = [0.4, 5]
        >>> loc = [0, 1]
        >>> shape = [[1, 0.1], [0.1, 1]]
        >>> df = 7
        >>> multivariate_t.logpdf(x, loc, shape, df)
        array([-7.1859802])

        See Also
        --------
        pdf : Probability density function.

        """
        dim, loc, shape, df = self._process_parameters(loc, shape, df)
        x = self._process_quantiles(x, dim)
        shape_info = _PSD(shape)
        return self._logpdf(x, loc, shape_info.U, shape_info.log_pdet, df, dim,
                            shape_info.rank)

    def _logpdf(self, x, loc, prec_U, log_pdet, df, dim, rank):
        """Utility method `pdf`, `logpdf` for parameters.

        Parameters
        ----------
        x : ndarray
            Points at which to evaluate the log of the probability density
            function.
        loc : ndarray
            Location of the distribution.
        prec_U : ndarray
            A decomposition such that `np.dot(prec_U, prec_U.T)` is the inverse
            of the shape matrix.
        log_pdet : float
            Logarithm of the determinant of the shape matrix.
        df : float
            Degrees of freedom of the distribution.
        dim : int
            Dimension of the quantiles x.
        rank : int
            Rank of the shape matrix.

        Notes
        -----
        As this function does no argument checking, it should not be called
        directly; use 'logpdf' instead.

        """
        # Infinite df: the t density degenerates to the multivariate normal.
        if df == np.inf:
            return multivariate_normal._logpdf(x, loc, prec_U, log_pdet, rank)

        # Squared Mahalanobis distance of each point from loc.
        dev = x - loc
        maha = np.square(np.dot(dev, prec_U)).sum(axis=-1)

        # Terms of the log-density: log Gamma ratio (A - B), normalization
        # constants (C, D), and the kernel (E).
        t = 0.5 * (df + dim)
        A = gammaln(t)
        B = gammaln(0.5 * df)
        C = dim/2. * np.log(df * np.pi)
        D = 0.5 * log_pdet
        E = -t * np.log(1 + (1./df) * maha)

        return _squeeze_output(A - B - C - D + E)

    def rvs(self, loc=None, shape=1, df=1, size=1, random_state=None):
        """Draw random samples from a multivariate t-distribution.

        Parameters
        ----------
        %(_mvt_doc_default_callparams)s
        size : integer, optional
            Number of samples to draw (default 1).
        %(_doc_random_state)s

        Returns
        -------
        rvs : ndarray or scalar
            Random variates of size (`size`, `P`), where `P` is the
            dimension of the random variable.

        Examples
        --------
        >>> from scipy.stats import multivariate_t
        >>> x = [0.4, 5]
        >>> loc = [0, 1]
        >>> shape = [[1, 0.1], [0.1, 1]]
        >>> df = 7
        >>> multivariate_t.rvs(loc, shape, df)
        array([[0.93477495, 3.00408716]])

        """
        # For implementation details, see equation (3):
        #
        # Hofert, "On Sampling from the Multivariate t Distribution", 2013
        # http://rjournal.github.io/archive/2013-2/hofert.pdf
        #
        dim, loc, shape, df = self._process_parameters(loc, shape, df)
        if random_state is not None:
            rng = check_random_state(random_state)
        else:
            rng = self._random_state

        # Scale-mixture representation: t = loc + normal / sqrt(chi2/df).
        if np.isinf(df):
            x = np.ones(size)
        else:
            x = rng.chisquare(df, size=size) / df

        z = rng.multivariate_normal(np.zeros(dim), shape, size=size)
        samples = loc + z / np.sqrt(x)[:, None]
        return _squeeze_output(samples)

    def _process_quantiles(self, x, dim):
        """
        Adjust quantiles array so that last axis labels the components of
        each data point.
        """
        x = np.asarray(x, dtype=float)
        if x.ndim == 0:
            x = x[np.newaxis]
        elif x.ndim == 1:
            # A 1-d array is a single dim-dimensional point, unless dim == 1,
            # in which case it is a column of scalar points.
            if dim == 1:
                x = x[:, np.newaxis]
            else:
                x = x[np.newaxis, :]
        return x

    def _process_parameters(self, loc, shape, df):
        """
        Infer dimensionality from location array and shape matrix, handle
        defaults, and ensure compatible dimensions.
        """
        if loc is None and shape is None:
            loc = np.asarray(0, dtype=float)
            shape = np.asarray(1, dtype=float)
            dim = 1
        elif loc is None:
            shape = np.asarray(shape, dtype=float)
            if shape.ndim < 2:
                dim = 1
            else:
                dim = shape.shape[0]
            loc = np.zeros(dim)
        elif shape is None:
            loc = np.asarray(loc, dtype=float)
            dim = loc.size
            shape = np.eye(dim)
        else:
            shape = np.asarray(shape, dtype=float)
            loc = np.asarray(loc, dtype=float)
            dim = loc.size

        if dim == 1:
            loc.shape = (1,)
            shape.shape = (1, 1)

        if loc.ndim != 1 or loc.shape[0] != dim:
            raise ValueError("Array 'loc' must be a vector of length %d." %
                             dim)
        # Promote scalar / diagonal shape specifications to a full matrix.
        if shape.ndim == 0:
            shape = shape * np.eye(dim)
        elif shape.ndim == 1:
            shape = np.diag(shape)
        elif shape.ndim == 2 and shape.shape != (dim, dim):
            rows, cols = shape.shape
            if rows != cols:
                msg = ("Array 'cov' must be square if it is two dimensional,"
                       " but cov.shape = %s." % str(shape.shape))
            else:
                msg = ("Dimension mismatch: array 'cov' is of shape %s,"
                       " but 'loc' is a vector of length %d.")
                msg = msg % (str(shape.shape), len(loc))
            raise ValueError(msg)
        elif shape.ndim > 2:
            raise ValueError("Array 'cov' must be at most two-dimensional,"
                             " but cov.ndim = %d" % shape.ndim)

        # Process degrees of freedom.
        if df is None:
            df = 1
        elif df <= 0:
            # NaN falls through here (NaN <= 0 is False) to the isnan check.
            raise ValueError("'df' must be greater than zero.")
        elif np.isnan(df):
            raise ValueError("'df' is 'nan' but must be greater than zero or 'np.inf'.")

        return dim, loc, shape, df
class multivariate_t_frozen(multi_rv_frozen):

    def __init__(self, loc=None, shape=1, df=1, allow_singular=False,
                 seed=None):
        """Create a frozen multivariate t distribution.

        Parameters
        ----------
        %(_mvt_doc_default_callparams)s

        Examples
        --------
        >>> loc = np.zeros(3)
        >>> shape = np.eye(3)
        >>> df = 10
        >>> dist = multivariate_t(loc, shape, df)
        >>> dist.rvs()
        array([[ 0.81412036, -1.53612361, 0.42199647]])
        >>> dist.pdf([1, 1, 1])
        array([0.01237803])

        """
        self._dist = multivariate_t_gen(seed)
        # Normalize and store the parameters once; methods reuse them.
        dim, loc, shape, df = self._dist._process_parameters(loc, shape, df)
        self.dim = dim
        self.loc = loc
        self.shape = shape
        self.df = df
        # Pre-factor the shape matrix so pdf/logpdf skip the decomposition.
        self.shape_info = _PSD(shape, allow_singular=allow_singular)

    def logpdf(self, x):
        quantiles = self._dist._process_quantiles(x, self.dim)
        info = self.shape_info
        return self._dist._logpdf(quantiles, self.loc, info.U, info.log_pdet,
                                  self.df, self.dim, info.rank)

    def pdf(self, x):
        return np.exp(self.logpdf(x))

    def rvs(self, size=1, random_state=None):
        return self._dist.rvs(loc=self.loc,
                              shape=self.shape,
                              df=self.df,
                              size=size,
                              random_state=random_state)
# Module-level singleton instance of multivariate_t_gen.
multivariate_t = multivariate_t_gen()
# Set frozen generator docstrings from corresponding docstrings in
# multivariate_t_gen and fill in default strings in class docstrings.
# (NOTE(review): the original comment said "matrix_normal_gen", but this
# loop patches the multivariate t classes.)
for name in ['logpdf', 'pdf', 'rvs']:
    method = multivariate_t_gen.__dict__[name]
    method_frozen = multivariate_t_frozen.__dict__[name]
    method_frozen.__doc__ = doccer.docformat(method.__doc__,
                                             mvt_docdict_noparams)
    method.__doc__ = doccer.docformat(method.__doc__, mvt_docdict_params)
_mhg_doc_default_callparams = """\
m : array_like
The number of each type of object in the population.
That is, :math:`m[i]` is the number of objects of
type :math:`i`.
n : array_like
The number of samples taken from the population.
"""
_mhg_doc_callparams_note = """\
`m` must be an array of positive integers. If the quantile
:math:`i` contains values out of the range :math:`[0, m_i]`
where :math:`m_i` is the number of objects of type :math:`i`
in the population or if the parameters are inconsistent with one
another (e.g. ``x.sum() != n``), methods return the appropriate
value (e.g. ``0`` for ``pmf``). If `m` or `n` contain negative
values, the result will contain ``nan`` there.
"""
_mhg_doc_frozen_callparams = ""
_mhg_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
mhg_docdict_params = {
'_doc_default_callparams': _mhg_doc_default_callparams,
'_doc_callparams_note': _mhg_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
mhg_docdict_noparams = {
'_doc_default_callparams': _mhg_doc_frozen_callparams,
'_doc_callparams_note': _mhg_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class multivariate_hypergeom_gen(multi_rv_generic):
r"""A multivariate hypergeometric random variable.
Methods
-------
``pmf(x, m, n)``
Probability mass function.
``logpmf(x, m, n)``
Log of the probability mass function.
``rvs(m, n, size=1, random_state=None)``
Draw random samples from a multivariate hypergeometric
distribution.
``mean(m, n)``
Mean of the multivariate hypergeometric distribution.
``var(m, n)``
Variance of the multivariate hypergeometric distribution.
``cov(m, n)``
Compute the covariance matrix of the multivariate
hypergeometric distribution.
Parameters
----------
%(_doc_default_callparams)s
%(_doc_random_state)s
Notes
-----
%(_doc_callparams_note)s
The probability mass function for `multivariate_hypergeom` is
.. math::
P(X_1 = x_1, X_2 = x_2, \ldots, X_k = x_k) = \frac{\binom{m_1}{x_1}
\binom{m_2}{x_2} \cdots \binom{m_k}{x_k}}{\binom{M}{n}}, \\ \quad
(x_1, x_2, \ldots, x_k) \in \mathbb{N}^k \text{ with }
\sum_{i=1}^k x_i = n
where :math:`m_i` are the number of objects of type :math:`i`, :math:`M`
is the total number of objects in the population (sum of all the
:math:`m_i`), and :math:`n` is the size of the sample to be taken
from the population.
.. versionadded:: 1.6.0
Examples
--------
To evaluate the probability mass function of the multivariate
hypergeometric distribution, with a dichotomous population of size
:math:`10` and :math:`20`, at a sample of size :math:`12` with
:math:`8` objects of the first type and :math:`4` objects of the
second type, use:
>>> from scipy.stats import multivariate_hypergeom
>>> multivariate_hypergeom.pmf(x=[8, 4], m=[10, 20], n=12)
0.0025207176631464523
The `multivariate_hypergeom` distribution is identical to the
corresponding `hypergeom` distribution (tiny numerical differences
notwithstanding) when only two types (good and bad) of objects
are present in the population as in the example above. Consider
another example for a comparison with the hypergeometric distribution:
>>> from scipy.stats import hypergeom
>>> multivariate_hypergeom.pmf(x=[3, 1], m=[10, 5], n=4)
0.4395604395604395
>>> hypergeom.pmf(k=3, M=15, n=4, N=10)
0.43956043956044005
The functions ``pmf``, ``logpmf``, ``mean``, ``var``, ``cov``, and ``rvs``
support broadcasting, under the convention that the vector parameters
(``x``, ``m``, and ``n``) are interpreted as if each row along the last
axis is a single object. For instance, we can combine the previous two
calls to `multivariate_hypergeom` as
>>> multivariate_hypergeom.pmf(x=[[8, 4], [3, 1]], m=[[10, 20], [10, 5]],
... n=[12, 4])
array([0.00252072, 0.43956044])
This broadcasting also works for ``cov``, where the output objects are
square matrices of size ``m.shape[-1]``. For example:
>>> multivariate_hypergeom.cov(m=[[7, 9], [10, 15]], n=[8, 12])
array([[[ 1.05, -1.05],
[-1.05, 1.05]],
[[ 1.56, -1.56],
[-1.56, 1.56]]])
That is, ``result[0]`` is equal to
``multivariate_hypergeom.cov(m=[7, 9], n=8)`` and ``result[1]`` is equal
to ``multivariate_hypergeom.cov(m=[10, 15], n=12)``.
Alternatively, the object may be called (as a function) to fix the `m`
and `n` parameters, returning a "frozen" multivariate hypergeometric
random variable.
>>> rv = multivariate_hypergeom(m=[10, 20], n=12)
>>> rv.pmf(x=[8, 4])
0.0025207176631464523
See Also
--------
scipy.stats.hypergeom : The hypergeometric distribution.
scipy.stats.multinomial : The multinomial distribution.
References
----------
.. [1] The Multivariate Hypergeometric Distribution,
http://www.randomservices.org/random/urn/MultiHypergeometric.html
.. [2] Thomas J. Sargent and John Stachurski, 2020,
Multivariate Hypergeometric Distribution
https://python.quantecon.org/_downloads/pdf/multi_hyper.pdf
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, mhg_docdict_params)
def __call__(self, m, n, seed=None):
"""Create a frozen multivariate_hypergeom distribution.
See `multivariate_hypergeom_frozen` for more information.
"""
return multivariate_hypergeom_frozen(m, n, seed=seed)
def _process_parameters(self, m, n):
m = np.asarray(m)
n = np.asarray(n)
if m.size == 0:
m = m.astype(int)
if n.size == 0:
n = n.astype(int)
if not np.issubdtype(m.dtype, np.integer):
raise TypeError("'m' must an array of integers.")
if not np.issubdtype(n.dtype, np.integer):
raise TypeError("'n' must an array of integers.")
if m.ndim == 0:
raise ValueError("'m' must be an array with"
" at least one dimension.")
# check for empty arrays
if m.size != 0:
n = n[..., np.newaxis]
m, n = np.broadcast_arrays(m, n)
# check for empty arrays
if m.size != 0:
n = n[..., 0]
mcond = m < 0
M = m.sum(axis=-1)
ncond = (n < 0) | (n > M)
return M, m, n, mcond, ncond, np.any(mcond, axis=-1) | ncond
def _process_quantiles(self, x, M, m, n):
x = np.asarray(x)
if not np.issubdtype(x.dtype, np.integer):
raise TypeError("'x' must an array of integers.")
if x.ndim == 0:
raise ValueError("'x' must be an array with"
" at least one dimension.")
if not x.shape[-1] == m.shape[-1]:
raise ValueError(f"Size of each quantile must be size of 'm': "
f"received {x.shape[-1]}, "
f"but expected {m.shape[-1]}.")
# check for empty arrays
if m.size != 0:
n = n[..., np.newaxis]
M = M[..., np.newaxis]
x, m, n, M = np.broadcast_arrays(x, m, n, M)
# check for empty arrays
if m.size != 0:
n, M = n[..., 0], M[..., 0]
xcond = (x < 0) | (x > m)
return (x, M, m, n, xcond,
np.any(xcond, axis=-1) | (x.sum(axis=-1) != n))
def _checkresult(self, result, cond, bad_value):
result = np.asarray(result)
if cond.ndim != 0:
result[cond] = bad_value
elif cond:
return bad_value
if result.ndim == 0:
return result[()]
return result
def _logpmf(self, x, M, m, n, mxcond, ncond):
# This equation of the pmf comes from the relation,
# n combine r = beta(n+1, 1) / beta(r+1, n-r+1)
num = np.zeros_like(m, dtype=np.float_)
den = np.zeros_like(n, dtype=np.float_)
m, x = m[~mxcond], x[~mxcond]
M, n = M[~ncond], n[~ncond]
num[~mxcond] = (betaln(m+1, 1) - betaln(x+1, m-x+1))
den[~ncond] = (betaln(M+1, 1) - betaln(n+1, M-n+1))
num[mxcond] = np.nan
den[ncond] = np.nan
num = num.sum(axis=-1)
return num - den
def logpmf(self, x, m, n):
"""Log of the multivariate hypergeometric probability mass function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
Returns
-------
logpmf : ndarray or scalar
Log of the probability mass function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
M, m, n, mcond, ncond, mncond = self._process_parameters(m, n)
(x, M, m, n, xcond,
xcond_reduced) = self._process_quantiles(x, M, m, n)
mxcond = mcond | xcond
ncond = ncond | np.zeros(n.shape, dtype=np.bool_)
result = self._logpmf(x, M, m, n, mxcond, ncond)
# replace values for which x was out of the domain; broadcast
# xcond to the right shape
xcond_ = xcond_reduced | np.zeros(mncond.shape, dtype=np.bool_)
result = self._checkresult(result, xcond_, np.NINF)
# replace values bad for n or m; broadcast
# mncond to the right shape
mncond_ = mncond | np.zeros(xcond_reduced.shape, dtype=np.bool_)
return self._checkresult(result, mncond_, np.nan)
def pmf(self, x, m, n):
"""Multivariate hypergeometric probability mass function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
Returns
-------
pmf : ndarray or scalar
Probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
out = np.exp(self.logpmf(x, m, n))
return out
def mean(self, m, n):
"""Mean of the multivariate hypergeometric distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mean : array_like or scalar
The mean of the distribution
"""
M, m, n, _, _, mncond = self._process_parameters(m, n)
# check for empty arrays
if m.size != 0:
M, n = M[..., np.newaxis], n[..., np.newaxis]
cond = (M == 0)
M = np.ma.masked_array(M, mask=cond)
mu = n*(m/M)
if m.size != 0:
mncond = (mncond[..., np.newaxis] |
np.zeros(mu.shape, dtype=np.bool_))
return self._checkresult(mu, mncond, np.nan)
def var(self, m, n):
"""Variance of the multivariate hypergeometric distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
array_like
The variances of the components of the distribution. This is
the diagonal of the covariance matrix of the distribution
"""
M, m, n, _, _, mncond = self._process_parameters(m, n)
# check for empty arrays
if m.size != 0:
M, n = M[..., np.newaxis], n[..., np.newaxis]
cond = (M == 0) & (M-1 == 0)
M = np.ma.masked_array(M, mask=cond)
output = n * m/M * (M-m)/M * (M-n)/(M-1)
if m.size != 0:
mncond = (mncond[..., np.newaxis] |
np.zeros(output.shape, dtype=np.bool_))
return self._checkresult(output, mncond, np.nan)
def cov(self, m, n):
"""Covariance matrix of the multivariate hypergeometric distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
cov : array_like
The covariance matrix of the distribution
"""
# see [1]_ for the formula and [2]_ for implementation
# cov( x_i,x_j ) = -n * (M-n)/(M-1) * (K_i*K_j) / (M**2)
M, m, n, _, _, mncond = self._process_parameters(m, n)
# check for empty arrays
if m.size != 0:
M = M[..., np.newaxis, np.newaxis]
n = n[..., np.newaxis, np.newaxis]
cond = (M == 0) & (M-1 == 0)
M = np.ma.masked_array(M, mask=cond)
output = (-n * (M-n)/(M-1) *
np.einsum("...i,...j->...ij", m, m) / (M**2))
# check for empty arrays
if m.size != 0:
M, n = M[..., 0, 0], n[..., 0, 0]
cond = cond[..., 0, 0]
dim = m.shape[-1]
# diagonal entries need to be computed differently
for i in range(dim):
output[..., i, i] = (n * (M-n) * m[..., i]*(M-m[..., i]))
output[..., i, i] = output[..., i, i] / (M-1)
output[..., i, i] = output[..., i, i] / (M**2)
if m.size != 0:
mncond = (mncond[..., np.newaxis, np.newaxis] |
np.zeros(output.shape, dtype=np.bool_))
return self._checkresult(output, mncond, np.nan)
def rvs(self, m, n, size=None, random_state=None):
    """Draw random samples from a multivariate hypergeometric distribution.

    Parameters
    ----------
    %(_doc_default_callparams)s
    size : integer or iterable of integers, optional
        Number of samples to draw. Default is ``None``, in which case a
        single variate is returned as an array with shape ``m.shape``.
    %(_doc_random_state)s

    Returns
    -------
    rvs : array_like
        Random variates of shape ``size`` or ``m.shape``
        (if ``size=None``).

    Notes
    -----
    %(_doc_callparams_note)s

    Also note that NumPy's `multivariate_hypergeom` sampler is not
    used as it doesn't support broadcasting.
    """
    M, m, n, _, _, _ = self._process_parameters(m, n)
    random_state = self._get_random_state(random_state)

    # Normalize a scalar size to a 1-tuple (isinstance is False for
    # None, so no separate None check is needed).
    if isinstance(size, int):
        size = (size, )

    num_colors = m.shape[-1]
    out_shape = m.shape if size is None else size + (num_colors, )
    rvs = np.empty(out_shape, dtype=m.dtype)

    # Sequential conditional sampling, taken from numpy gh-13794
    # https://github.com/numpy/numpy/pull/13794
    remaining = M
    for c in range(num_colors - 1):
        remaining = remaining - m[..., c]
        # The (n != 0) factor zeroes the draw where nothing is left to
        # sample; n + (n == 0) keeps the underlying call valid there.
        draw = random_state.hypergeometric(m[..., c], remaining,
                                           n + (n == 0),
                                           size=size)
        rvs[..., c] = (n != 0) * draw
        n = n - rvs[..., c]
    # Whatever remains to be drawn belongs to the last color.
    rvs[..., num_colors - 1] = n

    return rvs
multivariate_hypergeom = multivariate_hypergeom_gen()
class multivariate_hypergeom_frozen(multi_rv_frozen):
    """Multivariate hypergeometric distribution with fixed parameters.

    Validates ``m`` and ``n`` once at construction time so the
    statistical methods can afterwards be called without repeating
    (or re-processing) the parameters.
    """

    def __init__(self, m, n, seed=None):
        self._dist = multivariate_hypergeom_gen(seed)
        (self.M, self.m, self.n,
         self.mcond, self.ncond,
         self.mncond) = self._dist._process_parameters(m, n)

        # monkey patch self._dist: every later call reuses the
        # parameters processed above instead of re-validating them.
        self._dist._process_parameters = lambda m, n: (
            self.M, self.m, self.n,
            self.mcond, self.ncond, self.mncond)

    def logpmf(self, x):
        return self._dist.logpmf(x, self.m, self.n)

    def pmf(self, x):
        return self._dist.pmf(x, self.m, self.n)

    def mean(self):
        return self._dist.mean(self.m, self.n)

    def var(self):
        return self._dist.var(self.m, self.n)

    def cov(self):
        return self._dist.cov(self.m, self.n)

    def rvs(self, size=1, random_state=None):
        return self._dist.rvs(self.m, self.n,
                              size=size,
                              random_state=random_state)
# Set frozen generator docstrings from corresponding docstrings in
# multivariate_hypergeom and fill in default strings in class docstrings
for name in ['logpmf', 'pmf', 'mean', 'var', 'cov', 'rvs']:
    # Raw (template) docstring lives on the generator's method.
    method = multivariate_hypergeom_gen.__dict__[name]
    method_frozen = multivariate_hypergeom_frozen.__dict__[name]
    # Frozen methods take no shape parameters, so they get the
    # "no params" substitution dict; the generator gets the full one.
    method_frozen.__doc__ = doccer.docformat(
        method.__doc__, mhg_docdict_noparams)
    method.__doc__ = doccer.docformat(method.__doc__,
                                      mhg_docdict_params)
| bsd-3-clause |
# =================================================================
#
# Authors: Ricardo Garcia Silva <ricardo.garcia.silva@gmail.com>
#
# Copyright (c) 2017 Ricardo Garcia Silva
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
"""Unit tests for pycsw.opensearch"""
import pytest
from pycsw import opensearch
pytestmark = pytest.mark.unit
@pytest.mark.parametrize("bbox, expected", [
    # coordinates inside the EPSG:4326 range are accepted, whether
    # given as ints, floats or numeric strings
    ([10, 30, -10, -30], True),
    ([10.0, 30.0, -10.0, -30.0], True),
    (["10", "30", "-10", "-30"], True),
    # boundary values (+/-180 longitude, +/-90 latitude) are valid
    ([-180, 30, -10, -30], True),
    ([180, 30, -10, -30], True),
    ([0, -90, -10, -30], True),
    ([0, 90, -10, -30], True),
    ([10, 30, -180, -30], True),
    ([10, 30, 180, -30], True),
    ([10, 30, -10, -90], True),
    ([10, 30, -10, 90], True),
    # any coordinate beyond those bounds invalidates the bbox
    ([-190, 30, -10, -30], False),
    ([190, 30, -10, -30], False),
    ([10, 100, -10, -30], False),
    ([10, -100, -10, -30], False),
    ([10, 30, -190, -30], False),
    ([10, 30, 190, -30], False),
    ([10, 30, -10, -190], False),
    ([10, 30, -10, 190], False),
])
def test_validate_4326(bbox, expected):
    """validate_4326 accepts only bboxes within EPSG:4326 limits."""
    assert opensearch.validate_4326(bbox) == expected
# Copyright (C) 2014 VA Linux Systems Japan K.K.
# Copyright (C) 2014 Fumihiko Kakuma <kakuma at valinux co jp>
# Copyright (C) 2014 YAMAMOTO Takashi <yamamoto at valinux co jp>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ryu.app.ofctl import api as ryu_api
from ryu.lib import dpid as dpid_lib
from ryu.lib.packet import arp
from ryu.lib.packet import ethernet
from ryu.lib.packet import packet
from ryu.lib.packet import vlan
from neutron.common import log
from neutron.openstack.common.gettextutils import _LI
from neutron.openstack.common import log as logging
import neutron.plugins.ofagent.agent.metadata as meta
LOG = logging.getLogger(__name__)
class ArpLib(object):
    """Answer ARP requests for known IPs at the agent.

    Maintains a per-network ARP table and, on OpenFlow packet-in
    events from the configured bridge, builds and emits ARP replies
    so requests for known addresses need not be flooded.
    """

    def __init__(self, ryuapp):
        """Constructor.

        Define the internal table mapped an ip and a mac in a network.
        self._arp_tbl:
            {network1: {ip_addr: mac, ...},
             network2: {ip_addr: mac, ...},
             ...,
            }

        :param ryuapp: object of the ryu app.
        """
        self.ryuapp = ryuapp
        self._arp_tbl = {}
        self.br = None

    def set_bridge(self, br):
        # Bridge whose datapath we serve; packet-ins from any other
        # datapath are ignored in packet_in_handler().
        self.br = br

    @log.log
    def _send_arp_reply(self, datapath, port, pkt):
        """Emit a pre-built ARP reply packet out of the given port."""
        ofp = datapath.ofproto
        ofpp = datapath.ofproto_parser
        pkt.serialize()
        data = pkt.data
        actions = [ofpp.OFPActionOutput(port=port)]
        out = ofpp.OFPPacketOut(datapath=datapath,
                                buffer_id=ofp.OFP_NO_BUFFER,
                                in_port=ofp.OFPP_CONTROLLER,
                                actions=actions,
                                data=data)
        ryu_api.send_msg(self.ryuapp, out)

    @log.log
    def _send_unknown_packet(self, msg, in_port, out_port):
        """Re-inject a packet we could not answer.

        Sends the packet-in's payload (or its buffer id) back to the
        switch so normal flow-table processing continues.
        """
        datapath = msg.datapath
        ofp = datapath.ofproto
        ofpp = datapath.ofproto_parser
        data = None
        # Only unbuffered packet-ins carry the raw packet data.
        if msg.buffer_id == ofp.OFP_NO_BUFFER:
            data = msg.data
        actions = [ofpp.OFPActionOutput(port=out_port)]
        out = ofpp.OFPPacketOut(datapath=datapath,
                                buffer_id=msg.buffer_id,
                                in_port=in_port,
                                actions=actions,
                                data=data)
        ryu_api.send_msg(self.ryuapp, out)

    def _respond_arp(self, datapath, port, arptbl,
                     pkt_ethernet, pkt_vlan, pkt_arp):
        """Reply to an ARP request if the target IP is in arptbl.

        :returns: True if a reply was sent; False if the packet is not
                  an ARP request or the target IP is unknown.
        """
        if pkt_arp.opcode != arp.ARP_REQUEST:
            LOG.debug("unknown arp op %s", pkt_arp.opcode)
            return False
        ip_addr = pkt_arp.dst_ip
        hw_addr = arptbl.get(ip_addr)
        if hw_addr is None:
            LOG.debug("unknown arp request %s", ip_addr)
            return False
        LOG.debug("responding arp request %(ip_addr)s -> %(hw_addr)s",
                  {'ip_addr': ip_addr, 'hw_addr': hw_addr})
        pkt = packet.Packet()
        pkt.add_protocol(ethernet.ethernet(ethertype=pkt_ethernet.ethertype,
                                           dst=pkt_ethernet.src,
                                           src=hw_addr))
        # Mirror the request's vlan tag so the reply reaches the
        # requester on the same vlan.
        if pkt_vlan:
            pkt.add_protocol(vlan.vlan(cfi=pkt_vlan.cfi,
                                       ethertype=pkt_vlan.ethertype,
                                       pcp=pkt_vlan.pcp,
                                       vid=pkt_vlan.vid))
        pkt.add_protocol(arp.arp(opcode=arp.ARP_REPLY,
                                 src_mac=hw_addr,
                                 src_ip=ip_addr,
                                 dst_mac=pkt_arp.src_mac,
                                 dst_ip=pkt_arp.src_ip))
        self._send_arp_reply(datapath, port, pkt)
        return True

    @log.log
    def add_arp_table_entry(self, network, ip, mac):
        """Record the ip -> mac mapping for the given network."""
        if network in self._arp_tbl:
            self._arp_tbl[network][ip] = mac
        else:
            self._arp_tbl[network] = {ip: mac}

    @log.log
    def del_arp_table_entry(self, network, ip):
        """Remove the ip entry for the given network, if present.

        Tolerates entries that were never learned or were already
        removed: a bare ``del`` here would raise KeyError on a stale
        or duplicate delete notification and break the caller.
        """
        table = self._arp_tbl.get(network)
        if table is None:
            LOG.debug("removal of unknown network %s", network)
            return
        if table.pop(ip, None) is None:
            LOG.debug("removal of unknown ip %s", ip)
        # Drop the per-network dict once it is empty.
        if not table:
            del self._arp_tbl[network]

    def packet_in_handler(self, ev):
        """Check a packet-in message.

        Build and output an arp reply if a packet-in message is
        an arp packet.
        """
        msg = ev.msg
        LOG.debug("packet-in msg %s", msg)
        datapath = msg.datapath
        if self.br is None:
            LOG.info(_LI("No bridge is set"))
            return
        if self.br.datapath.id != datapath.id:
            LOG.info(_LI("Unknown bridge %(dpid)s ours %(ours)s"),
                     {"dpid": datapath.id, "ours": self.br.datapath.id})
            return
        ofp = datapath.ofproto
        port = msg.match['in_port']
        metadata = msg.match.get('metadata')
        pkt = packet.Packet(msg.data)
        LOG.info(_LI("packet-in dpid %(dpid)s in_port %(port)s pkt %(pkt)s"),
                 {'dpid': dpid_lib.dpid_to_str(datapath.id),
                  'port': port, 'pkt': pkt})
        # Only tenant traffic carries our network id in metadata.
        if metadata is None:
            LOG.info(_LI("drop non tenant packet"))
            return
        network = metadata & meta.NETWORK_MASK
        pkt_ethernet = pkt.get_protocol(ethernet.ethernet)
        if not pkt_ethernet:
            LOG.info(_LI("drop non-ethernet packet"))
            return
        pkt_vlan = pkt.get_protocol(vlan.vlan)
        pkt_arp = pkt.get_protocol(arp.arp)
        if not pkt_arp:
            LOG.info(_LI("drop non-arp packet"))
            return
        arptbl = self._arp_tbl.get(network)
        if arptbl:
            if self._respond_arp(datapath, port, arptbl,
                                 pkt_ethernet, pkt_vlan, pkt_arp):
                return
        else:
            LOG.info(_LI("unknown network %s"), network)

        # add a flow to skip a packet-in to a controller.
        self.br.arp_passthrough(network=network, tpa=pkt_arp.dst_ip)
        # send an unknown arp packet to the table.
        self._send_unknown_packet(msg, port, ofp.OFPP_TABLE)
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.