repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
DistrictDataLabs/tribe | tests/admin_script_tests.py | 2 | 2070 | # tests.admin_script_tests
# Use the subprocess module to execute tribe-admin.py for testing.
#
# Author: Benjamin Bengfort <bbengfort@districtdatalabs.com>
# Created: Wed Jun 22 15:48:08 2016 -0400
#
# Copyright (C) 2016 District Data Labs
# For license information, see LICENSE.txt
#
# ID: admin_script_tests.py [6bf9822] benjamin@bengfort.com $
"""
Use the subprocess module to execute tribe-admin.py for testing.
This serves as a form of "integration testing" as well as interface testing.
"""
##########################################################################
## Imports
##########################################################################
import os
import unittest
import subprocess
from . import TEST_VERSION
##########################################################################
## Module Constants and Paths
##########################################################################
# Root of the project checkout (the tests package lives one level below it).
PROJECT = os.path.join(os.path.dirname(__file__), '..')
# Directory holding static test fixture files.
FIXTURES = os.path.join(os.path.dirname(__file__), 'fixtures')
# Sample mbox file used by the admin-script tests.
MBOX = os.path.join(FIXTURES, "test.mbox")
# The admin script under test, located at the project root.
ADMIN = os.path.join(PROJECT, "tribe-admin.py")
##########################################################################
## Admin Tests
##########################################################################
class TribeAdminTests(unittest.TestCase):
    """
    Integration tests that execute tribe-admin.py as a subprocess.
    """

    def test_paths(self):
        """
        Assert test paths are available.
        """
        for path in (MBOX, ADMIN):
            if not os.path.exists(path):
                self.fail("required file {} does not exist!".format(path))
            # os.path.isfile() tests the path *type*, so the failure message
            # should say so (the old message claimed "is not readable",
            # which is not what was checked).
            if not os.path.isfile(path):
                self.fail("required file {} is not a regular file!".format(path))

    @unittest.skip("Not python 2.7 compatible for some reason")
    def test_version(self):
        """
        Test that the admin script reports the correct version
        """
        output = subprocess.check_output(["python", ADMIN, "--version"])
        # check_output returns bytes; decode before comparing to a str.
        output = output.decode('utf-8')
        self.assertEqual(output, 'tribe v{}\n'.format(TEST_VERSION))
| mit |
abaditsegay/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/test/test_iterlen.py | 61 | 8146 | """ Test Iterator Length Transparency
Some functions or methods which accept general iterable arguments have
optional, more efficient code paths if they know how many items to expect.
For instance, map(func, iterable), will pre-allocate the exact amount of
space required whenever the iterable can report its length.
The desired invariant is: len(it)==len(list(it)).
A complication is that an iterable and iterator can be the same object. To
maintain the invariant, an iterator needs to dynamically update its length.
For instance, an iterable such as xrange(10) always reports its length as ten,
but it=iter(xrange(10)) starts at ten, and then goes to nine after it.next().
Having this capability means that map() can ignore the distinction between
map(func, iterable) and map(func, iter(iterable)).
When the iterable is immutable, the implementation can straight-forwardly
report the original length minus the cumulative number of calls to next().
This is the case for tuples, xrange objects, and itertools.repeat().
Some containers become temporarily immutable during iteration. This includes
dicts, sets, and collections.deque. Their implementation is equally simple
though they need to permantently set their length to zero whenever there is
an attempt to iterate after a length mutation.
The situation slightly more involved whenever an object allows length mutation
during iteration. Lists and sequence iterators are dynanamically updatable.
So, if a list is extended during iteration, the iterator will continue through
the new items. If it shrinks to a point before the most recent iteration,
then no further items are available and the length is reported at zero.
Reversed objects can also be wrapped around mutable objects; however, any
appends after the current position are ignored. Any other approach leads
to confusion and possibly returning the same item more than once.
The iterators not listed above, such as enumerate and the other itertools,
are not length transparent because they have no way to distinguish between
iterables that report static length and iterators whose length changes with
each call (i.e. the difference between enumerate('abc') and
enumerate(iter('abc')).
"""
import unittest
from test import test_support
from itertools import repeat
from collections import deque
from __builtin__ import len as _len
# Length used by every fixture iterator in this module.
n = 10

def len(obj):
    # Deliberately shadows the builtin: falls back to the (undocumented)
    # __length_hint__ protocol when __len__ is unavailable, mirroring what
    # CPython's internals do when pre-sizing containers.
    try:
        return _len(obj)
    except TypeError:
        try:
            # note: this is an internal undocumented API,
            # don't rely on it in your own programs
            return obj.__length_hint__()
        except AttributeError:
            raise TypeError

class TestInvariantWithoutMutations(unittest.TestCase):
    # Base class: concrete subclasses provide ``self.it`` in setUp().

    def test_invariant(self):
        # len(it) must count down in lockstep with consumption and must
        # remain zero once the iterator is exhausted.
        it = self.it
        for i in reversed(xrange(1, n+1)):
            self.assertEqual(len(it), i)
            it.next()
        self.assertEqual(len(it), 0)
        self.assertRaises(StopIteration, it.next)
        self.assertEqual(len(it), 0)

class TestTemporarilyImmutable(TestInvariantWithoutMutations):
    # Base class for containers that forbid length mutation while
    # iterating; subclasses also provide ``self.mutate`` in setUp().

    def test_immutable_during_iteration(self):
        # objects such as deques, sets, and dictionaries enforce
        # length immutability during iteration

        it = self.it
        self.assertEqual(len(it), n)
        it.next()
        self.assertEqual(len(it), n-1)
        self.mutate()
        # After a length mutation further iteration raises, and the
        # reported length is permanently pinned to zero.
        self.assertRaises(RuntimeError, it.next)
        self.assertEqual(len(it), 0)
## ------- Concrete Type Tests -------

class TestRepeat(TestInvariantWithoutMutations):

    def setUp(self):
        self.it = repeat(None, n)

    def test_no_len_for_infinite_repeat(self):
        # The repeat() object can also be infinite
        self.assertRaises(TypeError, len, repeat(None))

class TestXrange(TestInvariantWithoutMutations):

    def setUp(self):
        self.it = iter(xrange(n))

class TestXrangeCustomReversed(TestInvariantWithoutMutations):

    def setUp(self):
        self.it = reversed(xrange(n))

class TestTuple(TestInvariantWithoutMutations):

    def setUp(self):
        self.it = iter(tuple(xrange(n)))

## ------- Types that should not be mutated during iteration -------
# Each setUp binds both the iterator under test and a ``mutate`` callable
# that shrinks the underlying container mid-iteration.

class TestDeque(TestTemporarilyImmutable):

    def setUp(self):
        d = deque(xrange(n))
        self.it = iter(d)
        self.mutate = d.pop

class TestDequeReversed(TestTemporarilyImmutable):

    def setUp(self):
        d = deque(xrange(n))
        self.it = reversed(d)
        self.mutate = d.pop

class TestDictKeys(TestTemporarilyImmutable):

    def setUp(self):
        d = dict.fromkeys(xrange(n))
        self.it = iter(d)
        self.mutate = d.popitem

class TestDictItems(TestTemporarilyImmutable):

    def setUp(self):
        d = dict.fromkeys(xrange(n))
        self.it = d.iteritems()
        self.mutate = d.popitem

class TestDictValues(TestTemporarilyImmutable):

    def setUp(self):
        d = dict.fromkeys(xrange(n))
        self.it = d.itervalues()
        self.mutate = d.popitem

class TestSet(TestTemporarilyImmutable):

    def setUp(self):
        d = set(xrange(n))
        self.it = iter(d)
        self.mutate = d.pop
## ------- Types that can mutate during iteration -------

class TestList(TestInvariantWithoutMutations):

    def setUp(self):
        self.it = iter(range(n))

    def test_mutation(self):
        d = range(n)
        it = iter(d)
        it.next()
        it.next()
        self.assertEqual(len(it), n-2)
        d.append(n)
        self.assertEqual(len(it), n-1) # grow with append
        # Shrinking the list to before the cursor pins the length at zero.
        d[1:] = []
        self.assertEqual(len(it), 0)
        self.assertEqual(list(it), [])
        # Growing again after exhaustion does not revive the iterator.
        d.extend(xrange(20))
        self.assertEqual(len(it), 0)

class TestListReversed(TestInvariantWithoutMutations):

    def setUp(self):
        self.it = reversed(range(n))

    def test_mutation(self):
        d = range(n)
        it = reversed(d)
        it.next()
        it.next()
        self.assertEqual(len(it), n-2)
        # Unlike a forward iterator, reversed() ignores appends past the
        # current (descending) position.
        d.append(n)
        self.assertEqual(len(it), n-2) # ignore append
        d[1:] = []
        self.assertEqual(len(it), 0)
        self.assertEqual(list(it), []) # confirm invariant
        d.extend(xrange(20))
        self.assertEqual(len(it), 0)
## -- Check to make sure exceptions are not suppressed by __length_hint__()

class BadLen(object):
    # Iterable whose __len__ raises: consumers must propagate the error.
    def __iter__(self): return iter(range(10))
    def __len__(self):
        raise RuntimeError('hello')

class BadLengthHint(object):
    # Iterable whose __length_hint__ raises.
    def __iter__(self): return iter(range(10))
    def __length_hint__(self):
        raise RuntimeError('hello')

class NoneLengthHint(object):
    # Iterable whose hint is invalid (None); consumers must ignore it.
    def __iter__(self): return iter(range(10))
    def __length_hint__(self):
        return None

class TestLengthHintExceptions(unittest.TestCase):

    def test_issue1242657(self):
        # Errors raised while pre-sizing must not be swallowed by any of
        # the builtins that consult len()/__length_hint__().
        self.assertRaises(RuntimeError, list, BadLen())
        self.assertRaises(RuntimeError, list, BadLengthHint())
        self.assertRaises(RuntimeError, [].extend, BadLen())
        self.assertRaises(RuntimeError, [].extend, BadLengthHint())
        self.assertRaises(RuntimeError, zip, BadLen())
        self.assertRaises(RuntimeError, zip, BadLengthHint())
        self.assertRaises(RuntimeError, filter, None, BadLen())
        self.assertRaises(RuntimeError, filter, None, BadLengthHint())
        self.assertRaises(RuntimeError, map, chr, BadLen())
        self.assertRaises(RuntimeError, map, chr, BadLengthHint())
        b = bytearray(range(10))
        self.assertRaises(RuntimeError, b.extend, BadLen())
        self.assertRaises(RuntimeError, b.extend, BadLengthHint())

    def test_invalid_hint(self):
        # Make sure an invalid result doesn't muck-up the works
        self.assertEqual(list(NoneLengthHint()), list(range(10)))

def test_main():
    # Entry point used by the regression-test driver.
    unittests = [
        TestRepeat,
        TestXrange,
        TestXrangeCustomReversed,
        TestTuple,
        TestDeque,
        TestDequeReversed,
        TestDictKeys,
        TestDictItems,
        TestDictValues,
        TestSet,
        TestList,
        TestListReversed,
        TestLengthHintExceptions,
    ]
    test_support.run_unittest(*unittests)

if __name__ == "__main__":
    test_main()
| apache-2.0 |
Simran-B/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Tools/scripts/classfix.py | 96 | 5953 | #! /usr/bin/env python
# This script is obsolete -- it is kept for historical purposes only.
#
# Fix Python source files to use the new class definition syntax, i.e.,
# the syntax used in Python versions before 0.9.8:
# class C() = base(), base(), ...: ...
# is changed to the current syntax:
# class C(base, base, ...): ...
#
# The script uses heuristics to find class definitions that usually
# work but occasionally can fail; carefully check the output!
#
# Command line arguments are files or directories to be processed.
# Directories are searched recursively for files whose name looks
# like a python module.
# Symbolic links are always ignored (except as explicit directory
# arguments). Of course, the original file is kept as a back-up
# (with a "~" attached to its name).
#
# Changes made are reported to stdout in a diff-like format.
#
# Undoubtedly you can do this using find and sed or perl, but this is
# a nice example of Python code that recurses down a directory tree
# and uses regular expressions. Also note several subtleties like
# preserving the file's mode and avoiding to even write a temp file
# when no changes are needed for a file.
#
# NB: by changing only the function fixline() you can turn this
# into a program for a different change to Python programs...
import sys
import re
import os
from stat import *
# Shorthand writers: errors and debug output go to stderr, diff-style
# change reports go to stdout.
err = sys.stderr.write
dbg = err
rep = sys.stdout.write
def main():
    # Exit status: 2 for usage errors, 1 if any file failed, 0 on success.
    bad = 0
    if not sys.argv[1:]: # No arguments
        err('usage: ' + sys.argv[0] + ' file-or-directory ...\n')
        sys.exit(2)
    for arg in sys.argv[1:]:
        if os.path.isdir(arg):
            if recursedown(arg): bad = 1
        elif os.path.islink(arg):
            # Symlinks are refused only as explicit arguments;
            # recursedown() silently skips the ones it encounters.
            err(arg + ': will not process symbolic links\n')
            bad = 1
        else:
            if fix(arg): bad = 1
    sys.exit(bad)
# Filenames that look like Python modules: letters, digits and
# underscores followed by a ".py" extension (raw string avoids the
# invalid "\." escape warning).
ispythonprog = re.compile(r'^[a-zA-Z0-9_]+\.py$')

def ispython(name):
    """Return True if *name* looks like a Python module filename."""
    # re.match() returns a match object or None.  The historical
    # ``match(name) >= 0`` comparison only worked under Python 2's
    # arbitrary cross-type ordering (None < 0) and raises TypeError on
    # Python 3; the identity test is equivalent on both.
    return ispythonprog.match(name) is not None
def recursedown(dirname):
    # Recursively fix every Python-looking file below dirname.
    # Returns 1 if anything failed, else 0.  (Python 2 except syntax.)
    dbg('recursedown(%r)\n' % (dirname,))
    bad = 0
    try:
        names = os.listdir(dirname)
    except os.error, msg:
        err('%s: cannot list directory: %r\n' % (dirname, msg))
        return 1
    names.sort()
    subdirs = []
    for name in names:
        if name in (os.curdir, os.pardir): continue
        fullname = os.path.join(dirname, name)
        # Symlinks are skipped; subdirectories are queued so all files in
        # this directory are processed before descending into them.
        if os.path.islink(fullname): pass
        elif os.path.isdir(fullname):
            subdirs.append(fullname)
        elif ispython(name):
            if fix(fullname): bad = 1
    for fullname in subdirs:
        if recursedown(fullname): bad = 1
    return bad
def fix(filename):
    # Rewrite *filename* in place if fixline() changes any line.
    # Returns 1 on failure, 0 otherwise.  Strategy: read through the file;
    # on the first changed line, rewind and re-read while copying every
    # (possibly fixed) line to a temp file, then swap the files.
##  dbg('fix(%r)\n' % (filename,))
    try:
        f = open(filename, 'r')
    except IOError, msg:
        err('%s: cannot open: %r\n' % (filename, msg))
        return 1
    head, tail = os.path.split(filename)
    tempname = os.path.join(head, '@' + tail)
    g = None
    # If we find a match, we rewind the file and start over but
    # now copy everything to a temp file.
    lineno = 0
    while 1:
        line = f.readline()
        if not line: break
        lineno = lineno + 1
        # Join backslash-continued lines so a logical line is fixed whole.
        while line[-2:] == '\\\n':
            nextline = f.readline()
            if not nextline: break
            line = line + nextline
            lineno = lineno + 1
        newline = fixline(line)
        if newline != line:
            if g is None:
                try:
                    g = open(tempname, 'w')
                except IOError, msg:
                    f.close()
                    err('%s: cannot create: %r\n' % (tempname, msg))
                    return 1
                f.seek(0)
                lineno = 0
                rep(filename + ':\n')
                continue # restart from the beginning
            # Report the change in a diff-like format.
            rep(repr(lineno) + '\n')
            rep('< ' + line)
            rep('> ' + newline)
        if g is not None:
            g.write(newline)
    # End of file
    f.close()
    if not g: return 0 # No changes
    # Finishing touch -- move files
    # First copy the file's mode to the temp file
    try:
        statbuf = os.stat(filename)
        os.chmod(tempname, statbuf[ST_MODE] & 07777)
    except os.error, msg:
        err('%s: warning: chmod failed (%r)\n' % (tempname, msg))
    # Then make a backup of the original file as filename~
    try:
        os.rename(filename, filename + '~')
    except os.error, msg:
        err('%s: warning: backup failed (%r)\n' % (filename, msg))
    # Now move the temp file to the original file
    try:
        os.rename(tempname, filename)
    except os.error, msg:
        err('%s: rename failed (%r)\n' % (filename, msg))
        return 1
    # Return succes
    return 0
# This expression doesn't catch *all* class definition headers,
# but it's pretty darn close.
# NOTE(review): these patterns appear to be written for the pre-1.5
# ``regex`` module, where bare parentheses matched *literal* parens and
# matching set a ``.regs`` attribute on the compiled object.  Under the
# modern ``re`` module the parens create groups instead -- confirm before
# reusing this (the script is kept for historical purposes only).
classexpr = '^([ \t]*class +[a-zA-Z0-9_]+) *( *) *((=.*)?):'
classprog = re.compile(classexpr)

# Expressions for finding base class expressions.
baseexpr = '^ *(.*) *( *) *$'
baseprog = re.compile(baseexpr)
def fixline(line):
    # Convert one logical line from the pre-0.9.8 class syntax
    #   class C() = base(), base(): ...
    # to the modern form
    #   class C(base, base): ...
    # NOTE(review): relies on the old ``regex``-module API (match results
    # comparable to integers, ``.regs`` on the compiled pattern); this does
    # not work with the modern ``re`` module -- historical code.
    if classprog.match(line) < 0: # No 'class' keyword -- no change
        return line

    (a0, b0), (a1, b1), (a2, b2) = classprog.regs[:3]
    # a0, b0 = Whole match (up to ':')
    # a1, b1 = First subexpression (up to classname)
    # a2, b2 = Second subexpression (=.*)
    head = line[:b1]
    tail = line[b0:] # Unmatched rest of line

    if a2 == b2: # No base classes -- easy case
        return head + ':' + tail

    # Get rid of leading '='
    basepart = line[a2+1:b2]

    # Extract list of base expressions
    bases = basepart.split(',')

    # Strip trailing '()' from each base expression
    for i in range(len(bases)):
        if baseprog.match(bases[i]) >= 0:
            x1, y1 = baseprog.regs[1]
            bases[i] = bases[i][x1:y1]

    # Join the bases back again and build the new line
    basepart = ', '.join(bases)

    return head + '(' + basepart + '):' + tail
if __name__ == '__main__':
main()
| apache-2.0 |
rmboggs/django | tests/migrations/test_multidb.py | 366 | 6909 | import unittest
from django.db import connection, migrations, models
from django.db.migrations.state import ProjectState
from django.test import override_settings
from .test_operations import OperationTestBase
try:
import sqlparse
except ImportError:
sqlparse = None
class AgnosticRouter(object):
    """
    Database router with no opinion: every migrate question yields None,
    which tells Django to fall through to the next router in the chain.
    """
    def allow_migrate(self, db, app_label, **hints):
        # Explicitly abstain from the decision.
        return None


class MigrateNothingRouter(object):
    """
    Database router that vetoes every migration.
    """
    def allow_migrate(self, db, app_label, **hints):
        return False


class MigrateEverythingRouter(object):
    """
    Database router that approves every migration unconditionally.
    """
    def allow_migrate(self, db, app_label, **hints):
        return True


class MigrateWhenFooRouter(object):
    """
    Database router that approves a migration only when the operation
    carries a truthy ``foo`` hint; otherwise it refuses.
    """
    def allow_migrate(self, db, app_label, **hints):
        return hints.get('foo', False)
class MultiDBOperationTests(OperationTestBase):
    """
    Checks that migration operations (CreateModel, RunSQL, RunPython)
    honour database routers: an operation must only touch the database
    when every router in DATABASE_ROUTERS permits it.
    """
    multi_db = True

    def _test_create_model(self, app_label, should_run):
        """
        Tests that CreateModel honours multi-db settings.
        """
        operation = migrations.CreateModel(
            "Pony",
            [("id", models.AutoField(primary_key=True))],
        )
        # Test the state alteration
        project_state = ProjectState()
        new_state = project_state.clone()
        operation.state_forwards(app_label, new_state)
        # Test the database alteration
        self.assertTableNotExists("%s_pony" % app_label)
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, project_state, new_state)
        if should_run:
            self.assertTableExists("%s_pony" % app_label)
        else:
            self.assertTableNotExists("%s_pony" % app_label)
        # And test reversal
        with connection.schema_editor() as editor:
            operation.database_backwards(app_label, editor, new_state, project_state)
        self.assertTableNotExists("%s_pony" % app_label)

    @override_settings(DATABASE_ROUTERS=[AgnosticRouter()])
    def test_create_model(self):
        """
        Test when router doesn't have an opinion (i.e. CreateModel should run).
        """
        self._test_create_model("test_mltdb_crmo", should_run=True)

    @override_settings(DATABASE_ROUTERS=[MigrateNothingRouter()])
    def test_create_model2(self):
        """
        Test when router returns False (i.e. CreateModel shouldn't run).
        """
        self._test_create_model("test_mltdb_crmo2", should_run=False)

    @override_settings(DATABASE_ROUTERS=[MigrateEverythingRouter()])
    def test_create_model3(self):
        """
        Test when router returns True (i.e. CreateModel should run).
        """
        self._test_create_model("test_mltdb_crmo3", should_run=True)

    def test_create_model4(self):
        """
        Test multiple routers.
        """
        # The first router with an opinion wins; an agnostic router defers.
        with override_settings(DATABASE_ROUTERS=[AgnosticRouter(), AgnosticRouter()]):
            self._test_create_model("test_mltdb_crmo4", should_run=True)
        with override_settings(DATABASE_ROUTERS=[MigrateNothingRouter(), MigrateEverythingRouter()]):
            self._test_create_model("test_mltdb_crmo4", should_run=False)
        with override_settings(DATABASE_ROUTERS=[MigrateEverythingRouter(), MigrateNothingRouter()]):
            self._test_create_model("test_mltdb_crmo4", should_run=True)

    def _test_run_sql(self, app_label, should_run, hints=None):
        # Set up the model tables with a permissive router, then run the
        # SQL under whatever router the calling test has installed.
        with override_settings(DATABASE_ROUTERS=[MigrateEverythingRouter()]):
            project_state = self.set_up_test_model(app_label)

        sql = """
        INSERT INTO {0}_pony (pink, weight) VALUES (1, 3.55);
        INSERT INTO {0}_pony (pink, weight) VALUES (3, 5.0);
        """.format(app_label)

        operation = migrations.RunSQL(sql, hints=hints or {})
        # Test the state alteration does nothing
        new_state = project_state.clone()
        operation.state_forwards(app_label, new_state)
        self.assertEqual(new_state, project_state)
        # Test the database alteration
        self.assertEqual(project_state.apps.get_model(app_label, "Pony").objects.count(), 0)
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, project_state, new_state)
        Pony = project_state.apps.get_model(app_label, "Pony")
        if should_run:
            self.assertEqual(Pony.objects.count(), 2)
        else:
            self.assertEqual(Pony.objects.count(), 0)

    @unittest.skipIf(sqlparse is None and connection.features.requires_sqlparse_for_splitting, "Missing sqlparse")
    @override_settings(DATABASE_ROUTERS=[MigrateNothingRouter()])
    def test_run_sql(self):
        self._test_run_sql("test_mltdb_runsql", should_run=False)

    @unittest.skipIf(sqlparse is None and connection.features.requires_sqlparse_for_splitting, "Missing sqlparse")
    @override_settings(DATABASE_ROUTERS=[MigrateWhenFooRouter()])
    def test_run_sql2(self):
        # Without the hint the router refuses; with foo=True it allows.
        self._test_run_sql("test_mltdb_runsql2", should_run=False)
        self._test_run_sql("test_mltdb_runsql2", should_run=True, hints={'foo': True})

    def _test_run_python(self, app_label, should_run, hints=None):
        with override_settings(DATABASE_ROUTERS=[MigrateEverythingRouter()]):
            project_state = self.set_up_test_model(app_label)

        # Create the operation
        def inner_method(models, schema_editor):
            Pony = models.get_model(app_label, "Pony")
            Pony.objects.create(pink=1, weight=3.55)
            Pony.objects.create(weight=5)

        operation = migrations.RunPython(inner_method, hints=hints or {})
        # Test the state alteration does nothing
        new_state = project_state.clone()
        operation.state_forwards(app_label, new_state)
        self.assertEqual(new_state, project_state)
        # Test the database alteration
        self.assertEqual(project_state.apps.get_model(app_label, "Pony").objects.count(), 0)
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, project_state, new_state)
        Pony = project_state.apps.get_model(app_label, "Pony")
        if should_run:
            self.assertEqual(Pony.objects.count(), 2)
        else:
            self.assertEqual(Pony.objects.count(), 0)

    @override_settings(DATABASE_ROUTERS=[MigrateNothingRouter()])
    def test_run_python(self):
        self._test_run_python("test_mltdb_runpython", should_run=False)

    @override_settings(DATABASE_ROUTERS=[MigrateWhenFooRouter()])
    def test_run_python2(self):
        # Without the hint the router refuses; with foo=True it allows.
        self._test_run_python("test_mltdb_runpython2", should_run=False)
        self._test_run_python("test_mltdb_runpython2", should_run=True, hints={'foo': True})
| bsd-3-clause |
Natgeoed/djorm-ext-pgarray | testing/pg_array_fields/models.py | 3 | 1845 | # -*- coding: utf-8 -*-
from django.db import models
from djorm_pgarray.fields import ArrayField
from djorm_pgarray.fields import TextArrayField
from djorm_pgarray.fields import FloatArrayField
from djorm_pgarray.fields import IntegerArrayField
from djorm_pgarray.fields import DateArrayField
from djorm_pgarray.fields import DateTimeArrayField
from djorm_pgarray.fields import SmallIntegerArrayField
def defaultval(*args, **kwargs):
    """Callable default for array fields: hand back a new, empty list.

    Accepts and ignores any positional or keyword arguments so it can be
    used wherever Django invokes a default callable.
    """
    fresh = list()
    return fresh
class Item(models.Model):
    # Callable default: each instance gets a fresh empty list.
    tags = TextArrayField(default=defaultval)


class Item2(models.Model):
    # NOTE(review): a literal mutable default ([]) is shared across all
    # field instances; Django convention is a callable default (as in
    # Item above) -- confirm this is intentional for the test suite.
    tags = TextArrayField(default=[])


class IntModel(models.Model):
    field = IntegerArrayField()
    field2 = IntegerArrayField(dimension=2)  # two-dimensional int array


class TextModel(models.Model):
    field = TextArrayField()


class MacAddrModel(models.Model):
    # Custom PostgreSQL type: each macaddr element is cast to str.
    field = ArrayField(dbtype="macaddr", type_cast=str)


class DoubleModel(models.Model):
    field = FloatArrayField()


class MTextModel(models.Model):
    data = TextArrayField(dimension=2)  # two-dimensional text array


class MultiTypeModel(models.Model):
    # Exercises two different element types on one model.
    smallints = SmallIntegerArrayField()
    varchars = ArrayField(dbtype="varchar(30)")


class DateModel(models.Model):
    dates = DateArrayField()


class DateTimeModel(models.Model):
    dates = DateTimeArrayField()


class ChoicesModel(models.Model):
    # Array field with a Django choices restriction on element values.
    choices = TextArrayField(choices=[("A", "A"), ("B", "B")])
# This is need if you want compatibility with both, python2
# and python3. If you do not need one of them, simple remove
# the appropiate conditional branch
# TODO: at this momment not used
def _memoryview_to_bytes(value):
if isinstance(value, memoryview):
return value.tobytes()
if sys.version_info.major == 2:
if isinstance(value, buffer):
return str(buffer)
return value
# Model exercising arrays of raw byte strings (PostgreSQL ``bytea[]``).
class BytesArrayModel(models.Model):
    entries = ArrayField(dbtype="bytea")
| bsd-3-clause |
szibis/Diamond | src/diamond/handler/Handler.py | 31 | 4249 | # coding=utf-8
import logging
import threading
import traceback
from configobj import ConfigObj
import time
class Handler(object):
    """
    Handlers process metrics that are collected by Collectors.

    Subclasses override process() (required) and flush() (optional); the
    framework calls the locked _process()/_flush() wrappers.
    """
    def __init__(self, config=None, log=None):
        """
        Create a new instance of the Handler class

        config -- user configuration merged over get_default_config()
        log    -- logger to use; defaults to the 'diamond' logger
        """
        # Enabled? Default to yes, but allow handlers to disable themselves
        self.enabled = True

        # Initialize Log
        if log is None:
            self.log = logging.getLogger('diamond')
        else:
            self.log = log

        # Initialize Blank Configs
        self.config = ConfigObj()
        # Load default
        self.config.merge(self.get_default_config())
        # Load in user
        self.config.merge(config)

        # error logging throttling
        self.server_error_interval = float(
            self.config['server_error_interval'])
        # Maps error message -> timestamp of its last emission.
        self._errors = {}

        # Initialize Lock
        self.lock = threading.Lock()

    def get_default_config_help(self):
        """
        Returns the help text for the configuration options for this handler
        """
        return {
            'get_default_config_help': 'get_default_config_help',
            'server_error_interval': ('How frequently to send repeated server '
                                      'errors'),
        }

    def get_default_config(self):
        """
        Return the default config for the handler
        """
        return {
            'get_default_config': 'get_default_config',
            'server_error_interval': 120,
        }

    def _process(self, metric):
        """
        Decorator for processing handlers with a lock, catching exceptions

        Exceptions from process() are logged rather than propagated so one
        failing handler cannot take down the collector loop.
        """
        if not self.enabled:
            return
        try:
            try:
                self.lock.acquire()
                self.process(metric)
            except Exception:
                self.log.error(traceback.format_exc())
        finally:
            # Only release if we actually hold the lock (acquire may have
            # raised before succeeding).
            if self.lock.locked():
                self.lock.release()

    def process(self, metric):
        """
        Process a metric

        Should be overridden in subclasses
        """
        raise NotImplementedError

    def _flush(self):
        """
        Decorator for flushing handlers with an lock, catching exceptions
        """
        if not self.enabled:
            return
        try:
            try:
                self.lock.acquire()
                self.flush()
            except Exception:
                self.log.error(traceback.format_exc())
        finally:
            if self.lock.locked():
                self.lock.release()

    def flush(self):
        """
        Flush metrics

        Optional: Should be overridden in subclasses
        """
        pass

    def _throttle_error(self, msg, *args, **kwargs):
        """
        Avoids sending errors repeatedly. Waits at least
        `self.server_error_interval` seconds before sending the same error
        string to the error logging facility. If not enough time has passed,
        it calls `log.debug` instead

        Receives the same parameters as `Logger.error` an passes them on to the
        selected logging function, but ignores all parameters but the main
        message string when checking the last emission time.

        :returns: the return value of `Logger.debug` or `Logger.error`
        """
        now = time.time()
        if msg in self._errors:
            if ((now - self._errors[msg]) >=
                    self.server_error_interval):
                fn = self.log.error
                self._errors[msg] = now
            else:
                fn = self.log.debug
        else:
            # First occurrence of this message: always emit at error level.
            self._errors[msg] = now
            fn = self.log.error

        return fn(msg, *args, **kwargs)

    def _reset_errors(self, msg=None):
        """
        Resets the logging throttle cache, so the next error is emitted
        regardless of the value in `self.server_error_interval`

        :param msg: if present, only this key is reset. Otherwise, the whole
                    cache is cleaned.
        """
        if msg is not None and msg in self._errors:
            del self._errors[msg]
        else:
            self._errors = {}
| mit |
nielsbuwen/ilastik | ilastik/applets/splitBodyCarving/opParseAnnotations.py | 4 | 8792 | ###############################################################################
# ilastik: interactive learning and segmentation toolkit
#
# Copyright (C) 2011-2014, the ilastik developers
# <team@ilastik.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# In addition, as a special exception, the copyright holders of
# ilastik give you permission to combine ilastik with applets,
# workflows and plugins which are not covered under the GNU
# General Public License.
#
# See the LICENSE file for details. License information is also available
# on the ilastik web site at:
# http://ilastik.org/license.html
###############################################################################
import sys
import os
import collections
import json
from lazyflow.graph import Operator, InputSlot, OutputSlot
from lazyflow.roi import TinyVector
# Example Raveler bookmark json file:
"""
{
"data": [
{
"text": "split <username=ogundeyio> <time=1370275410> <status=review>",
"body ID": 4199,
"location": [
361,
478,
1531
]
},
{
"text": "split <username=ogundeyio> <time=1370275416> <status=review>",
"body ID": 4199,
"location": [
301,
352,
1531
]
},
{
"text": "Separate from bottom merge",
"body ID": 4182,
"location": [
176,
419,
1556
]
},
{
"text": "Needs to be separate",
"body ID": 4199,
"location": [
163,
244,
1564
]
}
],
"metadata": {
"username": "ogundeyio",
"software version": "1.7.15",
"description": "bookmarks",
"file version": 1,
"software revision": "4406",
"computer": "emrecon11.janelia.priv",
"date": "03-June-2013 14:49",
"session path": "/groups/flyem/data/medulla-FIB-Z1211-25-production/align2/substacks/00051_3508-4007_3759-4258_1500-1999/focused-910-sessions/ogundeyio.910",
"software": "Raveler"
}
}
"""
# Example Raveler substack.json file.
# Note that raveler substacks are viewed as 500**3 volumes with a 10 pixel border on all sides,
# which means that the volume ilastik actually loads is 520**3
# The bookmark Z-coordinates are GLOBAL to the entire stack, but the XY coordinates are relative
# to the 520**3 volume we have loaded.
# Therefore, we need to offset the Z-coordinates in any bookmarks we load using the idz1 and border fields below.
# In this example, idz1 = 1500, and border=10, which means the first Z-slice in the volume we loaded is slice 1490.
"""
{
"idz1": 1500,
"gray_view": true,
"idz2": 1999,
"substack_id": 51,
"stack_path": "/groups/flyem/data/medulla-FIB-Z1211-25-production/align2",
"ry2": 4268,
"basename": "iso.%05d.png",
"substack_path": "/groups/flyem/data/medulla-FIB-Z1211-25-production/align2/substacks/00051_3508-4007_3759-4258_1500-1999",
"idx2": 4007,
"rz2": 2009,
"rz1": 1490,
"raveler_view": true,
"rx1": 3498,
"idy1": 3759,
"idx1": 3508,
"rx2": 4017,
"border": 10,
"idy2": 4258,
"ry1": 3749
}
"""
class OpParseAnnotations(Operator):
    # Lazyflow operator that parses a Raveler bookmark JSON file into split
    # annotations, sampling the body-label volume to attach a Raveler label
    # to each bookmark location.  (Python 2 only: uses 3-expression raise
    # and tuple-parameter lambda syntax.)
    AnnotationFilepath = InputSlot(stype='filepath')
    BodyLabels = InputSlot()

    # All outputs have dtype=object (2 are lists, one is a dict)
    AnnotationLocations = OutputSlot()
    AnnotationBodyIds = OutputSlot()
    Annotations = OutputSlot()

    # Annotation type
    Annotation = collections.namedtuple( 'Annotation', ['ravelerLabel', 'comment'] )

    def __init__(self, *args, **kwargs):
        super( OpParseAnnotations, self ).__init__(*args, **kwargs)
        # Parsed-annotation cache; None means "not parsed yet / dirty".
        self._annotations = None

    def setupOutputs(self):
        self.AnnotationLocations.meta.shape = (1,)
        self.AnnotationLocations.meta.dtype = object

        self.AnnotationBodyIds.meta.shape = (1,)
        self.AnnotationBodyIds.meta.dtype = object

        self.Annotations.meta.shape = (1,)
        self.Annotations.meta.dtype = object

        self._annotations = None

    class AnnotationParsingException(Exception):
        # Wraps parse failures so the GUI can show a friendly message while
        # preserving the original exception.
        def __init__(self, msg, original_exc=None):
            super(OpParseAnnotations.AnnotationParsingException, self).__init__()
            self.original_exc = original_exc
            self.msg = msg

        def __str__(self):
            return self.msg + " Caused by: {}".format( self.original_exc )

    @classmethod
    def _parseAnnotationFile(cls, annotation_filepath, body_label_img_slot):
        """
        Returns dict of annotations of the form { coordinate_3d : Annotation }
        """
        try:
            with open(annotation_filepath) as annotationFile:
                annotation_json_dict = json.load( annotationFile )
        except Exception as ex:
            raise cls.AnnotationParsingException(
                "Failed to parse your bookmark file. It isn't valid JSON.", ex), None, sys.exc_info()[2]

        if 'data' not in annotation_json_dict:
            raise cls.AnnotationParsingException(
                "Couldn't find the 'data' list in your bookmark file. Giving up."), None, sys.exc_info()[2]

        # Before we parse the bookmarks data, locate the substack description
        # to calculate the z-coordinate offset (see comment about substack coordinates, above)
        bookmark_dir = os.path.split(annotation_filepath)[0]
        substack_dir = os.path.split(bookmark_dir)[0]
        substack_description_path = os.path.join( substack_dir, 'substack.json' )
        try:
            with open(substack_description_path) as substack_description_file:
                substack_description_json_dict = json.load( substack_description_file )
        except Exception as ex:
            raise cls.AnnotationParsingException(
                "Failed to parse SUBSTACK",
                "Attempted to open substack description file:\n {}"
                "\n but something went wrong. See console output for details. Giving up."
                .format(substack_description_path) ), None, sys.exc_info()[2]

        # See comment above about why we have to subtract a Z-offset
        z_offset = substack_description_json_dict['idz1'] - substack_description_json_dict['border']

        # Each bookmark is a dict (see example above)
        annotations = {}
        bookmarks = annotation_json_dict['data']
        for bookmark in bookmarks:
            # Only bookmarks whose text mentions 'split' are of interest here.
            if 'text' in bookmark and str(bookmark['text']).lower().find( 'split' ) != -1:
                coord3d = bookmark['location']
                coord3d[1] = 520 - coord3d[1] # Raveler y-axis is inverted (Raveler substacks are 520 cubes)
                coord3d[2] -= z_offset # See comments above re: substack coordinates
                coord3d = tuple(coord3d)
                coord5d = (0,) + coord3d + (0,)
                pos = TinyVector(coord5d)
                sample_roi = (pos, pos+1)
                # For debug purposes, we sometimes load a smaller volume than the original.
                # Don't import bookmarks that fall outside our volume
                if (pos < body_label_img_slot.meta.shape).all():
                    # Sample the label volume to determine the body id (raveler label)
                    label_sample = body_label_img_slot(*sample_roi).wait()
                    annotations[coord3d] = OpParseAnnotations.Annotation( ravelerLabel=label_sample[0,0,0,0,0],
                                                                          comment=str(bookmark['text']) )

        return annotations

    def execute(self, slot, subindex, roi, result):
        # Parse file and cache results.
        if self._annotations is None:
            annotation_filepath = self.AnnotationFilepath.value
            self._annotations = OpParseAnnotations._parseAnnotationFile(annotation_filepath, self.BodyLabels)

        if slot == self.Annotations:
            result[0] = self._annotations
        elif slot == self.AnnotationLocations:
            result[0] = sorted( self._annotations.keys() )
        elif slot == self.AnnotationBodyIds:
            # Unique raveler labels, sorted (python2 tuple-unpacking lambda).
            result[0] = sorted( set( map( lambda (label, comment): label, self._annotations.values() ) ) )
        else:
            assert False, "Unknown output slot: {}".format( slot.name )

    def propagateDirty(self, slot, subindex, roi):
        # Everything is dirty
        self._annotations = None
        self.AnnotationLocations.setDirty()
        self.AnnotationBodyIds.setDirty()
        self.Annotations.setDirty()
# (dataset metadata: license gpl-3.0 — end of previous file)
# (dataset metadata: repo nysan/yocto-autobuilder, path lib/python2.6/site-packages/Twisted-11.0.0-py2.6-linux-x86_64.egg/twisted/names/test/test_dns.py, copies 18, size 43605)
# -*- test-case-name: twisted.names.test.test_dns -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for twisted.names.dns.
"""
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import struct
from twisted.python.failure import Failure
from twisted.internet import address, task
from twisted.internet.error import CannotListenError, ConnectionDone
from twisted.trial import unittest
from twisted.names import dns
from twisted.test import proto_helpers
class RoundtripDNSTestCase(unittest.TestCase):
    """Encoding and then decoding various objects."""

    # Representative domain names shared by several round-trip tests below.
    names = ["example.org", "go-away.fish.tv", "23strikesback.net"]

    def testName(self):
        """
        A L{dns.Name} encoded to a stream and decoded back preserves the
        original name string.
        """
        for n in self.names:
            # encode the name
            f = StringIO()
            dns.Name(n).encode(f)

            # decode the name
            f.seek(0, 0)
            result = dns.Name()
            result.decode(f)
            self.assertEquals(result.name, n)

    def testQuery(self):
        """
        A L{dns.Query} round-trips through encode/decode for every
        combination of test name, query type 1-16, and query class 1-4.
        """
        for n in self.names:
            for dnstype in range(1, 17):
                for dnscls in range(1, 5):
                    # encode the query
                    f = StringIO()
                    dns.Query(n, dnstype, dnscls).encode(f)

                    # decode the result
                    f.seek(0, 0)
                    result = dns.Query()
                    result.decode(f)
                    self.assertEquals(result.name.name, n)
                    self.assertEquals(result.type, dnstype)
                    self.assertEquals(result.cls, dnscls)

    def testRR(self):
        """
        A L{dns.RRHeader} keeps its name, type, class and TTL across an
        encode/decode round trip.
        """
        # encode the RR
        f = StringIO()
        dns.RRHeader("test.org", 3, 4, 17).encode(f)

        # decode the result
        f.seek(0, 0)
        result = dns.RRHeader()
        result.decode(f)
        self.assertEquals(str(result.name), "test.org")
        self.assertEquals(result.type, 3)
        self.assertEquals(result.cls, 4)
        self.assertEquals(result.ttl, 17)

    def testResources(self):
        """
        L{dns.SimpleRecord} names survive encoding and decoding, including
        names chosen to exercise compression and case preservation.
        """
        names = (
            "this.are.test.name",
            "will.compress.will.this.will.name.will.hopefully",
            "test.CASE.preSErVatIOn.YeAH",
            "a.s.h.o.r.t.c.a.s.e.t.o.t.e.s.t",
            "singleton"
        )
        for s in names:
            f = StringIO()
            dns.SimpleRecord(s).encode(f)
            f.seek(0, 0)
            result = dns.SimpleRecord()
            result.decode(f)
            self.assertEquals(str(result.name), s)

    def test_hashable(self):
        """
        Instances of all record types are hashable.
        """
        records = [
            dns.Record_NS, dns.Record_MD, dns.Record_MF, dns.Record_CNAME,
            dns.Record_MB, dns.Record_MG, dns.Record_MR, dns.Record_PTR,
            dns.Record_DNAME, dns.Record_A, dns.Record_SOA, dns.Record_NULL,
            dns.Record_WKS, dns.Record_SRV, dns.Record_AFSDB, dns.Record_RP,
            dns.Record_HINFO, dns.Record_MINFO, dns.Record_MX, dns.Record_TXT,
            dns.Record_AAAA, dns.Record_A6, dns.Record_NAPTR
        ]
        for k in records:
            # Two default-constructed instances must hash identically.
            k1, k2 = k(), k()
            hk1 = hash(k1)
            hk2 = hash(k2)
            self.assertEquals(hk1, hk2, "%s != %s (for %s)" % (hk1,hk2,k))

    def test_Charstr(self):
        """
        Test L{dns.Charstr} encode and decode.
        """
        for n in self.names:
            # encode the name
            f = StringIO()
            dns.Charstr(n).encode(f)

            # decode the name
            f.seek(0, 0)
            result = dns.Charstr()
            result.decode(f)
            self.assertEquals(result.string, n)

    def test_NAPTR(self):
        """
        Test L{dns.Record_NAPTR} encode and decode.
        """
        naptrs = [(100, 10, "u", "sip+E2U",
                   "!^.*$!sip:information@domain.tld!", ""),
                  (100, 50, "s", "http+I2L+I2C+I2R", "",
                   "_http._tcp.gatech.edu")]

        for (order, preference, flags, service, regexp, replacement) in naptrs:
            rin = dns.Record_NAPTR(order, preference, flags, service, regexp,
                                   replacement)
            e = StringIO()
            rin.encode(e)
            e.seek(0,0)
            rout = dns.Record_NAPTR()
            rout.decode(e)
            # Every NAPTR field must survive the round trip.
            self.assertEquals(rin.order, rout.order)
            self.assertEquals(rin.preference, rout.preference)
            self.assertEquals(rin.flags, rout.flags)
            self.assertEquals(rin.service, rout.service)
            self.assertEquals(rin.regexp, rout.regexp)
            self.assertEquals(rin.replacement.name, rout.replacement.name)
            self.assertEquals(rin.ttl, rout.ttl)
class MessageTestCase(unittest.TestCase):
    """
    Tests for L{twisted.names.dns.Message}.
    """

    def testEmptyMessage(self):
        """
        Test that a message which has been truncated causes an EOFError to
        be raised when it is parsed.
        """
        msg = dns.Message()
        self.assertRaises(EOFError, msg.fromStr, '')

    def testEmptyQuery(self):
        """
        Test that bytes representing an empty query message can be decoded
        as such.
        """
        msg = dns.Message()
        msg.fromStr(
            '\x01\x00' # Message ID
            '\x00' # answer bit, opCode nibble, auth bit, trunc bit, recursive bit
            '\x00' # recursion bit, empty bit, empty bit, empty bit, response code nibble
            '\x00\x00' # number of queries
            '\x00\x00' # number of answers
            '\x00\x00' # number of authorities
            '\x00\x00' # number of additionals
            )
        self.assertEquals(msg.id, 256)
        self.failIf(msg.answer, "Message was not supposed to be an answer.")
        self.assertEquals(msg.opCode, dns.OP_QUERY)
        self.failIf(msg.auth, "Message was not supposed to be authoritative.")
        self.failIf(msg.trunc, "Message was not supposed to be truncated.")
        self.assertEquals(msg.queries, [])
        self.assertEquals(msg.answers, [])
        self.assertEquals(msg.authority, [])
        self.assertEquals(msg.additional, [])

    def testNULL(self):
        """
        A NULL record whose payload contains every possible byte value
        survives a round trip through L{dns.Message} encoding and decoding.
        """
        # NOTE(review): 'bytes' shadows the builtin name; harmless in this
        # Python 2 test but worth renaming if the block is ever touched.
        bytes = ''.join([chr(i) for i in range(256)])
        rec = dns.Record_NULL(bytes)
        rr = dns.RRHeader('testname', dns.NULL, payload=rec)
        msg1 = dns.Message()
        msg1.answers.append(rr)
        s = StringIO()
        msg1.encode(s)
        s.seek(0, 0)
        msg2 = dns.Message()
        msg2.decode(s)
        self.failUnless(isinstance(msg2.answers[0].payload, dns.Record_NULL))
        self.assertEquals(msg2.answers[0].payload.payload, bytes)

    def test_lookupRecordTypeDefault(self):
        """
        L{Message.lookupRecordType} returns C{None} if it is called
        with an integer which doesn't correspond to any known record
        type.
        """
        # 65280 is the first value in the range reserved for private
        # use, so it shouldn't ever conflict with an officially
        # allocated value.
        self.assertIdentical(dns.Message().lookupRecordType(65280), None)
class TestController(object):
    """
    Pretend to be a DNS query processor for a DNSDatagramProtocol.

    @ivar messages: the list of received messages.
    @type messages: C{list} of (msg, protocol, address)
    """

    def __init__(self):
        """
        Start the controller off with an empty message list.
        """
        self.messages = []

    def messageReceived(self, msg, proto, addr):
        """
        Record a received message (with its protocol and origin address)
        so the tests can inspect it later.
        """
        received = (msg, proto, addr)
        self.messages.append(received)
class DatagramProtocolTestCase(unittest.TestCase):
    """
    Test various aspects of L{dns.DNSDatagramProtocol}.
    """

    def setUp(self):
        """
        Create a L{dns.DNSDatagramProtocol} with a deterministic clock.
        """
        self.clock = task.Clock()
        self.controller = TestController()
        self.proto = dns.DNSDatagramProtocol(self.controller)
        transport = proto_helpers.FakeDatagramTransport()
        self.proto.makeConnection(transport)
        # Route delayed calls through the deterministic clock so tests can
        # drive timeouts manually with clock.advance().
        self.proto.callLater = self.clock.callLater

    def test_truncatedPacket(self):
        """
        Test that when a short datagram is received, datagramReceived does
        not raise an exception while processing it.
        """
        self.proto.datagramReceived('',
            address.IPv4Address('UDP', '127.0.0.1', 12345))
        self.assertEquals(self.controller.messages, [])

    def test_simpleQuery(self):
        """
        Test content received after a query.
        """
        d = self.proto.query(('127.0.0.1', 21345), [dns.Query('foo')])
        # Exactly one query should now be outstanding.  (len() of the dict
        # itself; no need to materialize .keys() first.)
        self.assertEquals(len(self.proto.liveMessages), 1)
        m = dns.Message()
        # Reply with the same transaction ID the protocol assigned.
        m.id = self.proto.liveMessages.items()[0][0]
        m.answers = [dns.RRHeader(payload=dns.Record_A(address='1.2.3.4'))]
        def cb(result):
            self.assertEquals(result.answers[0].payload.dottedQuad(), '1.2.3.4')
        d.addCallback(cb)
        self.proto.datagramReceived(m.toStr(), ('127.0.0.1', 21345))
        return d

    def test_queryTimeout(self):
        """
        Test that query timeouts after some seconds.
        """
        d = self.proto.query(('127.0.0.1', 21345), [dns.Query('foo')])
        self.assertEquals(len(self.proto.liveMessages), 1)
        self.clock.advance(10)
        self.assertFailure(d, dns.DNSQueryTimeoutError)
        self.assertEquals(len(self.proto.liveMessages), 0)
        return d

    def test_writeError(self):
        """
        Exceptions raised by the transport's write method should be turned into
        C{Failure}s passed to errbacks of the C{Deferred} returned by
        L{DNSDatagramProtocol.query}.
        """
        def writeError(message, addr):
            raise RuntimeError("bar")
        self.proto.transport.write = writeError

        d = self.proto.query(('127.0.0.1', 21345), [dns.Query('foo')])
        return self.assertFailure(d, RuntimeError)

    def test_listenError(self):
        """
        Exception L{CannotListenError} raised by C{listenUDP} should be turned
        into a C{Failure} passed to errback of the C{Deferred} returned by
        L{DNSDatagramProtocol.query}.
        """
        def startListeningError():
            raise CannotListenError(None, None, None)
        self.proto.startListening = startListeningError
        # Clean up transport so that the protocol calls startListening again
        self.proto.transport = None

        d = self.proto.query(('127.0.0.1', 21345), [dns.Query('foo')])
        return self.assertFailure(d, CannotListenError)
class TestTCPController(TestController):
    """
    Pretend to be a DNS query processor for a DNSProtocol.

    @ivar connections: A list of L{DNSProtocol} instances which have
        notified this controller that they are connected and have not
        yet notified it that their connection has been lost.
    """

    def __init__(self):
        """
        Initialize the base controller and begin with no live connections.
        """
        TestController.__init__(self)
        self.connections = []

    def connectionMade(self, proto):
        """
        Remember C{proto} as a currently-connected protocol instance.
        """
        self.connections.append(proto)

    def connectionLost(self, proto):
        """
        Forget C{proto}; its connection is gone.
        """
        self.connections.remove(proto)
class DNSProtocolTestCase(unittest.TestCase):
    """
    Test various aspects of L{dns.DNSProtocol}.
    """

    def setUp(self):
        """
        Create a L{dns.DNSProtocol} with a deterministic clock.
        """
        self.clock = task.Clock()
        self.controller = TestTCPController()
        self.proto = dns.DNSProtocol(self.controller)
        self.proto.makeConnection(proto_helpers.StringTransport())
        # Route delayed calls through the deterministic clock so tests can
        # drive timeouts manually with clock.advance().
        self.proto.callLater = self.clock.callLater

    def test_connectionTracking(self):
        """
        L{dns.DNSProtocol} calls its controller's C{connectionMade}
        method with itself when it is connected to a transport and its
        controller's C{connectionLost} method when it is disconnected.
        """
        self.assertEqual(self.controller.connections, [self.proto])
        self.proto.connectionLost(
            Failure(ConnectionDone("Fake Connection Done")))
        self.assertEqual(self.controller.connections, [])

    def test_queryTimeout(self):
        """
        Test that query timeouts after some seconds.
        """
        d = self.proto.query([dns.Query('foo')])
        self.assertEquals(len(self.proto.liveMessages), 1)
        self.clock.advance(60)
        self.assertFailure(d, dns.DNSQueryTimeoutError)
        self.assertEquals(len(self.proto.liveMessages), 0)
        return d

    def test_simpleQuery(self):
        """
        Test content received after a query.
        """
        d = self.proto.query([dns.Query('foo')])
        # Exactly one query should now be outstanding.  (len() of the dict
        # itself; no need to materialize .keys() first.)
        self.assertEquals(len(self.proto.liveMessages), 1)
        m = dns.Message()
        # Reply with the same transaction ID the protocol assigned.
        m.id = self.proto.liveMessages.items()[0][0]
        m.answers = [dns.RRHeader(payload=dns.Record_A(address='1.2.3.4'))]
        def cb(result):
            self.assertEquals(result.answers[0].payload.dottedQuad(), '1.2.3.4')
        d.addCallback(cb)
        # DNS-over-TCP messages carry a 16-bit big-endian length prefix.
        s = m.toStr()
        s = struct.pack('!H', len(s)) + s
        self.proto.dataReceived(s)
        return d

    def test_writeError(self):
        """
        Exceptions raised by the transport's write method should be turned into
        C{Failure}s passed to errbacks of the C{Deferred} returned by
        L{DNSProtocol.query}.
        """
        def writeError(message):
            raise RuntimeError("bar")
        self.proto.transport.write = writeError

        d = self.proto.query([dns.Query('foo')])
        return self.assertFailure(d, RuntimeError)
class ReprTests(unittest.TestCase):
    """
    Tests for the C{__repr__} implementation of record classes.

    Each test builds one record instance and checks its exact repr string.
    """

    def test_ns(self):
        """
        The repr of a L{dns.Record_NS} instance includes the name of the
        nameserver and the TTL of the record.
        """
        record = dns.Record_NS('example.com', 4321)
        self.assertEqual(repr(record), "<NS name=example.com ttl=4321>")

    def test_md(self):
        """
        The repr of a L{dns.Record_MD} instance includes the name of the
        mail destination and the TTL of the record.
        """
        record = dns.Record_MD('example.com', 4321)
        self.assertEqual(repr(record), "<MD name=example.com ttl=4321>")

    def test_mf(self):
        """
        The repr of a L{dns.Record_MF} instance includes the name of the
        mail forwarder and the TTL of the record.
        """
        record = dns.Record_MF('example.com', 4321)
        self.assertEqual(repr(record), "<MF name=example.com ttl=4321>")

    def test_cname(self):
        """
        The repr of a L{dns.Record_CNAME} instance includes the name of the
        mail forwarder and the TTL of the record.
        """
        record = dns.Record_CNAME('example.com', 4321)
        self.assertEqual(repr(record), "<CNAME name=example.com ttl=4321>")

    def test_mb(self):
        """
        The repr of a L{dns.Record_MB} instance includes the name of the
        mailbox and the TTL of the record.
        """
        record = dns.Record_MB('example.com', 4321)
        self.assertEqual(repr(record), "<MB name=example.com ttl=4321>")

    def test_mg(self):
        """
        The repr of a L{dns.Record_MG} instance includes the name of the
        mail group memeber and the TTL of the record.
        """
        record = dns.Record_MG('example.com', 4321)
        self.assertEqual(repr(record), "<MG name=example.com ttl=4321>")

    def test_mr(self):
        """
        The repr of a L{dns.Record_MR} instance includes the name of the
        mail rename domain and the TTL of the record.
        """
        record = dns.Record_MR('example.com', 4321)
        self.assertEqual(repr(record), "<MR name=example.com ttl=4321>")

    def test_ptr(self):
        """
        The repr of a L{dns.Record_PTR} instance includes the name of the
        pointer and the TTL of the record.
        """
        record = dns.Record_PTR('example.com', 4321)
        self.assertEqual(repr(record), "<PTR name=example.com ttl=4321>")

    def test_dname(self):
        """
        The repr of a L{dns.Record_DNAME} instance includes the name of the
        non-terminal DNS name redirection and the TTL of the record.
        """
        record = dns.Record_DNAME('example.com', 4321)
        self.assertEqual(repr(record), "<DNAME name=example.com ttl=4321>")

    def test_a(self):
        """
        The repr of a L{dns.Record_A} instance includes the dotted-quad
        string representation of the address it is for and the TTL of the
        record.
        """
        record = dns.Record_A('1.2.3.4', 567)
        self.assertEqual(repr(record), '<A address=1.2.3.4 ttl=567>')

    def test_soa(self):
        """
        The repr of a L{dns.Record_SOA} instance includes all of the
        authority fields.
        """
        record = dns.Record_SOA(mname='mName', rname='rName', serial=123,
                                refresh=456, retry=789, expire=10,
                                minimum=11, ttl=12)
        expected = ("<SOA mname=mName rname=rName serial=123 refresh=456 "
                    "retry=789 expire=10 minimum=11 ttl=12>")
        self.assertEqual(repr(record), expected)

    def test_null(self):
        """
        The repr of a L{dns.Record_NULL} instance includes the repr of its
        payload and the TTL of the record.
        """
        record = dns.Record_NULL('abcd', 123)
        self.assertEqual(repr(record), "<NULL payload='abcd' ttl=123>")

    def test_wks(self):
        """
        The repr of a L{dns.Record_WKS} instance includes the dotted-quad
        string representation of the address it is for, the IP protocol
        number it is for, and the TTL of the record.
        """
        record = dns.Record_WKS('2.3.4.5', 7, ttl=8)
        self.assertEqual(repr(record), "<WKS address=2.3.4.5 protocol=7 ttl=8>")

    def test_aaaa(self):
        """
        The repr of a L{dns.Record_AAAA} instance includes the colon-separated
        hex string representation of the address it is for and the TTL of the
        record.
        """
        record = dns.Record_AAAA('8765::1234', ttl=10)
        self.assertEqual(repr(record), "<AAAA address=8765::1234 ttl=10>")

    def test_a6(self):
        """
        The repr of a L{dns.Record_A6} instance includes the colon-separated
        hex string representation of the address it is for and the TTL of the
        record.
        """
        record = dns.Record_A6(0, '1234::5678', 'foo.bar', ttl=10)
        self.assertEqual(repr(record), "<A6 suffix=1234::5678 prefix=foo.bar ttl=10>")

    def test_srv(self):
        """
        The repr of a L{dns.Record_SRV} instance includes the name and port of
        the target and the priority, weight, and TTL of the record.
        """
        record = dns.Record_SRV(1, 2, 3, 'example.org', 4)
        self.assertEqual(
            repr(record),
            "<SRV priority=1 weight=2 target=example.org port=3 ttl=4>")

    def test_naptr(self):
        """
        The repr of a L{dns.Record_NAPTR} instance includes the order,
        preference, flags, service, regular expression, replacement, and TTL of
        the record.
        """
        record = dns.Record_NAPTR(5, 9, "S", "http", "/foo/bar/i", "baz", 3)
        expected = ("<NAPTR order=5 preference=9 flags=S service=http "
                    "regexp=/foo/bar/i replacement=baz ttl=3>")
        self.assertEqual(repr(record), expected)

    def test_afsdb(self):
        """
        The repr of a L{dns.Record_AFSDB} instance includes the subtype,
        hostname, and TTL of the record.
        """
        record = dns.Record_AFSDB(3, 'example.org', 5)
        self.assertEqual(
            repr(record), "<AFSDB subtype=3 hostname=example.org ttl=5>")

    def test_rp(self):
        """
        The repr of a L{dns.Record_RP} instance includes the mbox, txt, and TTL
        fields of the record.
        """
        record = dns.Record_RP('alice.example.com', 'admin.example.com', 3)
        self.assertEqual(
            repr(record),
            "<RP mbox=alice.example.com txt=admin.example.com ttl=3>")

    def test_hinfo(self):
        """
        The repr of a L{dns.Record_HINFO} instance includes the cpu, os, and
        TTL fields of the record.
        """
        record = dns.Record_HINFO('sparc', 'minix', 12)
        self.assertEqual(repr(record), "<HINFO cpu='sparc' os='minix' ttl=12>")

    def test_minfo(self):
        """
        The repr of a L{dns.Record_MINFO} instance includes the rmailbx,
        emailbx, and TTL fields of the record.
        """
        record = dns.Record_MINFO('alice.example.com', 'bob.example.com', 15)
        expected = ("<MINFO responsibility=alice.example.com "
                    "errors=bob.example.com ttl=15>")
        self.assertEqual(repr(record), expected)

    def test_mx(self):
        """
        The repr of a L{dns.Record_MX} instance includes the preference, name,
        and TTL fields of the record.
        """
        record = dns.Record_MX(13, 'mx.example.com', 2)
        self.assertEqual(
            repr(record), "<MX preference=13 name=mx.example.com ttl=2>")

    def test_txt(self):
        """
        The repr of a L{dns.Record_TXT} instance includes the data and ttl
        fields of the record.
        """
        record = dns.Record_TXT("foo", "bar", ttl=15)
        self.assertEqual(repr(record), "<TXT data=['foo', 'bar'] ttl=15>")

    def test_spf(self):
        """
        The repr of a L{dns.Record_SPF} instance includes the data and ttl
        fields of the record, since it is structurally
        similar to L{dns.Record_TXT}.
        """
        record = dns.Record_SPF("foo", "bar", ttl=15)
        self.assertEqual(repr(record), "<SPF data=['foo', 'bar'] ttl=15>")
class _Equal(object):
    """
    A class whose instances claim equality with absolutely anything.
    """

    def __eq__(self, other):
        # Equal to every object, no matter what it is.
        return True

    def __ne__(self, other):
        # Consequently, never unequal to anything.
        return False
class _NotEqual(object):
    """
    A class whose instances refuse equality with everything.
    """

    def __eq__(self, other):
        # Never equal to any object.
        return False

    def __ne__(self, other):
        # Consequently, always unequal.
        return True
class EqualityTests(unittest.TestCase):
"""
Tests for the equality and non-equality behavior of record classes.
"""
def _equalityTest(self, firstValueOne, secondValueOne, valueTwo):
"""
Assert that C{firstValueOne} is equal to C{secondValueOne} but not
equal to C{valueOne} and that it defines equality cooperatively with
other types it doesn't know about.
"""
# This doesn't use assertEqual and assertNotEqual because the exact
# operator those functions use is not very well defined. The point
# of these assertions is to check the results of the use of specific
# operators (precisely to ensure that using different permutations
# (eg "x == y" or "not (x != y)") which should yield the same results
# actually does yield the same result). -exarkun
self.assertTrue(firstValueOne == firstValueOne)
self.assertTrue(firstValueOne == secondValueOne)
self.assertFalse(firstValueOne == valueTwo)
self.assertFalse(firstValueOne != firstValueOne)
self.assertFalse(firstValueOne != secondValueOne)
self.assertTrue(firstValueOne != valueTwo)
self.assertTrue(firstValueOne == _Equal())
self.assertFalse(firstValueOne != _Equal())
self.assertFalse(firstValueOne == _NotEqual())
self.assertTrue(firstValueOne != _NotEqual())
def _simpleEqualityTest(self, cls):
# Vary the TTL
self._equalityTest(
cls('example.com', 123),
cls('example.com', 123),
cls('example.com', 321))
# Vary the name
self._equalityTest(
cls('example.com', 123),
cls('example.com', 123),
cls('example.org', 123))
def test_rrheader(self):
"""
Two L{dns.RRHeader} instances compare equal if and only if they have
the same name, type, class, time to live, payload, and authoritative
bit.
"""
# Vary the name
self._equalityTest(
dns.RRHeader('example.com', payload=dns.Record_A('1.2.3.4')),
dns.RRHeader('example.com', payload=dns.Record_A('1.2.3.4')),
dns.RRHeader('example.org', payload=dns.Record_A('1.2.3.4')))
# Vary the payload
self._equalityTest(
dns.RRHeader('example.com', payload=dns.Record_A('1.2.3.4')),
dns.RRHeader('example.com', payload=dns.Record_A('1.2.3.4')),
dns.RRHeader('example.com', payload=dns.Record_A('1.2.3.5')))
# Vary the type. Leave the payload as None so that we don't have to
# provide non-equal values.
self._equalityTest(
dns.RRHeader('example.com', dns.A),
dns.RRHeader('example.com', dns.A),
dns.RRHeader('example.com', dns.MX))
# Probably not likely to come up. Most people use the internet.
self._equalityTest(
dns.RRHeader('example.com', cls=dns.IN, payload=dns.Record_A('1.2.3.4')),
dns.RRHeader('example.com', cls=dns.IN, payload=dns.Record_A('1.2.3.4')),
dns.RRHeader('example.com', cls=dns.CS, payload=dns.Record_A('1.2.3.4')))
# Vary the ttl
self._equalityTest(
dns.RRHeader('example.com', ttl=60, payload=dns.Record_A('1.2.3.4')),
dns.RRHeader('example.com', ttl=60, payload=dns.Record_A('1.2.3.4')),
dns.RRHeader('example.com', ttl=120, payload=dns.Record_A('1.2.3.4')))
# Vary the auth bit
self._equalityTest(
dns.RRHeader('example.com', auth=1, payload=dns.Record_A('1.2.3.4')),
dns.RRHeader('example.com', auth=1, payload=dns.Record_A('1.2.3.4')),
dns.RRHeader('example.com', auth=0, payload=dns.Record_A('1.2.3.4')))
def test_ns(self):
"""
Two L{dns.Record_NS} instances compare equal if and only if they have
the same name and TTL.
"""
self._simpleEqualityTest(dns.Record_NS)
def test_md(self):
"""
Two L{dns.Record_MD} instances compare equal if and only if they have
the same name and TTL.
"""
self._simpleEqualityTest(dns.Record_MD)
def test_mf(self):
"""
Two L{dns.Record_MF} instances compare equal if and only if they have
the same name and TTL.
"""
self._simpleEqualityTest(dns.Record_MF)
def test_cname(self):
"""
Two L{dns.Record_CNAME} instances compare equal if and only if they
have the same name and TTL.
"""
self._simpleEqualityTest(dns.Record_CNAME)
def test_mb(self):
"""
Two L{dns.Record_MB} instances compare equal if and only if they have
the same name and TTL.
"""
self._simpleEqualityTest(dns.Record_MB)
def test_mg(self):
"""
Two L{dns.Record_MG} instances compare equal if and only if they have
the same name and TTL.
"""
self._simpleEqualityTest(dns.Record_MG)
def test_mr(self):
"""
Two L{dns.Record_MR} instances compare equal if and only if they have
the same name and TTL.
"""
self._simpleEqualityTest(dns.Record_MR)
def test_ptr(self):
"""
Two L{dns.Record_PTR} instances compare equal if and only if they have
the same name and TTL.
"""
self._simpleEqualityTest(dns.Record_PTR)
def test_dname(self):
"""
Two L{dns.Record_MD} instances compare equal if and only if they have
the same name and TTL.
"""
self._simpleEqualityTest(dns.Record_DNAME)
def test_a(self):
"""
Two L{dns.Record_A} instances compare equal if and only if they have
the same address and TTL.
"""
# Vary the TTL
self._equalityTest(
dns.Record_A('1.2.3.4', 5),
dns.Record_A('1.2.3.4', 5),
dns.Record_A('1.2.3.4', 6))
# Vary the address
self._equalityTest(
dns.Record_A('1.2.3.4', 5),
dns.Record_A('1.2.3.4', 5),
dns.Record_A('1.2.3.5', 5))
def test_soa(self):
"""
Two L{dns.Record_SOA} instances compare equal if and only if they have
the same mname, rname, serial, refresh, minimum, expire, retry, and
ttl.
"""
# Vary the mname
self._equalityTest(
dns.Record_SOA('mname', 'rname', 123, 456, 789, 10, 20, 30),
dns.Record_SOA('mname', 'rname', 123, 456, 789, 10, 20, 30),
dns.Record_SOA('xname', 'rname', 123, 456, 789, 10, 20, 30))
# Vary the rname
self._equalityTest(
dns.Record_SOA('mname', 'rname', 123, 456, 789, 10, 20, 30),
dns.Record_SOA('mname', 'rname', 123, 456, 789, 10, 20, 30),
dns.Record_SOA('mname', 'xname', 123, 456, 789, 10, 20, 30))
# Vary the serial
self._equalityTest(
dns.Record_SOA('mname', 'rname', 123, 456, 789, 10, 20, 30),
dns.Record_SOA('mname', 'rname', 123, 456, 789, 10, 20, 30),
dns.Record_SOA('mname', 'rname', 1, 456, 789, 10, 20, 30))
# Vary the refresh
self._equalityTest(
dns.Record_SOA('mname', 'rname', 123, 456, 789, 10, 20, 30),
dns.Record_SOA('mname', 'rname', 123, 456, 789, 10, 20, 30),
dns.Record_SOA('mname', 'rname', 123, 1, 789, 10, 20, 30))
# Vary the minimum
self._equalityTest(
dns.Record_SOA('mname', 'rname', 123, 456, 789, 10, 20, 30),
dns.Record_SOA('mname', 'rname', 123, 456, 789, 10, 20, 30),
dns.Record_SOA('mname', 'rname', 123, 456, 1, 10, 20, 30))
# Vary the expire
self._equalityTest(
dns.Record_SOA('mname', 'rname', 123, 456, 789, 10, 20, 30),
dns.Record_SOA('mname', 'rname', 123, 456, 789, 10, 20, 30),
dns.Record_SOA('mname', 'rname', 123, 456, 789, 1, 20, 30))
# Vary the retry
self._equalityTest(
dns.Record_SOA('mname', 'rname', 123, 456, 789, 10, 20, 30),
dns.Record_SOA('mname', 'rname', 123, 456, 789, 10, 20, 30),
dns.Record_SOA('mname', 'rname', 123, 456, 789, 10, 1, 30))
# Vary the ttl
self._equalityTest(
dns.Record_SOA('mname', 'rname', 123, 456, 789, 10, 20, 30),
dns.Record_SOA('mname', 'rname', 123, 456, 789, 10, 20, 30),
dns.Record_SOA('mname', 'xname', 123, 456, 789, 10, 20, 1))
def test_null(self):
"""
Two L{dns.Record_NULL} instances compare equal if and only if they have
the same payload and ttl.
"""
# Vary the payload
self._equalityTest(
dns.Record_NULL('foo bar', 10),
dns.Record_NULL('foo bar', 10),
dns.Record_NULL('bar foo', 10))
# Vary the ttl
self._equalityTest(
dns.Record_NULL('foo bar', 10),
dns.Record_NULL('foo bar', 10),
dns.Record_NULL('foo bar', 100))
def test_wks(self):
"""
Two L{dns.Record_WKS} instances compare equal if and only if they have
the same address, protocol, map, and ttl.
"""
# Vary the address
self._equalityTest(
dns.Record_WKS('1.2.3.4', 1, 'foo', 2),
dns.Record_WKS('1.2.3.4', 1, 'foo', 2),
dns.Record_WKS('4.3.2.1', 1, 'foo', 2))
# Vary the protocol
self._equalityTest(
dns.Record_WKS('1.2.3.4', 1, 'foo', 2),
dns.Record_WKS('1.2.3.4', 1, 'foo', 2),
dns.Record_WKS('1.2.3.4', 100, 'foo', 2))
# Vary the map
self._equalityTest(
dns.Record_WKS('1.2.3.4', 1, 'foo', 2),
dns.Record_WKS('1.2.3.4', 1, 'foo', 2),
dns.Record_WKS('1.2.3.4', 1, 'bar', 2))
# Vary the ttl
self._equalityTest(
dns.Record_WKS('1.2.3.4', 1, 'foo', 2),
dns.Record_WKS('1.2.3.4', 1, 'foo', 2),
dns.Record_WKS('1.2.3.4', 1, 'foo', 200))
def test_aaaa(self):
"""
Two L{dns.Record_AAAA} instances compare equal if and only if they have
the same address and ttl.
"""
# Vary the address
self._equalityTest(
dns.Record_AAAA('1::2', 1),
dns.Record_AAAA('1::2', 1),
dns.Record_AAAA('2::1', 1))
# Vary the ttl
self._equalityTest(
dns.Record_AAAA('1::2', 1),
dns.Record_AAAA('1::2', 1),
dns.Record_AAAA('1::2', 10))
def test_a6(self):
"""
Two L{dns.Record_A6} instances compare equal if and only if they have
the same prefix, prefix length, suffix, and ttl.
"""
# Note, A6 is crazy, I'm not sure these values are actually legal.
# Hopefully that doesn't matter for this test. -exarkun
# Vary the prefix length
self._equalityTest(
dns.Record_A6(16, '::abcd', 'example.com', 10),
dns.Record_A6(16, '::abcd', 'example.com', 10),
dns.Record_A6(32, '::abcd', 'example.com', 10))
# Vary the suffix
self._equalityTest(
dns.Record_A6(16, '::abcd', 'example.com', 10),
dns.Record_A6(16, '::abcd', 'example.com', 10),
dns.Record_A6(16, '::abcd:0', 'example.com', 10))
# Vary the prefix
self._equalityTest(
dns.Record_A6(16, '::abcd', 'example.com', 10),
dns.Record_A6(16, '::abcd', 'example.com', 10),
dns.Record_A6(16, '::abcd', 'example.org', 10))
# Vary the ttl
self._equalityTest(
dns.Record_A6(16, '::abcd', 'example.com', 10),
dns.Record_A6(16, '::abcd', 'example.com', 10),
dns.Record_A6(16, '::abcd', 'example.com', 100))
def test_srv(self):
"""
Two L{dns.Record_SRV} instances compare equal if and only if they have
the same priority, weight, port, target, and ttl.
"""
# Vary the priority
self._equalityTest(
dns.Record_SRV(10, 20, 30, 'example.com', 40),
dns.Record_SRV(10, 20, 30, 'example.com', 40),
dns.Record_SRV(100, 20, 30, 'example.com', 40))
# Vary the weight
self._equalityTest(
dns.Record_SRV(10, 20, 30, 'example.com', 40),
dns.Record_SRV(10, 20, 30, 'example.com', 40),
dns.Record_SRV(10, 200, 30, 'example.com', 40))
# Vary the port
self._equalityTest(
dns.Record_SRV(10, 20, 30, 'example.com', 40),
dns.Record_SRV(10, 20, 30, 'example.com', 40),
dns.Record_SRV(10, 20, 300, 'example.com', 40))
# Vary the target
self._equalityTest(
dns.Record_SRV(10, 20, 30, 'example.com', 40),
dns.Record_SRV(10, 20, 30, 'example.com', 40),
dns.Record_SRV(10, 20, 30, 'example.org', 40))
# Vary the ttl
self._equalityTest(
dns.Record_SRV(10, 20, 30, 'example.com', 40),
dns.Record_SRV(10, 20, 30, 'example.com', 40),
dns.Record_SRV(10, 20, 30, 'example.com', 400))
def test_naptr(self):
"""
Two L{dns.Record_NAPTR} instances compare equal if and only if they
have the same order, preference, flags, service, regexp, replacement,
and ttl.
"""
# Vary the order
self._equalityTest(
dns.Record_NAPTR(1, 2, "u", "sip+E2U", "/foo/bar/", "baz", 12),
dns.Record_NAPTR(1, 2, "u", "sip+E2U", "/foo/bar/", "baz", 12),
dns.Record_NAPTR(2, 2, "u", "sip+E2U", "/foo/bar/", "baz", 12))
# Vary the preference
self._equalityTest(
dns.Record_NAPTR(1, 2, "u", "sip+E2U", "/foo/bar/", "baz", 12),
dns.Record_NAPTR(1, 2, "u", "sip+E2U", "/foo/bar/", "baz", 12),
dns.Record_NAPTR(1, 3, "u", "sip+E2U", "/foo/bar/", "baz", 12))
# Vary the flags
self._equalityTest(
dns.Record_NAPTR(1, 2, "u", "sip+E2U", "/foo/bar/", "baz", 12),
dns.Record_NAPTR(1, 2, "u", "sip+E2U", "/foo/bar/", "baz", 12),
dns.Record_NAPTR(1, 2, "p", "sip+E2U", "/foo/bar/", "baz", 12))
# Vary the service
self._equalityTest(
dns.Record_NAPTR(1, 2, "u", "sip+E2U", "/foo/bar/", "baz", 12),
dns.Record_NAPTR(1, 2, "u", "sip+E2U", "/foo/bar/", "baz", 12),
dns.Record_NAPTR(1, 2, "u", "http", "/foo/bar/", "baz", 12))
# Vary the regexp
self._equalityTest(
dns.Record_NAPTR(1, 2, "u", "sip+E2U", "/foo/bar/", "baz", 12),
dns.Record_NAPTR(1, 2, "u", "sip+E2U", "/foo/bar/", "baz", 12),
dns.Record_NAPTR(1, 2, "u", "sip+E2U", "/bar/foo/", "baz", 12))
# Vary the replacement
self._equalityTest(
dns.Record_NAPTR(1, 2, "u", "sip+E2U", "/foo/bar/", "baz", 12),
dns.Record_NAPTR(1, 2, "u", "sip+E2U", "/foo/bar/", "baz", 12),
dns.Record_NAPTR(1, 2, "u", "sip+E2U", "/bar/foo/", "quux", 12))
# Vary the ttl
self._equalityTest(
dns.Record_NAPTR(1, 2, "u", "sip+E2U", "/foo/bar/", "baz", 12),
dns.Record_NAPTR(1, 2, "u", "sip+E2U", "/foo/bar/", "baz", 12),
dns.Record_NAPTR(1, 2, "u", "sip+E2U", "/bar/foo/", "baz", 5))
def test_afsdb(self):
"""
Two L{dns.Record_AFSDB} instances compare equal if and only if they
have the same subtype, hostname, and ttl.
"""
# Vary the subtype
self._equalityTest(
dns.Record_AFSDB(1, 'example.com', 2),
dns.Record_AFSDB(1, 'example.com', 2),
dns.Record_AFSDB(2, 'example.com', 2))
# Vary the hostname
self._equalityTest(
dns.Record_AFSDB(1, 'example.com', 2),
dns.Record_AFSDB(1, 'example.com', 2),
dns.Record_AFSDB(1, 'example.org', 2))
# Vary the ttl
self._equalityTest(
dns.Record_AFSDB(1, 'example.com', 2),
dns.Record_AFSDB(1, 'example.com', 2),
dns.Record_AFSDB(1, 'example.com', 3))
def test_rp(self):
"""
Two L{Record_RP} instances compare equal if and only if they have the
same mbox, txt, and ttl.
"""
# Vary the mbox
self._equalityTest(
dns.Record_RP('alice.example.com', 'alice is nice', 10),
dns.Record_RP('alice.example.com', 'alice is nice', 10),
dns.Record_RP('bob.example.com', 'alice is nice', 10))
# Vary the txt
self._equalityTest(
dns.Record_RP('alice.example.com', 'alice is nice', 10),
dns.Record_RP('alice.example.com', 'alice is nice', 10),
dns.Record_RP('alice.example.com', 'alice is not nice', 10))
# Vary the ttl
self._equalityTest(
dns.Record_RP('alice.example.com', 'alice is nice', 10),
dns.Record_RP('alice.example.com', 'alice is nice', 10),
dns.Record_RP('alice.example.com', 'alice is nice', 100))
def test_hinfo(self):
"""
Two L{dns.Record_HINFO} instances compare equal if and only if they
have the same cpu, os, and ttl.
"""
# Vary the cpu
self._equalityTest(
dns.Record_HINFO('x86-64', 'plan9', 10),
dns.Record_HINFO('x86-64', 'plan9', 10),
dns.Record_HINFO('i386', 'plan9', 10))
# Vary the os
self._equalityTest(
dns.Record_HINFO('x86-64', 'plan9', 10),
dns.Record_HINFO('x86-64', 'plan9', 10),
dns.Record_HINFO('x86-64', 'plan11', 10))
# Vary the ttl
self._equalityTest(
dns.Record_HINFO('x86-64', 'plan9', 10),
dns.Record_HINFO('x86-64', 'plan9', 10),
dns.Record_HINFO('x86-64', 'plan9', 100))
def test_minfo(self):
"""
Two L{dns.Record_MINFO} instances compare equal if and only if they
have the same rmailbx, emailbx, and ttl.
"""
# Vary the rmailbx
self._equalityTest(
dns.Record_MINFO('rmailbox', 'emailbox', 10),
dns.Record_MINFO('rmailbox', 'emailbox', 10),
dns.Record_MINFO('someplace', 'emailbox', 10))
# Vary the emailbx
self._equalityTest(
dns.Record_MINFO('rmailbox', 'emailbox', 10),
dns.Record_MINFO('rmailbox', 'emailbox', 10),
dns.Record_MINFO('rmailbox', 'something', 10))
# Vary the ttl
self._equalityTest(
dns.Record_MINFO('rmailbox', 'emailbox', 10),
dns.Record_MINFO('rmailbox', 'emailbox', 10),
dns.Record_MINFO('rmailbox', 'emailbox', 100))
def test_mx(self):
"""
Two L{dns.Record_MX} instances compare equal if and only if they have
the same preference, name, and ttl.
"""
# Vary the preference
self._equalityTest(
dns.Record_MX(10, 'example.org', 20),
dns.Record_MX(10, 'example.org', 20),
dns.Record_MX(100, 'example.org', 20))
# Vary the name
self._equalityTest(
dns.Record_MX(10, 'example.org', 20),
dns.Record_MX(10, 'example.org', 20),
dns.Record_MX(10, 'example.net', 20))
# Vary the ttl
self._equalityTest(
dns.Record_MX(10, 'example.org', 20),
dns.Record_MX(10, 'example.org', 20),
dns.Record_MX(10, 'example.org', 200))
def test_txt(self):
"""
Two L{dns.Record_TXT} instances compare equal if and only if they have
the same data and ttl.
"""
# Vary the length of the data
self._equalityTest(
dns.Record_TXT('foo', 'bar', ttl=10),
dns.Record_TXT('foo', 'bar', ttl=10),
dns.Record_TXT('foo', 'bar', 'baz', ttl=10))
# Vary the value of the data
self._equalityTest(
dns.Record_TXT('foo', 'bar', ttl=10),
dns.Record_TXT('foo', 'bar', ttl=10),
dns.Record_TXT('bar', 'foo', ttl=10))
# Vary the ttl
self._equalityTest(
dns.Record_TXT('foo', 'bar', ttl=10),
dns.Record_TXT('foo', 'bar', ttl=10),
dns.Record_TXT('foo', 'bar', ttl=100))
def test_spf(self):
"""
L{dns.Record_SPF} records are structurally similar to L{dns.Record_TXT}
records, so they are equal if and only if they have the same data and ttl.
"""
# Vary the length of the data
self._equalityTest(
dns.Record_SPF('foo', 'bar', ttl=10),
dns.Record_SPF('foo', 'bar', ttl=10),
dns.Record_SPF('foo', 'bar', 'baz', ttl=10))
# Vary the value of the data
self._equalityTest(
dns.Record_SPF('foo', 'bar', ttl=10),
dns.Record_SPF('foo', 'bar', ttl=10),
dns.Record_SPF('bar', 'foo', ttl=10))
# Vary the ttl
self._equalityTest(
dns.Record_SPF('foo', 'bar', ttl=10),
dns.Record_SPF('foo', 'bar', ttl=10),
dns.Record_SPF('foo', 'bar', ttl=100))
| gpl-2.0 |
SunghanKim/numpy | numpy/f2py/use_rules.py | 188 | 3652 | #!/usr/bin/env python
"""
Build 'use others module data' mechanism for f2py2e.
Unfinished.
Copyright 2000 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2000/09/10 12:35:43 $
Pearu Peterson
"""
from __future__ import division, absolute_import, print_function
__version__ = "$Revision: 1.3 $"[10:-1]
f2py_version = 'See `f2py -v`'
from .auxfuncs import (
applyrules, dictappend, gentitle, hasnote, outmess
)
# f2py rule dictionary for the "use module data" wrappers.  The '#name#'
# style placeholders are substituted by applyrules(); the 'body' value is a
# C source template for a getter function exposed to Python.
usemodule_rules = {
    'body': """
#begintitle#
static char doc_#apiname#[] = \"\\\nVariable wrapper signature:\\n\\
\t #name# = get_#name#()\\n\\
Arguments:\\n\\
#docstr#\";
extern F_MODFUNC(#usemodulename#,#USEMODULENAME#,#realname#,#REALNAME#);
static PyObject *#apiname#(PyObject *capi_self, PyObject *capi_args) {
/*#decl#*/
\tif (!PyArg_ParseTuple(capi_args, \"\")) goto capi_fail;
printf(\"c: %d\\n\",F_MODFUNC(#usemodulename#,#USEMODULENAME#,#realname#,#REALNAME#));
\treturn Py_BuildValue(\"\");
capi_fail:
\treturn NULL;
}
""",
    # Method-table entry registering the getter with Python.
    'method': '\t{\"get_#name#\",#apiname#,METH_VARARGS|METH_KEYWORDS,doc_#apiname#},',
    # C helper macros this rule depends on.
    'need': ['F_MODFUNC']
}
################
def buildusevars(m, r):
    """Build the variable-hook rule dictionary for every variable that
    module ``m`` pulls in through a Fortran ``use`` statement ``r``.

    ``r`` may carry a ``'map'`` (local name -> real name) and an ``'only'``
    flag restricting the import to the mapped names.
    """
    outmess(
        '\t\tBuilding use variable hooks for module "%s" (feature only for F90/F95)...\n' % (m['name']))
    # Invert the rename map (real name -> local name), warning about
    # real names that are mapped more than once.
    revmap = {}
    for local, real in r.get('map', {}).items():
        if real in revmap:
            outmess('\t\t\tVariable "%s<=%s" is already mapped by "%s". Skipping.\n' % (
                real, local, revmap[real]))
        else:
            revmap[real] = local
    varsmap = {}
    if r.get('only'):
        # Only the explicitly mapped names are imported.
        for local in r['map'].keys():
            real = r['map'][local]
            if real not in m['vars']:
                outmess(
                    '\t\t\tNo definition for variable "%s=>%s". Skipping.\n' % (local, real))
            elif revmap[real] == local:
                varsmap[local] = real
            else:
                outmess('\t\t\tIgnoring map "%s=>%s". See above.\n' %
                        (local, real))
    else:
        # Import every module variable, honoring renames where present.
        for v in m['vars'].keys():
            varsmap[v] = revmap.get(v, v)
    # Merge the per-variable rule dictionaries into one result.
    ret = {}
    for local in varsmap.keys():
        ret = dictappend(ret, buildusevar(local, varsmap[local], m['vars'], m['name']))
    return ret
def buildusevar(name, realname, vars, usemodulename):
    """Construct the rule dictionary wrapping one module variable.

    Exposes Fortran module variable ``realname`` (from module
    ``usemodulename``) to Python under the local name ``name``.

    Parameters
    ----------
    name : str
        Local (possibly renamed) variable name.
    realname : str
        Actual variable name inside the Fortran module.
    vars : dict
        Variable definitions of the using module; ``vars[realname]`` may
        carry a ``'note'`` that is propagated into the generated docs.
    usemodulename : str
        Name of the Fortran module the variable comes from.
    """
    outmess('\t\t\tConstructing wrapper function for variable "%s=>%s"...\n' % (
        name, realname))
    ret = {}
    vrd = {'name': name,
           'realname': realname,
           'REALNAME': realname.upper(),
           'usemodulename': usemodulename,
           'USEMODULENAME': usemodulename.upper(),
           'texname': name.replace('_', '\\_'),
           'begintitle': gentitle('%s=>%s' % (name, realname)),
           'endtitle': gentitle('end of %s=>%s' % (name, realname)),
           'apiname': '#modulename#_use_%s_from_%s' % (realname, usemodulename)
           }
    # LaTeX cannot digest digits in macro names; spell them out.
    nummap = {0: 'Ro', 1: 'Ri', 2: 'Rii', 3: 'Riii', 4: 'Riv',
              5: 'Rv', 6: 'Rvi', 7: 'Rvii', 8: 'Rviii', 9: 'Rix'}
    vrd['texnamename'] = name
    for i in nummap.keys():
        vrd['texnamename'] = vrd['texnamename'].replace(repr(i), nummap[i])
    if hasnote(vars[realname]):
        vrd['note'] = vars[realname]['note']
    rd = dictappend({}, vrd)
    # NOTE: a leftover debug print of (name, realname, vars[realname]) was
    # removed here; it wrote raw data to stdout on every call, bypassing
    # the outmess() reporting used everywhere else in this module.
    ret = applyrules(usemodule_rules, rd)
    return ret
| bsd-3-clause |
tareqalayan/ansible | lib/ansible/modules/windows/win_certificate_store.py | 16 | 6975 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_certificate_store
version_added: '2.5'
short_description: Manages the certificate store
description:
- Used to import/export and remove certificates and keys from the local
certificate store.
- This module is not used to create certificates and will only manage existing
certs as a file or in the store.
- It can be used to import PEM, DER, P7B, PKCS12 (PFX) certificates and export
PEM, DER and PKCS12 certificates.
options:
state:
description:
- If C(present), will ensure that the certificate at I(path) is imported
into the certificate store specified.
- If C(absent), will ensure that the certificate specified by I(thumbprint)
or the thumbprint of the cert at I(path) is removed from the store
specified.
- If C(exported), will ensure the file at I(path) is a certificate
specified by I(thumbprint).
- When exporting a certificate, if I(path) is a directory then the module
will fail, otherwise the file will be replaced if needed.
choices: [ absent, exported, present ]
default: present
path:
description:
- The path to a certificate file.
- This is required when I(state) is C(present) or C(exported).
- When I(state) is C(absent) and I(thumbprint) is not specified, the
thumbprint is derived from the certificate at this path.
thumbprint:
description:
- The thumbprint as a hex string to either export or remove.
- See the examples for how to specify the thumbprint.
store_name:
description:
- The store name to use when importing a certificate or searching for a
certificate.
- "C(AddressBook): The X.509 certificate store for other users"
- "C(AuthRoot): The X.509 certificate store for third-party certificate authorities (CAs)"
- "C(CertificateAuthority): The X.509 certificate store for intermediate certificate authorities (CAs)"
- "C(Disallowed): The X.509 certificate store for revoked certificates"
- "C(My): The X.509 certificate store for personal certificates"
- "C(Root): The X.509 certificate store for trusted root certificate authorities (CAs)"
- "C(TrustedPeople): The X.509 certificate store for directly trusted people and resources"
- "C(TrustedPublisher): The X.509 certificate store for directly trusted publishers"
default: My
choices:
- AddressBook
- AuthRoot
- CertificateAuthority
- Disallowed
- My
- Root
- TrustedPeople
- TrustedPublisher
store_location:
description:
- The store location to use when importing a certificate or searching for a
certificate.
choices: [ CurrentUser, LocalMachine ]
default: LocalMachine
password:
description:
- The password of the pkcs12 certificate key.
- This is used when reading a pkcs12 certificate file or the password to
set when C(state=exported) and C(file_type=pkcs12).
- If the pkcs12 file has no password set or no password should be set on
the exported file, do not set this option.
key_exportable:
description:
- Whether to allow the private key to be exported.
- If C(no), then this module and other process will only be able to export
the certificate and the private key cannot be exported.
- Used when C(state=present) only.
type: bool
default: 'yes'
key_storage:
description:
- Specifies where Windows will store the private key when it is imported.
- When set to C(default), the default option as set by Windows is used.
- When set to C(machine), the key is stored in a path accessible by various
users.
- When set to C(user), the key is stored in a path only accessible by the
current user.
- Used when C(state=present) only and cannot be changed once imported.
- See U(https://msdn.microsoft.com/en-us/library/system.security.cryptography.x509certificates.x509keystorageflags.aspx)
for more details.
choices: [ default, machine, user ]
default: default
file_type:
description:
- The file type to export the certificate as when C(state=exported).
- C(der) is a binary ASN.1 encoded file.
- C(pem) is a base64 encoded file of a der file in the OpenSSL form.
- C(pkcs12) (also known as pfx) is a binary container that contains both
the certificate and private key unlike the other options.
- When C(pkcs12) is set and the private key is not exportable or accessible
by the current user, it will throw an exception.
choices: [ der, pem, pkcs12 ]
default: der
notes:
- Some actions on PKCS12 certificates and keys may fail with the error
C(the specified network password is not correct), either use CredSSP or
Kerberos with credential delegation, or use C(become) to bypass these
restrictions.
- The certificates must be located on the Windows host to be set with I(path).
author:
- Jordan Borean (@jborean93)
'''
EXAMPLES = r'''
- name: import a certificate
win_certificate_store:
path: C:\Temp\cert.pem
state: present
- name: import pfx certificate that is password protected
win_certificate_store:
path: C:\Temp\cert.pfx
state: present
password: VeryStrongPasswordHere!
become: yes
become_method: runas
- name: import pfx certificate without password and set private key as un-exportable
win_certificate_store:
path: C:\Temp\cert.pfx
state: present
key_exportable: no
# usually you don't set this here but it is for illustrative purposes
vars:
ansible_winrm_transport: credssp
- name: remove a certificate based on file thumbprint
win_certificate_store:
path: C:\Temp\cert.pem
state: absent
- name: remove a certificate based on thumbprint
win_certificate_store:
thumbprint: BD7AF104CF1872BDB518D95C9534EA941665FD27
state: absent
- name: remove certificate based on thumbprint is CurrentUser/TrustedPublishers store
win_certificate_store:
thumbprint: BD7AF104CF1872BDB518D95C9534EA941665FD27
state: absent
store_location: CurrentUser
store_name: TrustedPublisher
- name: export certificate as der encoded file
win_certificate_store:
path: C:\Temp\cert.cer
state: exported
file_type: der
- name: export certificate and key as pfx encoded file
win_certificate_store:
path: C:\Temp\cert.pfx
state: exported
file_type: pkcs12
password: AnotherStrongPass!
become: yes
become_method: runas
become_user: SYSTEM
'''
RETURN = r'''
thumbprints:
description: A list of certificate thumbprints that were touched by the
module.
returned: success
type: list
sample: ["BC05633694E675449136679A658281F17A191087"]
'''
| gpl-3.0 |
offbyone/Flexget | flexget/plugins/filter/seen_info_hash.py | 16 | 1975 | from __future__ import unicode_literals, division, absolute_import
from flexget import plugin
from flexget.event import event
from flexget.plugins.filter.seen import FilterSeen
class FilterSeenInfoHash(FilterSeen):
    """Prevents the same torrent from being downloaded twice by remembering the infohash of all downloaded torrents."""

    # Config is either a boolean or one of the scope keywords.
    schema = {
        'oneOf': [
            {'type': 'boolean'},
            {'type': 'string', 'enum': ['global', 'local']},
        ]
    }

    def __init__(self):
        # Deduplicate on the torrent infohash field only.
        self.fields = ['torrent_info_hash']
        self.keyword = 'seen_info_hash'

    @plugin.priority(180)
    def on_task_filter(self, task, config):
        if config is False:
            # Plugin disabled for this task.
            return
        # Infohashes compare case-insensitively, so normalize to upper case
        # before handing over to the seen filter.
        for entry in task.entries:
            infohash = entry.get('torrent_info_hash')
            if isinstance(infohash, basestring):
                entry['torrent_info_hash'] = infohash.upper()
        FilterSeen.on_task_filter(self, task, config, remember_rejected=True)

    def on_task_modify(self, task, config):
        if config is False:
            # Plugin disabled for this task.
            return
        # Filter once more, now that the torrent plugin has populated the
        # infohash fields.
        self.on_task_filter(task, config)
        # Reject any duplicate infohash accepted within this same run.
        seen_this_run = set()
        for entry in task.accepted:
            if 'torrent_info_hash' not in entry:
                continue
            infohash = entry['torrent_info_hash']
            if infohash in seen_this_run:
                entry.reject('Already accepted torrent with this infohash once for this task')
            else:
                seen_this_run.add(infohash)
@event('plugin.register')
def register_plugin():
    # Registered as a builtin so it runs for every task without being
    # explicitly enabled in the task config.
    plugin.register(FilterSeenInfoHash, 'seen_info_hash', builtin=True, api_ver=2)
| mit |
JakeBrand/CMPUT410-E6 | v1/lib/python2.7/site-packages/django/core/management/commands/testserver.py | 49 | 2035 | from django.core.management.base import BaseCommand
from optparse import make_option
class Command(BaseCommand):
    """Management command: create a test database, load the given fixtures
    into it, then run the development server against that database."""
    option_list = BaseCommand.option_list + (
        make_option('--noinput', action='store_false', dest='interactive', default=True,
            help='Tells Django to NOT prompt the user for input of any kind.'),
        make_option('--addrport', action='store', dest='addrport',
            type='string', default='',
            help='port number or ipaddr:port to run the server on'),
        make_option('--ipv6', '-6', action='store_true', dest='use_ipv6', default=False,
            help='Tells Django to use an IPv6 address.'),
    )
    help = 'Runs a development server with data from the given fixture(s).'
    args = '[fixture ...]'
    # Checks would run against the real database; skip them here.
    requires_system_checks = False
    def handle(self, *fixture_labels, **options):
        """Create the test DB, load *fixture_labels*, and start runserver."""
        # Imported lazily so merely importing this module has no side effects.
        from django.core.management import call_command
        from django.db import connection
        verbosity = int(options.get('verbosity'))
        interactive = options.get('interactive')
        addrport = options.get('addrport')
        # Create a test database.
        db_name = connection.creation.create_test_db(verbosity=verbosity, autoclobber=not interactive, serialize=False)
        # Import the fixture data into the test database.
        call_command('loaddata', *fixture_labels, **{'verbosity': verbosity})
        # Run the development server. Turn off auto-reloading because it causes
        # a strange error -- it causes this handle() method to be called
        # multiple times.
        shutdown_message = '\nServer stopped.\nNote that the test database, %r, has not been deleted. You can explore it on your own.' % db_name
        # Threading is only safe when the backend allows multiple connections.
        use_threading = connection.features.test_db_allows_multiple_connections
        call_command(
            'runserver',
            addrport=addrport,
            shutdown_message=shutdown_message,
            use_reloader=False,
            use_ipv6=options['use_ipv6'],
            use_threading=use_threading
        )
| apache-2.0 |
chongtianfeiyu/kbengine | kbe/res/scripts/common/Lib/tempfile.py | 74 | 21519 | """Temporary files.
This module provides generic, low- and high-level interfaces for
creating temporary files and directories. The interfaces listed
as "safe" just below can be used without fear of race conditions.
Those listed as "unsafe" cannot, and are provided for backward
compatibility only.
This module also provides some data items to the user:
TMP_MAX - maximum number of names that will be tried before
giving up.
tempdir - If this is set to a string before the first use of
any routine from this module, it will be considered as
another candidate location to store temporary files.
"""
__all__ = [
"NamedTemporaryFile", "TemporaryFile", # high level safe interfaces
"SpooledTemporaryFile", "TemporaryDirectory",
"mkstemp", "mkdtemp", # low level safe interfaces
"mktemp", # deprecated unsafe interface
"TMP_MAX", "gettempprefix", # constants
"tempdir", "gettempdir"
]
# Imports.
import functools as _functools
import warnings as _warnings
import io as _io
import os as _os
import shutil as _shutil
import errno as _errno
from random import Random as _Random
import weakref as _weakref
try:
import _thread
except ImportError:
import _dummy_thread as _thread
# Grab the lock allocator up front; _thread was imported above with a dummy
# fallback, so this works even without real threading support.
_allocate_lock = _thread.allocate_lock
# Open flags for temp files: read/write, create, and fail if the name
# already exists (O_EXCL closes the classic temp-file race).
_text_openflags = _os.O_RDWR | _os.O_CREAT | _os.O_EXCL
if hasattr(_os, 'O_NOFOLLOW'):
    # Refuse to follow a symlink at the target name (symlink-attack defense).
    _text_openflags |= _os.O_NOFOLLOW
_bin_openflags = _text_openflags
if hasattr(_os, 'O_BINARY'):
    # Windows: suppress text-mode newline translation for binary temp files.
    _bin_openflags |= _os.O_BINARY
# Maximum number of candidate names tried before giving up.
if hasattr(_os, 'TMP_MAX'):
    TMP_MAX = _os.TMP_MAX
else:
    TMP_MAX = 10000
# Although it does not have an underscore for historical reasons, this
# variable is an internal implementation detail (see issue 10354).
template = "tmp"
# Internal routines.
# Lock guarding one-time lazy initialization of the module globals
# tempdir and _name_sequence.
_once_lock = _allocate_lock()
# Prefer lstat so symlinks are not followed when probing for existence.
if hasattr(_os, "lstat"):
    _stat = _os.lstat
elif hasattr(_os, "stat"):
    _stat = _os.stat
else:
    # Fallback. All we need is something that raises OSError if the
    # file doesn't exist.
    def _stat(fn):
        fd = _os.open(fn, _os.O_RDONLY)
        _os.close(fd)
def _exists(fn):
    """Return True if *fn* can be stat'ed (i.e. appears to exist)."""
    try:
        _stat(fn)
    except OSError:
        return False
    return True
class _RandomNameSequence:
    """An instance of _RandomNameSequence generates an endless
    sequence of unpredictable strings which can safely be incorporated
    into file names. Each string is eight characters long. Multiple
    threads can safely use the same instance at the same time.
    _RandomNameSequence is an iterator."""
    # Alphabet for the generated names: lowercase letters, digits and '_'.
    characters = "abcdefghijklmnopqrstuvwxyz0123456789_"
    @property
    def rng(self):
        # Re-create the generator after fork(): a child inheriting the
        # parent's Random state would otherwise produce identical names.
        cur_pid = _os.getpid()
        if cur_pid != getattr(self, '_rng_pid', None):
            self._rng = _Random()
            self._rng_pid = cur_pid
        return self._rng
    def __iter__(self):
        return self
    def __next__(self):
        # Draw eight random characters (keep the class docstring in sync
        # with this length).
        c = self.characters
        choose = self.rng.choice
        letters = [choose(c) for dummy in range(8)]
        return ''.join(letters)
def _candidate_tempdir_list():
    """Build the ordered list of directories that _get_default_tempdir
    will probe for a usable temporary directory."""
    candidates = []

    # Environment variables take priority, in this traditional order.
    for var in ('TMPDIR', 'TEMP', 'TMP'):
        value = _os.getenv(var)
        if value:
            candidates.append(value)

    # Then the conventional platform-specific locations.
    if _os.name == 'nt':
        candidates.extend([r'c:\temp', r'c:\tmp', r'\temp', r'\tmp'])
    else:
        candidates.extend(['/tmp', '/var/tmp', '/usr/tmp'])

    # As a last resort, fall back to the current working directory.
    try:
        candidates.append(_os.getcwd())
    except (AttributeError, OSError):
        candidates.append(_os.curdir)

    return candidates
def _get_default_tempdir():
    """Calculate the default directory to use for temporary files.
    This routine should be called exactly once.
    We determine whether or not a candidate temp dir is usable by
    trying to create and write to a file in that directory. If this
    is successful, the test file is deleted. To prevent denial of
    service, the name of the test file must be randomized."""
    namer = _RandomNameSequence()
    dirlist = _candidate_tempdir_list()
    for dir in dirlist:
        if dir != _os.curdir:
            dir = _os.path.abspath(dir)
        # Try only a few names per directory.
        for seq in range(100):
            name = next(namer)
            filename = _os.path.join(dir, name)
            try:
                # Probe: create exclusively, write a few bytes, then clean up.
                fd = _os.open(filename, _bin_openflags, 0o600)
                try:
                    try:
                        # closefd=False: _os.close below owns the descriptor.
                        with _io.open(fd, 'wb', closefd=False) as fp:
                            fp.write(b'blat')
                    finally:
                        _os.close(fd)
                finally:
                    _os.unlink(filename)
                return dir
            except FileExistsError:
                # Name collision; try another random name in the same dir.
                pass
            except OSError:
                break # no point trying more names in this directory
    raise FileNotFoundError(_errno.ENOENT,
                            "No usable temporary directory found in %s" %
                            dirlist)
# Process-wide random-name generator, created lazily under _once_lock.
_name_sequence = None
def _get_candidate_names():
    """Common setup sequence for all user-callable interfaces."""
    global _name_sequence
    if _name_sequence is None:
        # Double-checked locking: re-test under the lock so only one thread
        # creates the shared sequence.
        _once_lock.acquire()
        try:
            if _name_sequence is None:
                _name_sequence = _RandomNameSequence()
        finally:
            _once_lock.release()
    return _name_sequence
def _mkstemp_inner(dir, pre, suf, flags):
    """Code common to mkstemp, TemporaryFile, and NamedTemporaryFile.

    Returns ``(fd, abspath)``.  The O_EXCL bit in *flags* makes creation
    atomic: an existing name raises instead of being reused."""
    names = _get_candidate_names()
    for seq in range(TMP_MAX):
        name = next(names)
        file = _os.path.join(dir, pre + name + suf)
        try:
            # 0o600: readable/writable by the owner only.
            fd = _os.open(file, flags, 0o600)
            return (fd, _os.path.abspath(file))
        except FileExistsError:
            continue # try again
        except PermissionError:
            # This exception is thrown when a directory with the chosen name
            # already exists on windows.
            if _os.name == 'nt':
                continue
            else:
                raise
    raise FileExistsError(_errno.EEXIST,
                          "No usable temporary file name found")
# User visible interfaces.
def gettempprefix():
    """Accessor for tempfile.template (the default file-name prefix)."""
    return template
# Default temp directory; resolved lazily by gettempdir() and cached here.
tempdir = None
def gettempdir():
    """Accessor for tempfile.tempdir."""
    global tempdir
    if tempdir is None:
        # Double-checked locking, same pattern as _get_candidate_names().
        _once_lock.acquire()
        try:
            if tempdir is None:
                tempdir = _get_default_tempdir()
        finally:
            _once_lock.release()
    return tempdir
def mkstemp(suffix="", prefix=template, dir=None, text=False):
    """Create a unique temporary file and return ``(fd, name)``.

    ``fd`` is the OS-level file descriptor returned by os.open and
    ``name`` is the absolute path of the file.

    'suffix' and 'prefix', when given, bracket the randomly generated
    part of the file name; 'dir' selects the containing directory
    (default: gettempdir()).  If 'text' is true the file is opened with
    the text-mode flags, otherwise (the default) with the binary-mode
    flags; on some operating systems this makes no difference.

    The file is readable and writable only by the creating user ID and
    is executable by no one (where the OS uses permission bits for
    that).  The descriptor is not inherited by child processes.

    The caller is responsible for deleting the file when done with it.
    """
    if dir is None:
        dir = gettempdir()
    flags = _text_openflags if text else _bin_openflags
    return _mkstemp_inner(dir, prefix, suffix, flags)
def mkdtemp(suffix="", prefix=template, dir=None):
    """Create a uniquely named temporary directory and return its path.

    Arguments are as for mkstemp(), except that there is no 'text'
    argument.  The directory is created with mode 0o700, so it is
    readable, writable and searchable only by the creating user.

    The caller is responsible for deleting the directory when done.
    """
    if dir is None:
        dir = gettempdir()
    namer = _get_candidate_names()
    for _seq in range(TMP_MAX):
        candidate = _os.path.join(dir, prefix + next(namer) + suffix)
        try:
            _os.mkdir(candidate, 0o700)
        except FileExistsError:
            # Name collision; try the next random candidate.
            continue
        return candidate
    raise FileExistsError(_errno.EEXIST,
                          "No usable temporary directory name found")
def mktemp(suffix="", prefix=template, dir=None):
    """Return a candidate temporary file name WITHOUT creating the file.

    Arguments are as for mkstemp(), except that there is no 'text'
    argument.

    This function is unsafe and should not be used: the name refers to
    a file that did not exist at check time, but someone else may
    create it before the caller does (a classic TOCTOU race).  Prefer
    mkstemp()/NamedTemporaryFile().
    """
    if dir is None:
        dir = gettempdir()
    namer = _get_candidate_names()
    for _seq in range(TMP_MAX):
        candidate = _os.path.join(dir, prefix + next(namer) + suffix)
        if not _exists(candidate):
            return candidate
    raise FileExistsError(_errno.EEXIST,
                          "No usable temporary filename found")
class _TemporaryFileCloser:
    """A separate object allowing proper closing of a temporary file's
    underlying file object, without adding a __del__ method to the
    temporary file."""
    file = None # Set here since __del__ checks it
    close_called = False
    def __init__(self, file, name, delete=True):
        self.file = file      # the open file object to close
        self.name = name      # path of the file on disk
        self.delete = delete  # unlink the file on close? (POSIX path only)
    # NT provides delete-on-close as a primitive, so we don't need
    # the wrapper to do anything special. We still use it so that
    # file.name is useful (i.e. not "(fdopen)") with NamedTemporaryFile.
    if _os.name != 'nt':
        # Bind os.unlink as a default argument so it is still reachable
        # at interpreter shutdown, when the module-level "os" may already
        # have been None'd out before __del__ runs.
        def close(self, unlink=_os.unlink):
            if not self.close_called and self.file is not None:
                self.close_called = True
                self.file.close()
                if self.delete:
                    unlink(self.name)
        # Need to ensure the file is deleted on __del__
        def __del__(self):
            self.close()
    else:
        # Windows: the OS deletes the file on close (O_TEMPORARY), so
        # just close the file object.
        def close(self):
            if not self.close_called:
                self.close_called = True
                self.file.close()
class _TemporaryFileWrapper:
    """Temporary file wrapper
    This class provides a wrapper around files opened for
    temporary use. In particular, it seeks to automatically
    remove the file when it is no longer needed.
    """
    def __init__(self, file, name, delete=True):
        self.file = file
        self.name = name
        self.delete = delete
        # The closer holds the cleanup logic so this class needs no __del__.
        self._closer = _TemporaryFileCloser(file, name, delete)
    def __getattr__(self, name):
        # Attribute lookups are delegated to the underlying file
        # and cached for non-numeric results
        # (i.e. methods are cached, closed and friends are not)
        file = self.__dict__['file']
        a = getattr(file, name)
        if hasattr(a, '__call__'):
            func = a
            @_functools.wraps(func)
            def func_wrapper(*args, **kwargs):
                return func(*args, **kwargs)
            # Avoid closing the file as long as the wrapper is alive,
            # see issue #18879.
            func_wrapper._closer = self._closer
            a = func_wrapper
        if not isinstance(a, int):
            # Cache on the instance so __getattr__ is only hit once per name.
            setattr(self, name, a)
        return a
    # The underlying __enter__ method returns the wrong object
    # (self.file) so override it to return the wrapper
    def __enter__(self):
        self.file.__enter__()
        return self
    # Need to trap __exit__ as well to ensure the file gets
    # deleted when used in a with statement
    def __exit__(self, exc, value, tb):
        result = self.file.__exit__(exc, value, tb)
        self.close()
        return result
    def close(self):
        """
        Close the temporary file, possibly deleting it.
        """
        self._closer.close()
    # iter() doesn't use __getattr__ to find the __iter__ method
    def __iter__(self):
        return iter(self.file)
def NamedTemporaryFile(mode='w+b', buffering=-1, encoding=None,
                       newline=None, suffix="", prefix=template,
                       dir=None, delete=True):
    """Create and return a temporary file.
    Arguments:
    'prefix', 'suffix', 'dir' -- as for mkstemp.
    'mode' -- the mode argument to io.open (default "w+b").
    'buffering' -- the buffer size argument to io.open (default -1).
    'encoding' -- the encoding argument to io.open (default None)
    'newline' -- the newline argument to io.open (default None)
    'delete' -- whether the file is deleted on close (default True).
    The file is created as mkstemp() would do it.
    Returns an object with a file-like interface; the name of the file
    is accessible as file.name. The file will be automatically deleted
    when it is closed unless the 'delete' argument is set to False.
    """
    if dir is None:
        dir = gettempdir()
    flags = _bin_openflags
    # Setting O_TEMPORARY in the flags causes the OS to delete
    # the file when it is closed. This is only supported by Windows.
    if _os.name == 'nt' and delete:
        flags |= _os.O_TEMPORARY
    (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags)
    try:
        file = _io.open(fd, mode, buffering=buffering,
                        newline=newline, encoding=encoding)
        return _TemporaryFileWrapper(file, name, delete)
    except Exception:
        # io.open failed: close the raw descriptor so it doesn't leak.
        _os.close(fd)
        raise
if _os.name != 'posix' or _os.sys.platform == 'cygwin':
    # On non-POSIX and Cygwin systems, assume that we cannot unlink a file
    # while it is open.
    TemporaryFile = NamedTemporaryFile
else:
    def TemporaryFile(mode='w+b', buffering=-1, encoding=None,
                      newline=None, suffix="", prefix=template,
                      dir=None):
        """Create and return a temporary file.
        Arguments:
        'prefix', 'suffix', 'dir' -- as for mkstemp.
        'mode' -- the mode argument to io.open (default "w+b").
        'buffering' -- the buffer size argument to io.open (default -1).
        'encoding' -- the encoding argument to io.open (default None)
        'newline' -- the newline argument to io.open (default None)
        The file is created as mkstemp() would do it.
        Returns an object with a file-like interface. The file has no
        name, and will cease to exist when it is closed.
        """
        if dir is None:
            dir = gettempdir()
        flags = _bin_openflags
        (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags)
        try:
            # Unlink immediately: the file lives on anonymously until the
            # last descriptor referring to it is closed.
            _os.unlink(name)
            return _io.open(fd, mode, buffering=buffering,
                            newline=newline, encoding=encoding)
        except:
            # Bare except is deliberate: close the descriptor on ANY
            # failure (including KeyboardInterrupt), then re-raise.
            _os.close(fd)
            raise
class SpooledTemporaryFile:
"""Temporary file wrapper, specialized to switch from BytesIO
or StringIO to a real file when it exceeds a certain size or
when a fileno is needed.
"""
_rolled = False
def __init__(self, max_size=0, mode='w+b', buffering=-1,
encoding=None, newline=None,
suffix="", prefix=template, dir=None):
if 'b' in mode:
self._file = _io.BytesIO()
else:
# Setting newline="\n" avoids newline translation;
# this is important because otherwise on Windows we'd
# hget double newline translation upon rollover().
self._file = _io.StringIO(newline="\n")
self._max_size = max_size
self._rolled = False
self._TemporaryFileArgs = {'mode': mode, 'buffering': buffering,
'suffix': suffix, 'prefix': prefix,
'encoding': encoding, 'newline': newline,
'dir': dir}
def _check(self, file):
if self._rolled: return
max_size = self._max_size
if max_size and file.tell() > max_size:
self.rollover()
def rollover(self):
if self._rolled: return
file = self._file
newfile = self._file = TemporaryFile(**self._TemporaryFileArgs)
del self._TemporaryFileArgs
newfile.write(file.getvalue())
newfile.seek(file.tell(), 0)
self._rolled = True
# The method caching trick from NamedTemporaryFile
# won't work here, because _file may change from a
# BytesIO/StringIO instance to a real file. So we list
# all the methods directly.
# Context management protocol
def __enter__(self):
if self._file.closed:
raise ValueError("Cannot enter context with closed file")
return self
def __exit__(self, exc, value, tb):
self._file.close()
# file protocol
def __iter__(self):
return self._file.__iter__()
def close(self):
self._file.close()
@property
def closed(self):
return self._file.closed
@property
def encoding(self):
try:
return self._file.encoding
except AttributeError:
if 'b' in self._TemporaryFileArgs['mode']:
raise
return self._TemporaryFileArgs['encoding']
def fileno(self):
self.rollover()
return self._file.fileno()
def flush(self):
self._file.flush()
def isatty(self):
return self._file.isatty()
@property
def mode(self):
try:
return self._file.mode
except AttributeError:
return self._TemporaryFileArgs['mode']
@property
def name(self):
try:
return self._file.name
except AttributeError:
return None
@property
def newlines(self):
try:
return self._file.newlines
except AttributeError:
if 'b' in self._TemporaryFileArgs['mode']:
raise
return self._TemporaryFileArgs['newline']
    # Pass-through wrappers around the current underlying file object.
    def read(self, *args):
        return self._file.read(*args)
    def readline(self, *args):
        return self._file.readline(*args)
    def readlines(self, *args):
        return self._file.readlines(*args)
    def seek(self, *args):
        # NOTE(review): the underlying seek() result is discarded, so this
        # returns None rather than the new position.
        self._file.seek(*args)
    @property
    def softspace(self):
        return self._file.softspace
    def tell(self):
        return self._file.tell()
    def truncate(self, size=None):
        # NOTE(review): unlike io.IOBase.truncate, no new size is returned.
        if size is None:
            self._file.truncate()
        else:
            if size > self._max_size:
                # Truncating to beyond max_size still requires a real file.
                self.rollover()
            self._file.truncate(size)
    def write(self, s):
        # Write, then check whether the buffer grew past max_size and must
        # roll over to disk.
        file = self._file
        rv = file.write(s)
        self._check(file)
        return rv
    def writelines(self, iterable):
        # Same rollover check as write(), applied once after all lines.
        file = self._file
        rv = file.writelines(iterable)
        self._check(file)
        return rv
class TemporaryDirectory(object):
    """Create and return a temporary directory.  This has the same
    behavior as mkdtemp but can be used as a context manager.  For
    example:

        with TemporaryDirectory() as tmpdir:
            ...

    Upon exiting the context, the directory and everything contained
    in it are removed.
    """

    # Handle mkdtemp raising an exception: class-level defaults ensure the
    # attributes exist even if __init__ fails partway through.
    name = None
    _finalizer = None
    _closed = False

    def __init__(self, suffix="", prefix=template, dir=None):
        self.name = mkdtemp(suffix, prefix, dir)
        # The finalizer removes the directory (with a ResourceWarning) if
        # the object is garbage-collected without an explicit cleanup().
        self._finalizer = _weakref.finalize(
            self, self._cleanup, self.name,
            warn_message="Implicitly cleaning up {!r}".format(self))

    @classmethod
    def _cleanup(cls, name, warn_message=None):
        # Runs from the finalizer; warns because relying on GC for cleanup
        # is considered a programming error.
        _shutil.rmtree(name)
        if warn_message is not None:
            _warnings.warn(warn_message, ResourceWarning)

    def __repr__(self):
        return "<{} {!r}>".format(self.__class__.__name__, self.name)

    def __enter__(self):
        return self.name

    def __exit__(self, exc, value, tb):
        self.cleanup()

    def cleanup(self):
        # Explicit cleanup: detach the finalizer first so it cannot fire a
        # second removal (and warning) later, then remove the tree once.
        if self._finalizer is not None:
            self._finalizer.detach()
        if self.name is not None and not self._closed:
            _shutil.rmtree(self.name)
            self._closed = True
| lgpl-3.0 |
jrclaramunt/django-cms | cms/utils/copy_plugins.py | 3 | 1676 | # -*- coding: utf-8 -*-
def copy_plugins_to(plugin_list, to_placeholder, to_language=None, parent_plugin_id=None, no_signals=False):
    """
    Copy ``plugin_list`` into ``to_placeholder``, optionally translating the
    copies to ``to_language`` and re-parenting the copied subtree under the
    plugin with pk ``parent_plugin_id``.

    Returns a list of ``(new_plugin, old_plugin)`` pairs.
    """
    old_parent_cache = {}
    plugins_ziplist = []
    first = True
    for old_plugin in plugin_list:
        if first:
            # The first plugin in the list is the root of the copied subtree.
            old_plugin.parent = None
            old_plugin.parent_id = None
        if to_language:
            plugin_language = to_language
        else:
            plugin_language = old_plugin.language
        # do the simple copying
        new_plugin = old_plugin.copy_plugin(to_placeholder, plugin_language,
                                            old_parent_cache, no_signals=no_signals)
        if first:
            first = False
            # Bug fix: the original tested ``parent_plugin_id`` twice in a
            # row; a single check (and a single import) suffices.
            if parent_plugin_id:
                from cms.models import CMSPlugin
                new_plugin.parent_id = parent_plugin_id
                new_plugin.save()
                new_plugin.move(CMSPlugin.objects.get(pk=parent_plugin_id), pos='last-child')
                # Re-fetch so the tree fields reflect the move.
                new_plugin = CMSPlugin.objects.get(pk=new_plugin.pk)
        plugins_ziplist.append((new_plugin, old_plugin))
    # this magic is needed for advanced plugins like Text Plugins that can have
    # nested plugins and need to update their content based on the new plugins.
    for new_plugin, old_plugin in plugins_ziplist:
        new_instance = new_plugin.get_plugin_instance()[0]
        if new_instance:
            new_instance._no_reorder = True
            new_instance.post_copy(old_plugin, plugins_ziplist)
    # returns information about originals and copies
    return plugins_ziplist
| bsd-3-clause |
gutomaia/gevent-socketio | examples/testapp/testapp/views.py | 12 | 4553 | from pyramid.view import view_config
import gevent
from socketio import socketio_manage
from socketio.namespace import BaseNamespace
from socketio.mixins import RoomsMixin, BroadcastMixin
from gevent import socket
def index(request):
    """Landing-page view: everything of interest lives in the template,
    so no variables are passed through."""
    return dict()
"""
ACK model:
The client sends a message of the sort:
{type: 'message',
id: 140,
ack: true,
endpoint: '/tobi',
data: ''
}
The 'ack' value is 'true', marking that we want an automatic 'ack' when it
receives the packet. The Node.js version sends the ack itself, without any
server-side code interaction. It dispatches the packet only after sending back
an ack, so the ack isn't really a reply. It's just marking the server received
it, but not if the event/message/json was properly processed.
The automated reply from such a request is:
{type: 'ack',
ackId: '140',
endpoint: '',
args: []
}
Where 'ackId' corresponds to the 'id' of the originating message. Upon
reception of this 'ack' message, the client then looks in an object if there
is a callback function to call associated with this message id (140). If so,
runs it, otherwise, drops the packet.
There is a second way to ask for an ack, sending a packet like this:
{type: 'event',
id: 1,
ack: 'data',
endpoint: '',
name: 'chat',
args: ['', '']
}
{type: 'json',
id: 1,
ack: 'data',
endpoint: '',
data: {a: 'b'}
}
.. the same goes for a 'message' packet, which has the 'ack' equal to 'data'.
When the server receives such a packet, it dispatches the corresponding event
(either the named event specified in an 'event' type packet, or 'message' or
'json, if the type is so), and *adds* as a parameter, in addition to the
'args' passed by the event (or 'data' for 'message'/'json'), the ack() function
to call (it encloses the packet 'id' already). Any number of arguments passed
to that 'ack()' function will be passed on to the client-side, and given as
parameter on the client-side function.
That is the returning 'ack' message, with the data ready to be passed as
arguments to the saved callback on the client side:
{type: 'ack',
ackId: '12',
endpoint: '',
args: ['woot', 'wa']
}
"""
class GlobalIONamespace(BaseNamespace, BroadcastMixin):
def on_chat(self, *args):
self.emit("bob", {'hello': 'world'})
print "Received chat message", args
self.broadcast_event_not_me('chat', *args)
def recv_connect(self):
print "CONNNNNNNN"
self.emit("you_just_connected", {'bravo': 'kid'})
self.spawn(self.cpu_checker_process)
def recv_json(self, data):
self.emit("got_some_json", data)
def on_bob(self, *args):
self.broadcast_event('broadcasted', args)
self.socket['/chat'].emit('bob')
def cpu_checker_process(self):
"""This will be a greenlet"""
ret = os.system("cat /proc/cpu/stuff")
self.emit("cpu_value", ret)
class ChatIONamespace(BaseNamespace, RoomsMixin):
def on_mymessage(self, msg):
print "In on_mymessage"
self.send("little message back")
self.send({'blah': 'blah'}, json=True)
for x in xrange(2):
self.emit("pack", {'the': 'more', 'you': 'can'})
def on_my_callback(self, packet):
return (1, 2)
def on_trigger_server_callback(self, superbob):
def cb():
print "OK, WE WERE CALLED BACK BY THE ACK! THANKS :)"
self.emit('callmeback', 'this is a first param',
'this is the last param', callback=cb)
def cb2(param1, param2):
print "OK, GOT THOSE VALUES BACK BY CB", param1, param2
self.emit('callmeback', 'this is a first param',
'this is the last param', callback=cb2)
def on_rtc_invite(self, sdp):
print "Got an RTC invite, now pushing to others..."
self.emit_to_room('room1', 'rtc_invite', self.session['nickname'],
sdp)
def recv_connect(self):
self.session['nickname'] = 'guest123'
self.join('room1')
def recv_message(self, data):
print "Received a 'message' with data:", data
def on_disconnect_me(self, data):
print "Disconnecting you buddy", data
self.disconnect()
# Map socket.io endpoint names to the Namespace classes that handle them.
nsmap = {'': GlobalIONamespace,
         '/chat': ChatIONamespace}
@view_config(route_name='socket_io')
def socketio_service(request):
    """ The view that will launch the socketio listener """
    # Blocks for the duration of the socket.io session; the namespaces in
    # ``nsmap`` handle the individual events.
    socketio_manage(request.environ, namespaces=nsmap, request=request)
    return {}
| bsd-3-clause |
wangyum/tensorflow | tensorflow/contrib/keras/python/keras/layers/simplernn_test.py | 47 | 6799 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SimpleRNN layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.keras.python import keras
from tensorflow.contrib.keras.python.keras import testing_utils
from tensorflow.python.platform import test
class SimpleRNNLayerTest(test.TestCase):
  """Unit tests for the Keras SimpleRNN layer."""

  def test_return_sequences_SimpleRNN(self):
    num_samples = 2
    timesteps = 3
    embedding_dim = 4
    units = 2
    with self.test_session():
      testing_utils.layer_test(
          keras.layers.SimpleRNN,
          kwargs={'units': units,
                  'return_sequences': True},
          input_shape=(num_samples, timesteps, embedding_dim))

  def test_dynamic_behavior_SimpleRNN(self):
    # Variable-length time dimension (None) must still train.
    num_samples = 2
    timesteps = 3
    embedding_dim = 4
    units = 2
    with self.test_session():
      layer = keras.layers.SimpleRNN(units, input_shape=(None, embedding_dim))
      model = keras.models.Sequential()
      model.add(layer)
      model.compile('sgd', 'mse')
      x = np.random.random((num_samples, timesteps, embedding_dim))
      y = np.random.random((num_samples, units))
      model.train_on_batch(x, y)

  def test_dropout_SimpleRNN(self):
    num_samples = 2
    timesteps = 3
    embedding_dim = 4
    units = 2
    with self.test_session():
      testing_utils.layer_test(
          keras.layers.SimpleRNN,
          kwargs={'units': units,
                  'dropout': 0.1,
                  'recurrent_dropout': 0.1},
          input_shape=(num_samples, timesteps, embedding_dim))

  def test_implementation_mode_SimpleRNN(self):
    num_samples = 2
    timesteps = 3
    embedding_dim = 4
    units = 2
    with self.test_session():
      for mode in [0, 1, 2]:
        testing_utils.layer_test(
            keras.layers.SimpleRNN,
            kwargs={'units': units,
                    'implementation': mode},
            input_shape=(num_samples, timesteps, embedding_dim))

  def test_statefulness_SimpleRNN(self):
    num_samples = 2
    timesteps = 3
    embedding_dim = 4
    units = 2
    layer_class = keras.layers.SimpleRNN
    with self.test_session():
      model = keras.models.Sequential()
      model.add(
          keras.layers.Embedding(
              4,
              embedding_dim,
              mask_zero=True,
              input_length=timesteps,
              batch_input_shape=(num_samples, timesteps)))
      layer = layer_class(
          units, return_sequences=False, stateful=True, weights=None)
      model.add(layer)
      model.compile(optimizer='sgd', loss='mse')
      out1 = model.predict(np.ones((num_samples, timesteps)))
      self.assertEqual(out1.shape, (num_samples, units))
      # train once so that the states change
      model.train_on_batch(
          np.ones((num_samples, timesteps)), np.ones((num_samples, units)))
      out2 = model.predict(np.ones((num_samples, timesteps)))
      # if the state is not reset, output should be different
      self.assertNotEqual(out1.max(), out2.max())
      # check that output changes after states are reset
      # (even though the model itself didn't change)
      layer.reset_states()
      out3 = model.predict(np.ones((num_samples, timesteps)))
      self.assertNotEqual(out2.max(), out3.max())
      # check that container-level reset_states() works
      model.reset_states()
      out4 = model.predict(np.ones((num_samples, timesteps)))
      np.testing.assert_allclose(out3, out4, atol=1e-5)
      # check that the call to `predict` updated the states
      out5 = model.predict(np.ones((num_samples, timesteps)))
      self.assertNotEqual(out4.max(), out5.max())
      # Check masking: masked (zeroed) leading/trailing steps must yield
      # the same output regardless of padding side.
      layer.reset_states()
      left_padded_input = np.ones((num_samples, timesteps))
      left_padded_input[0, :1] = 0
      left_padded_input[1, :2] = 0
      out6 = model.predict(left_padded_input)
      layer.reset_states()
      right_padded_input = np.ones((num_samples, timesteps))
      right_padded_input[0, -1:] = 0
      right_padded_input[1, -2:] = 0
      out7 = model.predict(right_padded_input)
      np.testing.assert_allclose(out7, out6, atol=1e-5)

  def test_regularization_SimpleRNN(self):
    embedding_dim = 4
    layer_class = keras.layers.SimpleRNN
    with self.test_session():
      layer = layer_class(
          5,
          return_sequences=False,
          weights=None,
          input_shape=(None, embedding_dim),
          kernel_regularizer=keras.regularizers.l1(0.01),
          recurrent_regularizer=keras.regularizers.l1(0.01),
          bias_regularizer='l2',
          activity_regularizer='l1')
      layer.build((None, None, 2))
      # Three weight regularizers; the activity loss appears after a call.
      self.assertEqual(len(layer.losses), 3)
      layer(keras.backend.variable(np.ones((2, 3, 2))))
      self.assertEqual(len(layer.losses), 4)
      layer = layer_class(
          5,
          return_sequences=False,
          weights=None,
          input_shape=(None, embedding_dim),
          kernel_constraint=keras.constraints.max_norm(0.01),
          recurrent_constraint=keras.constraints.max_norm(0.01),
          bias_constraint='max_norm')
      layer.build((None, None, embedding_dim))
      self.assertEqual(len(layer.constraints), 3)

  def test_with_masking_layer_SimpleRNN(self):
    layer_class = keras.layers.SimpleRNN
    with self.test_session():
      inputs = np.random.random((2, 3, 4))
      targets = np.abs(np.random.random((2, 3, 5)))
      targets /= targets.sum(axis=-1, keepdims=True)
      model = keras.models.Sequential()
      model.add(keras.layers.Masking(input_shape=(3, 4)))
      model.add(layer_class(units=5, return_sequences=True, unroll=False))
      model.compile(loss='categorical_crossentropy', optimizer='adam')
      model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)

  def test_from_config_SimpleRNN(self):
    # Round-tripping through get_config/from_config must be lossless.
    layer_class = keras.layers.SimpleRNN
    for stateful in (False, True):
      l1 = layer_class(units=1, stateful=stateful)
      l2 = layer_class.from_config(l1.get_config())
      assert l1.get_config() == l2.get_config()
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
| apache-2.0 |
vbannai/neutron | neutron/tests/unit/vmware/__init__.py | 29 | 2194 | # Copyright 2013 OpenStack Foundation.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from neutron.plugins.vmware.api_client import client as nsx_client
from neutron.plugins.vmware.api_client import eventlet_client
from neutron.plugins.vmware import extensions
import neutron.plugins.vmware.plugin as neutron_plugin
from neutron.plugins.vmware.vshield.common import VcnsApiClient as vcnsapi
from neutron.plugins.vmware.vshield import vcns
import neutron.plugins.vmware.vshield.vcns_driver as vcnsdriver
# Shorthand aliases for the classes exercised by the NSX unit tests.
plugin = neutron_plugin.NsxPlugin
service_plugin = neutron_plugin.NsxServicePlugin
api_client = nsx_client.NsxApiClient
evt_client = eventlet_client.EventletApiClient
vcns_class = vcns.Vcns
vcns_driver = vcnsdriver.VcnsDriver
vcns_api_helper = vcnsapi.VcnsApiHelper

# Locations of the test fixtures and the bundled NSX extensions.
STUBS_PATH = os.path.join(os.path.dirname(__file__), 'etc')
NSXEXT_PATH = os.path.dirname(extensions.__file__)

# Fully qualified dotted names of the classes above — presumably used as
# targets for mock.patch() in the test modules (verify against callers).
NSXAPI_NAME = '%s.%s' % (api_client.__module__, api_client.__name__)
PLUGIN_NAME = '%s.%s' % (plugin.__module__, plugin.__name__)
SERVICE_PLUGIN_NAME = '%s.%s' % (service_plugin.__module__,
                                 service_plugin.__name__)
CLIENT_NAME = '%s.%s' % (evt_client.__module__, evt_client.__name__)
VCNS_NAME = '%s.%s' % (vcns_class.__module__, vcns_class.__name__)
VCNS_DRIVER_NAME = '%s.%s' % (vcns_driver.__module__, vcns_driver.__name__)
VCNSAPI_NAME = '%s.%s' % (vcns_api_helper.__module__, vcns_api_helper.__name__)
def get_fake_conf(filename):
    """Return the full path of *filename* inside the test stubs folder."""
    fake_conf_path = os.path.join(STUBS_PATH, filename)
    return fake_conf_path
def nsx_method(method_name, module_name='nsxlib'):
    """Build the fully qualified dotted path of an NSX plugin attribute."""
    return ".".join(('neutron.plugins.vmware', module_name, method_name))
| apache-2.0 |
devinbalkind/eden | modules/ClimateDataPortal/import_tabbed_data.py | 56 | 11039 | #!/usr/bin/python
ClimateDataPortal = local_import("ClimateDataPortal")
InsertChunksWithoutCheckingForExistingReadings = local_import(
"ClimateDataPortal.InsertChunksWithoutCheckingForExistingReadings"
).InsertChunksWithoutCheckingForExistingReadings
def get_or_create(dict, key, creator):
    """Return dict[key], lazily creating and storing creator() when absent.

    NOTE: the first parameter shadows the builtin ``dict``; the name is kept
    for backward compatibility with existing callers.
    """
    if key not in dict:
        dict[key] = creator()
    return dict[key]
class Readings(object):
    """Accumulates readings for a single place: raw strings are converted,
    range-checked, and in-range values forwarded to the writer."""
    def __init__(
        self,
        place_id,
        missing_data_marker,
        converter,
        writer,
        time_period,
        maximum = None,
        minimum = None,
    ):
        self.place_id = place_id
        self.writer = writer
        self.time_period = time_period
        self.missing_data_marker = missing_data_marker
        self.converter = converter
        self.minimum = minimum
        self.maximum = maximum
        self.aggregated_values = {}

    def __repr__(self):
        return "%s for place %i" % (self.writer, self.place_id)

    def add_reading(self, year, month, day, reading, out_of_range):
        """Convert and record one raw reading; missing-data markers are
        skipped, out-of-range values reported via out_of_range()."""
        if reading == self.missing_data_marker:
            return
        value = self.converter(reading)
        too_low = self.minimum is not None and value < self.minimum
        too_high = self.maximum is not None and value > self.maximum
        if too_low or too_high:
            # NOTE(review): called with the value only, although the
            # module-level out_of_range() helper in this file expects
            # (year, month, day, reading) — confirm intended callback.
            out_of_range(value)
        else:
            self.writer(self.time_period(year, month, day), self.place_id, value)

    def done(self):
        """Flush the underlying writer."""
        self.writer.done()
def import_tabbed_readings(
folder,
start_station,
end_station,
suffix,
prefix,
fields,
clear_existing_data,
separator,
missing_data_marker
):
"""
Expects a folder containing files with name rtXXXX.txt
each file contains lines of the form e.g.:
1978\t1\t1\t0\t-99.9\t-99.9
representing year, month, day, rainfall(mm), minimum and maximum temperature
"""
import os
assert os.path.isdir(folder), "%s is not a folder!" % folder
Observed = ClimateDataPortal.sample_codes["Observed"]
from decimal import Decimal
import datetime
available_tables = {}
for sample_table_spec in db(db.climate_sample_table_spec).select():
if sample_table_spec.sample_type_code == Observed:
available_tables[sample_table_spec.name] = ClimateDataPortal.sample_table_id(
sample_table_spec.id
)
field_order = []
def readings_lambda(sample_table, input_units):
conversion = ClimateDataPortal.units_in_out[input_units]["in"]
return (lambda missing_data_marker, converter, place_id:
Readings(
place_id,
missing_data_marker = missing_data_marker,
converter = (lambda value: conversion(float(value))),
time_period = ClimateDataPortal.year_month_day_to_day_number,
maximum = None,
minimum = None,
writer = InsertChunksWithoutCheckingForExistingReadings(sample_table, db)
)
)
date_format = {}
field_positions = []
for field, position in zip(fields, range(len(fields))):
if field is not "UNUSED":
if field in ("year", "month", "day"):
date_format[field+"_pos"] = position
else:
field, input_units = field.rsplit(" ", 1)
try:
sample_table = available_tables[field]
except KeyError:
raise Exception(
"'%s' not recognised, available options are: "
"year, month, day, %s\n"
"You can add new tables using add_table.py" % (
field,
", ".join(map("\"%s\"".__mod__, available_tables.keys()))
)
)
else:
if clear_existing_data:
print "Clearing "+sample_table
sample_table.clear()
field_positions.append(
(readings_lambda(sample_table, input_units), position)
)
for field in ("year", "month", "day"):
assert field+"_pos" in date_format, "%s is not specified in --fields" % field
query_terms = []
if start_station is not None:
query_terms.append(climate_station_id.station_id >= start_station)
if end_station is not None:
query_terms.append(climate_station_id.station_id <= end_station)
if not query_terms:
query = climate_station_id
else:
import operator
query = reduce(operator.and_, query_terms)
stations = list(db(query).select())
if stations:
for station in stations:
station_id = station.station_id
print station_id
data_file_path = os.path.join(
folder,
(prefix+"%04i"+suffix) % station_id
)
if not os.path.exists(data_file_path):
print "%s not found" % data_file_path
else:
variable_positions = []
for field, position in field_positions:
variable_positions.append(
(
field(
missing_data_marker = missing_data_marker,
converter = Decimal,
place_id = station.id
),
position
)
)
import_data_in_file(
data_file_path,
tuple(variable_positions),
separator,
**date_format
)
db.commit()
else:
print "No stations! Import using import_stations.py"
def out_of_range(year, month, day, reading):
    # Report a reading rejected by the min/max bounds.
    # NOTE(review): Readings.add_reading invokes its out_of_range callback
    # with the reading only, which would not match this 4-argument
    # signature — confirm which one is intended.
    print "%s-%s-%s: %s out of range" % (
        year, month, day, reading
    )
def import_data_row(year, month, day, data):
    """Feed one parsed line into each (variable, raw value) pair, with
    out-of-range values reported through the module-level out_of_range()."""
    for reading_target, raw_value in data:
        reading_target.add_reading(
            year, month, day,
            raw_value,
            out_of_range = out_of_range
        )
def import_data_in_file(
    data_file_path,
    variable_positions,
    separator,
    year_pos,
    month_pos,
    day_pos,
):
    # Parse one station file line by line, feeding each row's fields into
    # the Readings accumulators in variable_positions.  Malformed rows are
    # reported and skipped rather than aborting the whole file.
    # print variables
    import decimal
    try:
        line_number = 1
        for line in open(data_file_path, "r").readlines():
            if line:
                field_strings = line.split(separator)
                if field_strings.__len__() > 0:
                    try:
                        field = field_strings.__getitem__
                        import_data_row(
                            int(field(year_pos)),
                            int(field(month_pos)),
                            int(field(day_pos)),
                            tuple((variable, field(position)) for variable, position in variable_positions)
                        )
                    except (IndexError, ValueError, decimal.InvalidOperation), exception:
                        # Bad row: report with its line number and continue.
                        print line, "line", line_number, ":", exception
                line_number += 1
        # Flush all accumulated readings once the file is fully parsed.
        for variable, position in variable_positions:
            variable.done()
    except NotImplemented:
        # NOTE(review): ``NotImplemented`` is not an exception class, so
        # this clause can never match; ``NotImplementedError`` was probably
        # intended — confirm before changing.
        print line
        raise
def main(argv):
    # Command-line entry point: parse the arguments and delegate to
    # import_tabbed_readings().
    import argparse
    import os
    parser = argparse.ArgumentParser(
        description = "Imports observed climate data from tab-delimited files in a folder.",
        prog= argv[0],
        usage="""
<web2py preamble to run script> \\
  %(prog)s \\
  --folder path_to/folder [options]

Use flag -h | --help for extra help on options.

The file names must follow a convention of prefix + station_id + suffix.
e.g.:
path_to
`--folder
   |--rt0100.txt
   |--rt0101.txt
   |--...
   `--rt9999.txt

* Other files in this folder will not be read.
* Files not corresponding to imported stations will not be read.
* You must add tables for the data being import before it can be imported.
  Use add_table.py to do this.

Examples: *(IN ROOT OF APP FOLDER)*

Import all files in a folder, clearing existing data :
python ./run.py \\
%(prog)s \\
--folder path_to/folder --clear_existing_data \\
--fields year month day "Rainfall mm" "Max Temp C" "Min Temp C"

Import a range of stations:
python ./run.py \\
%(prog)s \\
--folder path_to/folder --from 0 --to 500 \\
--fields year month day "Rainfall mm" "Max Temp C" "Min Temp C"

Only import Rainfall:
python ./run.py \\
%(prog)s \\
--folder path_to/folder \\
--fields year month day "Rainfall mm" UNUSED UNUSED
""")
    parser.add_argument(
        "--folder",
        required = True,
        help="Folder in which to search for files."
    )
    parser.add_argument(
        "--clear_existing_data",
        help="Truncate database tables first."
    )
    parser.add_argument(
        "--start_station",
        type=int,
        default = None,
        help="Station number to start from."
    )
    parser.add_argument(
        "--end_station",
        type=int,
        default = None,
        help="""Station number to end on
            (inclusive, i.e. import data from this station's file too)."""
    )
    parser.add_argument(
        "--prefix",
        default = "rt",
        help="File name prefix e.g. '%(default)s' (default)"
    )
    parser.add_argument(
        "--suffix",
        default = ".txt",
        help="File name suffix e.g. '%(default)s' (default)."
    )
    parser.add_argument(
        "--separator",
        default = None,
        help="Field separator e.g. '\t' (default is None - any whitespace)."
    )
    parser.add_argument(
        "--missing_data_marker",
        default = "-99.9",
        help = """Missing data marker.
            Interpret this as missing data and do not import anything for that date.
            """
    )
    parser.add_argument(
        "--fields",
        required = True,
        nargs = "+",
        help="""List of fields in file, e.g.:
year month day "Rainfall mm" "Max Temp Celsius" "Min Temp Celsius"

year, month and day are used to parse the date.
The other fields name tables to import data into, mapping by position.
All fields must be accounted for. Any unused fields should be marked as UNUSED.
"""
    )

    args = parser.parse_args(argv[1:])
    # Forward the parsed options to import_tabbed_readings() as keyword
    # arguments (skipping argparse-internal underscore attributes).
    kwargs = {}
    for key, value in args.__dict__.iteritems():
        if not key.startswith("_"):
            kwargs[key] = value
    import_tabbed_readings(**kwargs)
| mit |
kkouer/PcGcs | Lib/site-packages/numpy/core/code_generators/ufunc_docstrings.py | 57 | 85797 | # Docstrings for generated ufuncs
# Registry of docstrings for generated ufuncs, keyed by "<module>.<name>".
docdict = {}

def get(name):
    """Return the registered docstring for *name*, or None if absent."""
    if name in docdict:
        return docdict[name]
    return None

def add_newdoc(place, name, doc):
    """Register *doc* as the docstring for ufunc *name* in module *place*."""
    docdict["%s.%s" % (place, name)] = doc
add_newdoc('numpy.core.umath', 'absolute',
"""
Calculate the absolute value element-wise.
Parameters
----------
x : array_like
Input array.
Returns
-------
absolute : ndarray
An ndarray containing the absolute value of
each element in `x`. For complex input, ``a + ib``, the
absolute value is :math:`\\sqrt{ a^2 + b^2 }`.
Examples
--------
>>> x = np.array([-1.2, 1.2])
>>> np.absolute(x)
array([ 1.2, 1.2])
>>> np.absolute(1.2 + 1j)
1.5620499351813308
Plot the function over ``[-10, 10]``:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-10, 10, 101)
>>> plt.plot(x, np.absolute(x))
>>> plt.show()
Plot the function over the complex plane:
>>> xx = x + 1j * x[:, np.newaxis]
>>> plt.imshow(np.abs(xx), extent=[-10, 10, -10, 10])
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'add',
"""
Add arguments element-wise.
Parameters
----------
x1, x2 : array_like
The arrays to be added. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
y : ndarray or scalar
The sum of `x1` and `x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to `x1` + `x2` in terms of array broadcasting.
Examples
--------
>>> np.add(1.0, 4.0)
5.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.add(x1, x2)
array([[ 0., 2., 4.],
[ 3., 5., 7.],
[ 6., 8., 10.]])
""")
add_newdoc('numpy.core.umath', 'arccos',
"""
Trigonometric inverse cosine, element-wise.
The inverse of `cos` so that, if ``y = cos(x)``, then ``x = arccos(y)``.
Parameters
----------
x : array_like
`x`-coordinate on the unit circle.
For real arguments, the domain is [-1, 1].
out : ndarray, optional
Array of the same shape as `a`, to store results in. See
`doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
angle : ndarray
The angle of the ray intersecting the unit circle at the given
`x`-coordinate in radians [0, pi]. If `x` is a scalar then a
scalar is returned, otherwise an array of the same shape as `x`
is returned.
See Also
--------
cos, arctan, arcsin, emath.arccos
Notes
-----
`arccos` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `cos(z) = x`. The convention is to return
the angle `z` whose real part lies in `[0, pi]`.
For real-valued input data types, `arccos` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arccos` is a complex analytic function that
has branch cuts `[-inf, -1]` and `[1, inf]` and is continuous from
above on the former and from below on the latter.
The inverse `cos` is also known as `acos` or cos^-1.
References
----------
M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 79. http://www.math.sfu.ca/~cbm/aands/
Examples
--------
We expect the arccos of 1 to be 0, and of -1 to be pi:
>>> np.arccos([1, -1])
array([ 0. , 3.14159265])
Plot arccos:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-1, 1, num=100)
>>> plt.plot(x, np.arccos(x))
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'arccosh',
"""
Inverse hyperbolic cosine, elementwise.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Array of the same shape as `x`, to store results in.
See `doc.ufuncs` (Section "Output arguments") for details.
Returns
-------
y : ndarray
Array of the same shape as `x`.
See Also
--------
cosh, arcsinh, sinh, arctanh, tanh
Notes
-----
`arccosh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `cosh(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi, pi]` and the real part in
``[0, inf]``.
For real-valued input data types, `arccosh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arccosh` is a complex analytical function that
has a branch cut `[-inf, 1]` and is continuous from above on it.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arccosh
Examples
--------
>>> np.arccosh([np.e, 10.0])
array([ 1.65745445, 2.99322285])
>>> np.arccosh(1)
0.0
""")
add_newdoc('numpy.core.umath', 'arcsin',
"""
Inverse sine, element-wise.
Parameters
----------
x : array_like
`y`-coordinate on the unit circle.
out : ndarray, optional
Array of the same shape as `x`, in which to store the results.
See `doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
angle : ndarray
The inverse sine of each element in `x`, in radians and in the
closed interval ``[-pi/2, pi/2]``. If `x` is a scalar, a scalar
is returned, otherwise an array.
See Also
--------
sin, cos, arccos, tan, arctan, arctan2, emath.arcsin
Notes
-----
`arcsin` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that :math:`sin(z) = x`. The convention is to
return the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, *arcsin* always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arcsin` is a complex analytic function that
has, by convention, the branch cuts [-inf, -1] and [1, inf] and is
continuous from above on the former and from below on the latter.
The inverse sine is also known as `asin` or sin^{-1}.
References
----------
Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
10th printing, New York: Dover, 1964, pp. 79ff.
http://www.math.sfu.ca/~cbm/aands/
Examples
--------
>>> np.arcsin(1) # pi/2
1.5707963267948966
>>> np.arcsin(-1) # -pi/2
-1.5707963267948966
>>> np.arcsin(0)
0.0
""")
add_newdoc('numpy.core.umath', 'arcsinh',
"""
Inverse hyperbolic sine elementwise.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
out : ndarray
Array of of the same shape as `x`.
Notes
-----
`arcsinh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `sinh(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi/2, pi/2]`.
For real-valued input data types, `arcsinh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
returns ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arccos` is a complex analytical function that
has branch cuts `[1j, infj]` and `[-1j, -infj]` and is continuous from
the right on the former and from the left on the latter.
The inverse hyperbolic sine is also known as `asinh` or ``sinh^-1``.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arcsinh
Examples
--------
>>> np.arcsinh(np.array([np.e, 10.0]))
array([ 1.72538256, 2.99822295])
""")
add_newdoc('numpy.core.umath', 'arctan',
"""
Trigonometric inverse tangent, element-wise.
The inverse of tan, so that if ``y = tan(x)`` then ``x = arctan(y)``.
Parameters
----------
x : array_like
Input values. `arctan` is applied to each element of `x`.
Returns
-------
out : ndarray
Out has the same shape as `x`. Its real part is in
``[-pi/2, pi/2]`` (``arctan(+/-inf)`` returns ``+/-pi/2``).
It is a scalar if `x` is a scalar.
See Also
--------
arctan2 : The "four quadrant" arctan of the angle formed by (`x`, `y`)
and the positive `x`-axis.
angle : Argument of complex values.
Notes
-----
`arctan` is a multi-valued function: for each `x` there are infinitely
many numbers `z` such that tan(`z`) = `x`. The convention is to return
the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, `arctan` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arctan` is a complex analytic function that
has [`1j, infj`] and [`-1j, -infj`] as branch cuts, and is continuous
from the left on the former and from the right on the latter.
The inverse tangent is also known as `atan` or tan^{-1}.
References
----------
Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
10th printing, New York: Dover, 1964, pp. 79.
http://www.math.sfu.ca/~cbm/aands/
Examples
--------
We expect the arctan of 0 to be 0, and of 1 to be pi/4:
>>> np.arctan([0, 1])
array([ 0. , 0.78539816])
>>> np.pi/4
0.78539816339744828
Plot arctan:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-10, 10)
>>> plt.plot(x, np.arctan(x))
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'arctan2',
"""
Element-wise arc tangent of ``x1/x2`` choosing the quadrant correctly.
The quadrant (i.e., branch) is chosen so that ``arctan2(x1, x2)`` is
the signed angle in radians between the ray ending at the origin and
passing through the point (1,0), and the ray ending at the origin and
passing through the point (`x2`, `x1`). (Note the role reversal: the
"`y`-coordinate" is the first function parameter, the "`x`-coordinate"
is the second.) By IEEE convention, this function is defined for
`x2` = +/-0 and for either or both of `x1` and `x2` = +/-inf (see
Notes for specific values).
This function is not defined for complex-valued arguments; for the
so-called argument of complex values, use `angle`.
Parameters
----------
x1 : array_like, real-valued
`y`-coordinates.
x2 : array_like, real-valued
`x`-coordinates. `x2` must be broadcastable to match the shape of
`x1` or vice versa.
Returns
-------
angle : ndarray
Array of angles in radians, in the range ``[-pi, pi]``.
See Also
--------
arctan, tan, angle
Notes
-----
*arctan2* is identical to the `atan2` function of the underlying
C library. The following special values are defined in the C
standard: [1]_
====== ====== ================
`x1` `x2` `arctan2(x1,x2)`
====== ====== ================
+/- 0 +0 +/- 0
+/- 0 -0 +/- pi
> 0 +/-inf +0 / +pi
< 0 +/-inf -0 / -pi
+/-inf +inf +/- (pi/4)
+/-inf -inf +/- (3*pi/4)
====== ====== ================
Note that +0 and -0 are distinct floating point numbers, as are +inf
and -inf.
References
----------
.. [1] ISO/IEC standard 9899:1999, "Programming language C."
Examples
--------
Consider four points in different quadrants:
>>> x = np.array([-1, +1, +1, -1])
>>> y = np.array([-1, -1, +1, +1])
>>> np.arctan2(y, x) * 180 / np.pi
array([-135., -45., 45., 135.])
Note the order of the parameters. `arctan2` is defined also when `x2` = 0
and at several other special points, obtaining values in
the range ``[-pi, pi]``:
>>> np.arctan2([1., -1.], [0., 0.])
array([ 1.57079633, -1.57079633])
>>> np.arctan2([0., 0., np.inf], [+0., -0., np.inf])
array([ 0. , 3.14159265, 0.78539816])
""")
add_newdoc('numpy.core.umath', '_arg',
"""
DO NOT USE, ONLY FOR TESTING
""")
add_newdoc('numpy.core.umath', 'arctanh',
"""
Inverse hyperbolic tangent elementwise.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : ndarray
Array of the same shape as `x`.
See Also
--------
emath.arctanh
Notes
-----
`arctanh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `tanh(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi/2, pi/2]`.
For real-valued input data types, `arctanh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arctanh` is a complex analytical function that
has branch cuts `[-1, -inf]` and `[1, inf]` and is continuous from
above on the former and from below on the latter.
The inverse hyperbolic tangent is also known as `atanh` or ``tanh^-1``.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arctanh
Examples
--------
>>> np.arctanh([0, -0.5])
array([ 0. , -0.54930614])
""")
# Docstring for the `bitwise_and` ufunc (the `&` operator).  Fixed here:
# the worked example wrote the 8-bit AND result with nine digits
# (``000000001``); 13 & 17 in eight bits is ``00000001``.
add_newdoc('numpy.core.umath', 'bitwise_and',
    """
    Compute the bit-wise AND of two arrays element-wise.
    Computes the bit-wise AND of the underlying binary representation of
    the integers in the input arrays. This ufunc implements the C/Python
    operator ``&``.
    Parameters
    ----------
    x1, x2 : array_like
        Only integer types are handled (including booleans).
    Returns
    -------
    out : array_like
        Result.
    See Also
    --------
    logical_and
    bitwise_or
    bitwise_xor
    binary_repr :
        Return the binary representation of the input number as a string.
    Examples
    --------
    The number 13 is represented by ``00001101``.  Likewise, 17 is
    represented by ``00010001``.  The bit-wise AND of 13 and 17 is
    therefore ``00000001``, or 1:
    >>> np.bitwise_and(13, 17)
    1
    >>> np.bitwise_and(14, 13)
    12
    >>> np.binary_repr(12)
    '1100'
    >>> np.bitwise_and([14,3], 13)
    array([12,  1])
    >>> np.bitwise_and([11,7], [4,25])
    array([0, 1])
    >>> np.bitwise_and(np.array([2,5,255]), np.array([3,14,16]))
    array([ 2,  4, 16])
    >>> np.bitwise_and([True, True], [False, True])
    array([False,  True], dtype=bool)
    """)
# Docstring for the `bitwise_or` ufunc (the `|` operator).  Fixed here:
# "binaray" typo, and the worked example's OR result was written
# ``000111011`` — 13 | 16 is 29, i.e. ``00011101`` in eight bits.
add_newdoc('numpy.core.umath', 'bitwise_or',
    """
    Compute the bit-wise OR of two arrays element-wise.
    Computes the bit-wise OR of the underlying binary representation of
    the integers in the input arrays. This ufunc implements the C/Python
    operator ``|``.
    Parameters
    ----------
    x1, x2 : array_like
        Only integer types are handled (including booleans).
    out : ndarray, optional
        Array into which the output is placed. Its type is preserved and it
        must be of the right shape to hold the output. See doc.ufuncs.
    Returns
    -------
    out : array_like
        Result.
    See Also
    --------
    logical_or
    bitwise_and
    bitwise_xor
    binary_repr :
        Return the binary representation of the input number as a string.
    Examples
    --------
    The number 13 has the binary representation ``00001101``. Likewise,
    16 is represented by ``00010000``.  The bit-wise OR of 13 and 16 is
    then ``00011101``, or 29:
    >>> np.bitwise_or(13, 16)
    29
    >>> np.binary_repr(29)
    '11101'
    >>> np.bitwise_or(32, 2)
    34
    >>> np.bitwise_or([33, 4], 1)
    array([33,  5])
    >>> np.bitwise_or([33, 4], [1, 2])
    array([33,  6])
    >>> np.bitwise_or(np.array([2, 5, 255]), np.array([4, 4, 4]))
    array([  6,   5, 255])
    >>> np.array([2, 5, 255]) | np.array([4, 4, 4])
    array([  6,   5, 255])
    >>> np.bitwise_or(np.array([2, 5, 255, 2147483647L], dtype=np.int32),
    ...               np.array([4, 4, 4, 2147483647L], dtype=np.int32))
    array([         6,          5,        255, 2147483647])
    >>> np.bitwise_or([True, True], [False, True])
    array([ True,  True], dtype=bool)
    """)
add_newdoc('numpy.core.umath', 'bitwise_xor',
"""
Compute the bit-wise XOR of two arrays element-wise.
Computes the bit-wise XOR of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``^``.
Parameters
----------
x1, x2 : array_like
Only integer types are handled (including booleans).
Returns
-------
out : array_like
Result.
See Also
--------
logical_xor
bitwise_and
bitwise_or
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 is represented by ``00001101``. Likewise, 17 is
represented by ``00010001``. The bit-wise XOR of 13 and 17 is
therefore ``00011100``, or 28:
>>> np.bitwise_xor(13, 17)
28
>>> np.binary_repr(28)
'11100'
>>> np.bitwise_xor(31, 5)
26
>>> np.bitwise_xor([31,3], 5)
array([26, 6])
>>> np.bitwise_xor([31,3], [5,6])
array([26, 5])
>>> np.bitwise_xor([True, True], [False, True])
array([ True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'ceil',
"""
Return the ceiling of the input, element-wise.
The ceil of the scalar `x` is the smallest integer `i`, such that
`i >= x`. It is often denoted as :math:`\\lceil x \\rceil`.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : {ndarray, scalar}
The ceiling of each element in `x`, with `float` dtype.
See Also
--------
floor, trunc, rint
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.ceil(a)
array([-1., -1., -0., 1., 2., 2., 2.])
""")
add_newdoc('numpy.core.umath', 'trunc',
"""
Return the truncated value of the input, element-wise.
The truncated value of the scalar `x` is the nearest integer `i` which
is closer to zero than `x` is. In short, the fractional part of the
signed number `x` is discarded.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : {ndarray, scalar}
The truncated value of each element in `x`.
See Also
--------
ceil, floor, rint
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.trunc(a)
array([-1., -1., -0., 0., 1., 1., 2.])
""")
# Docstring for the `conjugate` ufunc.  Fixed here: the Returns section
# said "same dtype as `y`", but `y` is the output itself — the result has
# the same dtype as the input `x`.
add_newdoc('numpy.core.umath', 'conjugate',
    """
    Return the complex conjugate, element-wise.
    The complex conjugate of a complex number is obtained by changing the
    sign of its imaginary part.
    Parameters
    ----------
    x : array_like
        Input value.
    Returns
    -------
    y : ndarray
        The complex conjugate of `x`, with same dtype as `x`.
    Examples
    --------
    >>> np.conjugate(1+2j)
    (1-2j)
    >>> x = np.eye(2) + 1j * np.eye(2)
    >>> np.conjugate(x)
    array([[ 1.-1.j,  0.-0.j],
           [ 0.-0.j,  1.-1.j]])
    """)
# Docstring for the `cos` ufunc.  Fixed here: the `out` example used an
# undefined name `out1`; it is now created before use so the doctest runs.
add_newdoc('numpy.core.umath', 'cos',
    """
    Cosine elementwise.
    Parameters
    ----------
    x : array_like
        Input array in radians.
    out : ndarray, optional
        Output array of same shape as `x`.
    Returns
    -------
    y : ndarray
        The corresponding cosine values.
    Raises
    ------
    ValueError: invalid return array shape
        if `out` is provided and `out.shape` != `x.shape` (See Examples)
    Notes
    -----
    If `out` is provided, the function writes the result into it,
    and returns a reference to `out`.  (See Examples)
    References
    ----------
    M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
    New York, NY: Dover, 1972.
    Examples
    --------
    >>> np.cos(np.array([0, np.pi/2, np.pi]))
    array([  1.00000000e+00,   6.12303177e-17,  -1.00000000e+00])
    >>>
    >>> # Example of providing the optional output parameter
    >>> out1 = np.empty(1)
    >>> out2 = np.cos([0.1], out1)
    >>> out2 is out1
    True
    >>>
    >>> # Example of ValueError due to provision of shape mis-matched `out`
    >>> np.cos(np.zeros((3,3)),np.zeros((2,2)))
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
    ValueError: invalid return array shape
    """)
add_newdoc('numpy.core.umath', 'cosh',
"""
Hyperbolic cosine, element-wise.
Equivalent to ``1/2 * (np.exp(x) + np.exp(-x))`` and ``np.cos(1j*x)``.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : ndarray
Output array of same shape as `x`.
Examples
--------
>>> np.cosh(0)
1.0
The hyperbolic cosine describes the shape of a hanging cable:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-4, 4, 1000)
>>> plt.plot(x, np.cosh(x))
>>> plt.show()
""")
# Docstring for the `degrees` ufunc.  Fixed here: the doctest called bare
# `degrees(rad, out)`, which is a NameError in the documented `np`-only
# namespace; it now calls `np.degrees`.
add_newdoc('numpy.core.umath', 'degrees',
    """
    Convert angles from radians to degrees.
    Parameters
    ----------
    x : array_like
        Input array in radians.
    out : ndarray, optional
        Output array of same shape as x.
    Returns
    -------
    y : ndarray of floats
        The corresponding degree values; if `out` was supplied this is a
        reference to it.
    See Also
    --------
    rad2deg : equivalent function
    Examples
    --------
    Convert a radian array to degrees
    >>> rad = np.arange(12.)*np.pi/6
    >>> np.degrees(rad)
    array([   0.,   30.,   60.,   90.,  120.,  150.,  180.,  210.,  240.,
            270.,  300.,  330.])
    >>> out = np.zeros((rad.shape))
    >>> r = np.degrees(rad, out)
    >>> np.all(r == out)
    True
    """)
add_newdoc('numpy.core.umath', 'rad2deg',
"""
Convert angles from radians to degrees.
Parameters
----------
x : array_like
Angle in radians.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : ndarray
The corresponding angle in degrees.
See Also
--------
deg2rad : Convert angles from degrees to radians.
unwrap : Remove large jumps in angle by wrapping.
Notes
-----
.. versionadded:: 1.3.0
rad2deg(x) is ``180 * x / pi``.
Examples
--------
>>> np.rad2deg(np.pi/2)
90.0
""")
add_newdoc('numpy.core.umath', 'divide',
"""
Divide arguments element-wise.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : {ndarray, scalar}
The quotient `x1/x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
See Also
--------
seterr : Set whether to raise or warn on overflow, underflow and division
by zero.
Notes
-----
Equivalent to `x1` / `x2` in terms of array-broadcasting.
Behavior on division by zero can be changed using `seterr`.
When both `x1` and `x2` are of an integer type, `divide` will return
integers and throw away the fractional part. Moreover, division by zero
always yields zero in integer arithmetic.
Examples
--------
>>> np.divide(2.0, 4.0)
0.5
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.divide(x1, x2)
array([[ NaN, 1. , 1. ],
[ Inf, 4. , 2.5],
[ Inf, 7. , 4. ]])
Note the behavior with integer types:
>>> np.divide(2, 4)
0
>>> np.divide(2, 4.)
0.5
Division by zero always yields zero in integer arithmetic, and does not
raise an exception or a warning:
>>> np.divide(np.array([0, 1], dtype=int), np.array([0, 0], dtype=int))
array([0, 0])
Division by zero can, however, be caught using `seterr`:
>>> old_err_state = np.seterr(divide='raise')
>>> np.divide(1, 0)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
FloatingPointError: divide by zero encountered in divide
>>> ignored_states = np.seterr(**old_err_state)
>>> np.divide(1, 0)
0
""")
add_newdoc('numpy.core.umath', 'equal',
"""
Return (x1 == x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays of the same shape.
Returns
-------
out : {ndarray, bool}
Output array of bools, or a single bool if x1 and x2 are scalars.
See Also
--------
not_equal, greater_equal, less_equal, greater, less
Examples
--------
>>> np.equal([0, 1, 3], np.arange(3))
array([ True, True, False], dtype=bool)
What is compared are values, not types. So an int (1) and an array of
length one can evaluate as True:
>>> np.equal(1, np.ones(1))
array([ True], dtype=bool)
""")
# Docstring for the `exp` ufunc.  Fixed here: the Notes opened a
# parenthesis at "(this means that, ..." and never closed it; the closing
# paren is now placed after the defining identity.
add_newdoc('numpy.core.umath', 'exp',
    """
    Calculate the exponential of all elements in the input array.
    Parameters
    ----------
    x : array_like
        Input values.
    Returns
    -------
    out : ndarray
        Output array, element-wise exponential of `x`.
    See Also
    --------
    expm1 : Calculate ``exp(x) - 1`` for all elements in the array.
    exp2  : Calculate ``2**x`` for all elements in the array.
    Notes
    -----
    The irrational number ``e`` is also known as Euler's number.  It is
    approximately 2.718281, and is the base of the natural logarithm,
    ``ln`` (this means that, if :math:`x = \\ln y = \\log_e y`,
    then :math:`e^x = y`). For real input, ``exp(x)`` is always positive.
    For complex arguments, ``x = a + ib``, we can write
    :math:`e^x = e^a e^{ib}`.  The first term, :math:`e^a`, is already
    known (it is the real argument, described above).  The second term,
    :math:`e^{ib}`, is :math:`\\cos b + i \\sin b`, a function with magnitude
    1 and a periodic phase.
    References
    ----------
    .. [1] Wikipedia, "Exponential function",
           http://en.wikipedia.org/wiki/Exponential_function
    .. [2] M. Abramovitz and I. A. Stegun, "Handbook of Mathematical Functions
           with Formulas, Graphs, and Mathematical Tables," Dover, 1964, p. 69,
           http://www.math.sfu.ca/~cbm/aands/page_69.htm
    Examples
    --------
    Plot the magnitude and phase of ``exp(x)`` in the complex plane:
    >>> import matplotlib.pyplot as plt
    >>> x = np.linspace(-2*np.pi, 2*np.pi, 100)
    >>> xx = x + 1j * x[:, np.newaxis] # a + ib over complex plane
    >>> out = np.exp(xx)
    >>> plt.subplot(121)
    >>> plt.imshow(np.abs(out),
    ...            extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi])
    >>> plt.title('Magnitude of exp(x)')
    >>> plt.subplot(122)
    >>> plt.imshow(np.angle(out),
    ...            extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi])
    >>> plt.title('Phase (angle) of exp(x)')
    >>> plt.show()
    """)
# Docstring for the `exp2` ufunc.  Fixed here: the See Also entry
# described `exp` as "calculate x**p", which is wrong — `exp` computes
# ``e**x`` (the natural exponential).
add_newdoc('numpy.core.umath', 'exp2',
    """
    Calculate `2**p` for all `p` in the input array.
    Parameters
    ----------
    x : array_like
        Input values.
    out : ndarray, optional
        Array to insert results into.
    Returns
    -------
    out : ndarray
        Element-wise 2 to the power `x`.
    See Also
    --------
    exp : Calculate the exponential of all elements, ``e**x``.
    Notes
    -----
    .. versionadded:: 1.3.0
    Examples
    --------
    >>> np.exp2([2, 3])
    array([ 4.,  8.])
    """)
add_newdoc('numpy.core.umath', 'expm1',
"""
Calculate ``exp(x) - 1`` for all elements in the array.
Parameters
----------
x : array_like
Input values.
Returns
-------
out : ndarray
Element-wise exponential minus one: ``out = exp(x) - 1``.
See Also
--------
log1p : ``log(1 + x)``, the inverse of expm1.
Notes
-----
This function provides greater precision than the formula ``exp(x) - 1``
for small values of ``x``.
Examples
--------
The true value of ``exp(1e-10) - 1`` is ``1.00000000005e-10`` to
about 32 significant digits. This example shows the superiority of
expm1 in this case.
>>> np.expm1(1e-10)
1.00000000005e-10
>>> np.exp(1e-10) - 1
1.000000082740371e-10
""")
add_newdoc('numpy.core.umath', 'fabs',
"""
Compute the absolute values elementwise.
This function returns the absolute values (positive magnitude) of the data
in `x`. Complex values are not handled, use `absolute` to find the
absolute values of complex data.
Parameters
----------
x : array_like
The array of numbers for which the absolute values are required. If
`x` is a scalar, the result `y` will also be a scalar.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : {ndarray, scalar}
The absolute values of `x`, the returned values are always floats.
See Also
--------
absolute : Absolute values including `complex` types.
Examples
--------
>>> np.fabs(-1)
1.0
>>> np.fabs([-1.2, 1.2])
array([ 1.2, 1.2])
""")
# Docstring for the `floor` ufunc.  Fixed here: "uses the a definition"
# typo in the Notes.
add_newdoc('numpy.core.umath', 'floor',
    """
    Return the floor of the input, element-wise.
    The floor of the scalar `x` is the largest integer `i`, such that
    `i <= x`.  It is often denoted as :math:`\\lfloor x \\rfloor`.
    Parameters
    ----------
    x : array_like
        Input data.
    Returns
    -------
    y : {ndarray, scalar}
        The floor of each element in `x`.
    See Also
    --------
    ceil, trunc, rint
    Notes
    -----
    Some spreadsheet programs calculate the "floor-towards-zero", in other
    words ``floor(-2.5) == -2``.  NumPy, however, uses the definition of
    `floor` such that `floor(-2.5) == -3`.
    Examples
    --------
    >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
    >>> np.floor(a)
    array([-2., -2., -1.,  0.,  1.,  1.,  2.])
    """)
add_newdoc('numpy.core.umath', 'floor_divide',
"""
Return the largest integer smaller or equal to the division of the inputs.
Parameters
----------
x1 : array_like
Numerator.
x2 : array_like
Denominator.
Returns
-------
y : ndarray
y = floor(`x1`/`x2`)
See Also
--------
divide : Standard division.
floor : Round a number to the nearest integer toward minus infinity.
ceil : Round a number to the nearest integer toward infinity.
Examples
--------
>>> np.floor_divide(7,3)
2
>>> np.floor_divide([1., 2., 3., 4.], 2.5)
array([ 0., 0., 1., 1.])
""")
# Docstring for the `fmod` ufunc.  Fixed here: the summary claimed fmod is
# "the NumPy implementation of the Python modulo operator `%`", but fmod
# follows the C library `fmod` convention (remainder has the sign of the
# dividend) — the docstring's own Notes and examples demonstrate this, and
# it differs from Python's `%` (which `remainder` implements).
add_newdoc('numpy.core.umath', 'fmod',
    """
    Return the element-wise remainder of division.
    This is the NumPy implementation of the C library function fmod; the
    remainder has the same sign as the dividend `x1`. It should not be
    confused with the Python modulo operator ``x1 % x2``, whose behavior
    `remainder` implements.
    Parameters
    ----------
    x1 : array_like
      Dividend.
    x2 : array_like
      Divisor.
    Returns
    -------
    y : array_like
      The remainder of the division of `x1` by `x2`.
    See Also
    --------
    remainder : Modulo operation where the quotient is `floor(x1/x2)`.
    divide
    Notes
    -----
    The result of the modulo operation for negative dividend and divisors is
    bound by conventions. In `fmod`, the sign of the remainder is the sign of
    the dividend. In `remainder`, the sign of the divisor does not affect the
    sign of the result.
    Examples
    --------
    >>> np.fmod([-3, -2, -1, 1, 2, 3], 2)
    array([-1,  0, -1,  1,  0,  1])
    >>> np.remainder([-3, -2, -1, 1, 2, 3], 2)
    array([1, 0, 1, 1, 0, 1])
    >>> np.fmod([5, 3], [2, 2.])
    array([ 1.,  1.])
    >>> a = np.arange(-3, 3).reshape(3, 2)
    >>> a
    array([[-3, -2],
           [-1,  0],
           [ 1,  2]])
    >>> np.fmod(a, [2,2])
    array([[-1,  0],
           [-1,  0],
           [ 1,  0]])
    """)
add_newdoc('numpy.core.umath', 'greater',
"""
Return the truth value of (x1 > x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater_equal, less, less_equal, equal, not_equal
Examples
--------
>>> np.greater([4,2],[2,2])
array([ True, False], dtype=bool)
If the inputs are ndarrays, then np.greater is equivalent to '>'.
>>> a = np.array([4,2])
>>> b = np.array([2,2])
>>> a > b
array([ True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'greater_equal',
"""
Return the truth value of (x1 >= x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater, less, less_equal, equal, not_equal
Examples
--------
>>> np.greater_equal([4, 2, 1], [2, 2, 2])
array([ True, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'hypot',
"""
Given the "legs" of a right triangle, return its hypotenuse.
Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise. If `x1` or
`x2` is scalar_like (i.e., unambiguously cast-able to a scalar type),
it is broadcast for use with each element of the other argument.
(See Examples)
Parameters
----------
x1, x2 : array_like
Leg of the triangle(s).
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
z : ndarray
The hypotenuse of the triangle(s).
Examples
--------
>>> np.hypot(3*np.ones((3, 3)), 4*np.ones((3, 3)))
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
Example showing broadcast of scalar_like argument:
>>> np.hypot(3*np.ones((3, 3)), [4])
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
""")
# Docstring for the `invert` ufunc (bit-wise NOT, the `~` operator).
# Fixed here: the doctests referenced bare `uint8`/`uint16`/`int8` and
# `array` (NameErrors in the documented `np`-only namespace) and an
# undefined variable `x` — the value shown is 13, so 13 is used directly.
add_newdoc('numpy.core.umath', 'invert',
    """
    Compute bit-wise inversion, or bit-wise NOT, element-wise.
    Computes the bit-wise NOT of the underlying binary representation of
    the integers in the input arrays. This ufunc implements the C/Python
    operator ``~``.
    For signed integer inputs, the two's complement is returned.
    In a two's-complement system negative numbers are represented by the two's
    complement of the absolute value. This is the most common method of
    representing signed integers on computers [1]_. A N-bit two's-complement
    system can represent every integer in the range
    :math:`-2^{N-1}` to :math:`+2^{N-1}-1`.
    Parameters
    ----------
    x1 : array_like
        Only integer types are handled (including booleans).
    Returns
    -------
    out : array_like
        Result.
    See Also
    --------
    bitwise_and, bitwise_or, bitwise_xor
    logical_not
    binary_repr :
        Return the binary representation of the input number as a string.
    Notes
    -----
    `bitwise_not` is an alias for `invert`:
    >>> np.bitwise_not is np.invert
    True
    References
    ----------
    .. [1] Wikipedia, "Two's complement",
        http://en.wikipedia.org/wiki/Two's_complement
    Examples
    --------
    We've seen that 13 is represented by ``00001101``.
    The invert or bit-wise NOT of 13 is then:
    >>> np.invert(np.array([13], dtype=np.uint8))
    array([242], dtype=uint8)
    >>> np.binary_repr(13, width=8)
    '00001101'
    >>> np.binary_repr(242, width=8)
    '11110010'
    The result depends on the bit-width:
    >>> np.invert(np.array([13], dtype=np.uint16))
    array([65522], dtype=uint16)
    >>> np.binary_repr(13, width=16)
    '0000000000001101'
    >>> np.binary_repr(65522, width=16)
    '1111111111110010'
    When using signed integer types the result is the two's complement of
    the result for the unsigned type:
    >>> np.invert(np.array([13], dtype=np.int8))
    array([-14], dtype=int8)
    >>> np.binary_repr(-14, width=8)
    '11110010'
    Booleans are accepted as well:
    >>> np.invert(np.array([True, False]))
    array([False,  True], dtype=bool)
    """)
add_newdoc('numpy.core.umath', 'isfinite',
"""
Test element-wise for finite-ness (not infinity or not Not a Number).
The result is returned as a boolean array.
Parameters
----------
x : array_like
Input values.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
y : ndarray, bool
For scalar input, the result is a new boolean with value True
if the input is finite; otherwise the value is False (input is
either positive infinity, negative infinity or Not a Number).
For array input, the result is a boolean array with the same
dimensions as the input and the values are True if the corresponding
element of the input is finite; otherwise the values are False (element
is either positive infinity, negative infinity or Not a Number).
See Also
--------
isinf, isneginf, isposinf, isnan
Notes
-----
Not a Number, positive infinity and negative infinity are considered
to be non-finite.
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Also that positive infinity is not equivalent to negative infinity. But
infinity is equivalent to positive infinity.
Errors result if the second argument is also supplied when `x` is a scalar
input, or if first and second arguments have different shapes.
Examples
--------
>>> np.isfinite(1)
True
>>> np.isfinite(0)
True
>>> np.isfinite(np.nan)
False
>>> np.isfinite(np.inf)
False
>>> np.isfinite(np.NINF)
False
>>> np.isfinite([np.log(-1.),1.,np.log(0)])
array([False, True, False], dtype=bool)
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([2, 2, 2])
>>> np.isfinite(x, y)
array([0, 1, 0])
>>> y
array([0, 1, 0])
""")
add_newdoc('numpy.core.umath', 'isinf',
"""
Test element-wise for positive or negative infinity.
Return a bool-type array, the same shape as `x`, True where ``x ==
+/-inf``, False everywhere else.
Parameters
----------
x : array_like
Input values
out : array_like, optional
An array with the same shape as `x` to store the result.
Returns
-------
y : bool (scalar) or bool-type ndarray
For scalar input, the result is a new boolean with value True
if the input is positive or negative infinity; otherwise the value
is False.
For array input, the result is a boolean array with the same
shape as the input and the values are True where the
corresponding element of the input is positive or negative
infinity; elsewhere the values are False. If a second argument
was supplied the result is stored there. If the type of that array
is a numeric type the result is represented as zeros and ones, if
the type is boolean then as False and True, respectively.
The return value `y` is then a reference to that array.
See Also
--------
isneginf, isposinf, isnan, isfinite
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754).
Errors result if the second argument is supplied when the first
argument is a scalar, or if the first and second arguments have
different shapes.
Examples
--------
>>> np.isinf(np.inf)
True
>>> np.isinf(np.nan)
False
>>> np.isinf(np.NINF)
True
>>> np.isinf([np.inf, -np.inf, 1.0, np.nan])
array([ True, True, False, False], dtype=bool)
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([2, 2, 2])
>>> np.isinf(x, y)
array([1, 0, 1])
>>> y
array([1, 0, 1])
""")
add_newdoc('numpy.core.umath', 'isnan',
"""
Test element-wise for Not a Number (NaN), return result as a bool array.
Parameters
----------
x : array_like
Input array.
Returns
-------
y : {ndarray, bool}
For scalar input, the result is a new boolean with value True
if the input is NaN; otherwise the value is False.
For array input, the result is a boolean array with the same
dimensions as the input and the values are True if the corresponding
element of the input is NaN; otherwise the values are False.
See Also
--------
isinf, isneginf, isposinf, isfinite
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Examples
--------
>>> np.isnan(np.nan)
True
>>> np.isnan(np.inf)
False
>>> np.isnan([np.log(-1.),1.,np.log(0)])
array([ True, False, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'left_shift',
"""
Shift the bits of an integer to the left.
Bits are shifted to the left by appending `x2` 0s at the right of `x1`.
Since the internal representation of numbers is in binary format, this
operation is equivalent to multiplying `x1` by ``2**x2``.
Parameters
----------
x1 : array_like of integer type
Input values.
x2 : array_like of integer type
Number of zeros to append to `x1`. Has to be non-negative.
Returns
-------
out : array of integer type
Return `x1` with bits shifted `x2` times to the left.
See Also
--------
right_shift : Shift the bits of an integer to the right.
binary_repr : Return the binary representation of the input number
as a string.
Examples
--------
>>> np.binary_repr(5)
'101'
>>> np.left_shift(5, 2)
20
>>> np.binary_repr(20)
'10100'
>>> np.left_shift(5, [1,2,3])
array([10, 20, 40])
""")
add_newdoc('numpy.core.umath', 'less',
"""
Return the truth value of (x1 < x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater, less_equal, greater_equal, equal, not_equal
Examples
--------
>>> np.less([1, 2], [2, 2])
array([ True, False], dtype=bool)
""")
# Docstring for the `less_equal` ufunc.  Fixed here: the summary wrote the
# operator as "(x1 =< x2)"; the correct spelling is "<=".
add_newdoc('numpy.core.umath', 'less_equal',
    """
    Return the truth value of (x1 <= x2) element-wise.
    Parameters
    ----------
    x1, x2 : array_like
        Input arrays.  If ``x1.shape != x2.shape``, they must be
        broadcastable to a common shape (which may be the shape of one or
        the other).
    Returns
    -------
    out : bool or ndarray of bool
        Array of bools, or a single bool if `x1` and `x2` are scalars.
    See Also
    --------
    greater, less, greater_equal, equal, not_equal
    Examples
    --------
    >>> np.less_equal([4, 2, 1], [2, 2, 2])
    array([False,  True,  True], dtype=bool)
    """)
add_newdoc('numpy.core.umath', 'log',
"""
Natural logarithm, element-wise.
The natural logarithm `log` is the inverse of the exponential function,
so that `log(exp(x)) = x`. The natural logarithm is logarithm in base `e`.
Parameters
----------
x : array_like
Input value.
Returns
-------
y : ndarray
The natural logarithm of `x`, element-wise.
See Also
--------
log10, log2, log1p, emath.log
Notes
-----
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = x`. The convention is to return the `z`
whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log` always returns real output. For
each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log([1, np.e, np.e**2, 0])
array([ 0., 1., 2., -Inf])
""")
add_newdoc('numpy.core.umath', 'log10',
"""
Return the base 10 logarithm of the input array, element-wise.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
The logarithm to the base 10 of `x`, element-wise. NaNs are
returned where x is negative.
See Also
--------
emath.log10
Notes
-----
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `10**z = x`. The convention is to return the `z`
whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log10` always returns real output. For
each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log10` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log10`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log10([1e-15, -3.])
array([-15., NaN])
""")
add_newdoc('numpy.core.umath', 'log2',
"""
Base-2 logarithm of `x`.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
Base-2 logarithm of `x`.
See Also
--------
log, log10, log1p, emath.log2
Notes
-----
.. versionadded:: 1.3.0
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `2**z = x`. The convention is to return the `z`
whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log2` always returns real output. For
each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log2` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log2`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
Examples
--------
>>> x = np.array([0, 1, 2, 2**4])
>>> np.log2(x)
array([-Inf, 0., 1., 4.])
>>> xi = np.array([0+1.j, 1, 2+0.j, 4.j])
>>> np.log2(xi)
array([ 0.+2.26618007j, 0.+0.j , 1.+0.j , 2.+2.26618007j])
""")
add_newdoc('numpy.core.umath', 'logaddexp',
"""
Logarithm of the sum of exponentiations of the inputs.
Calculates ``log(exp(x1) + exp(x2))``. This function is useful in
statistics where the calculated probabilities of events may be so small
as to exceed the range of normal floating point numbers. In such cases
the logarithm of the calculated probability is stored. This function
allows adding probabilities stored in such a fashion.
Parameters
----------
x1, x2 : array_like
Input values.
Returns
-------
result : ndarray
Logarithm of ``exp(x1) + exp(x2)``.
See Also
--------
logaddexp2: Logarithm of the sum of exponentiations of inputs in base-2.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> prob1 = np.log(1e-50)
>>> prob2 = np.log(2.5e-50)
>>> prob12 = np.logaddexp(prob1, prob2)
>>> prob12
-113.87649168120691
>>> np.exp(prob12)
3.5000000000000057e-50
""")
add_newdoc('numpy.core.umath', 'logaddexp2',
"""
Logarithm of the sum of exponentiations of the inputs in base-2.
Calculates ``log2(2**x1 + 2**x2)``. This function is useful in machine
learning when the calculated probabilities of events may be so small
as to exceed the range of normal floating point numbers. In such cases
the base-2 logarithm of the calculated probability can be used instead.
This function allows adding probabilities stored in such a fashion.
Parameters
----------
x1, x2 : array_like
Input values.
out : ndarray, optional
Array to store results in.
Returns
-------
result : ndarray
Base-2 logarithm of ``2**x1 + 2**x2``.
See Also
--------
logaddexp: Logarithm of the sum of exponentiations of the inputs.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> prob1 = np.log2(1e-50)
>>> prob2 = np.log2(2.5e-50)
>>> prob12 = np.logaddexp2(prob1, prob2)
>>> prob1, prob2, prob12
(-166.09640474436813, -164.77447664948076, -164.28904982231052)
>>> 2**prob12
3.4999999999999914e-50
""")
add_newdoc('numpy.core.umath', 'log1p',
"""
Return the natural logarithm of one plus the input array, element-wise.
Calculates ``log(1 + x)``.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
Natural logarithm of `1 + x`, element-wise.
See Also
--------
expm1 : ``exp(x) - 1``, the inverse of `log1p`.
Notes
-----
For real-valued input, `log1p` is accurate also for `x` so small
that `1 + x == 1` in floating-point accuracy.
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = 1 + x`. The convention is to return
the `z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log1p` always returns real output. For
each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log1p` is a complex analytical function that
has a branch cut `[-inf, -1]` and is continuous from above on it. `log1p`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log1p(1e-99)
1e-99
>>> np.log(1 + 1e-99)
0.0
""")
add_newdoc('numpy.core.umath', 'logical_and',
"""
Compute the truth value of x1 AND x2 elementwise.
Parameters
----------
x1, x2 : array_like
Input arrays. `x1` and `x2` must be of the same shape.
Returns
-------
y : {ndarray, bool}
Boolean result with the same shape as `x1` and `x2` of the logical
AND operation on corresponding elements of `x1` and `x2`.
See Also
--------
logical_or, logical_not, logical_xor
bitwise_and
Examples
--------
>>> np.logical_and(True, False)
False
>>> np.logical_and([True, False], [False, False])
array([False, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_and(x>1, x<4)
array([False, False, True, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_not',
"""
Compute the truth value of NOT x elementwise.
Parameters
----------
x : array_like
Logical NOT is applied to the elements of `x`.
Returns
-------
y : bool or ndarray of bool
Boolean result with the same shape as `x` of the NOT operation
on elements of `x`.
See Also
--------
logical_and, logical_or, logical_xor
Examples
--------
>>> np.logical_not(3)
False
>>> np.logical_not([True, False, 0, 1])
array([False, True, True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_not(x<3)
array([False, False, False, True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_or',
"""
Compute the truth value of x1 OR x2 elementwise.
Parameters
----------
x1, x2 : array_like
Logical OR is applied to the elements of `x1` and `x2`.
They have to be of the same shape.
Returns
-------
y : {ndarray, bool}
Boolean result with the same shape as `x1` and `x2` of the logical
OR operation on elements of `x1` and `x2`.
See Also
--------
logical_and, logical_not, logical_xor
bitwise_or
Examples
--------
>>> np.logical_or(True, False)
True
>>> np.logical_or([True, False], [False, False])
array([ True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_or(x < 1, x > 3)
array([ True, False, False, False, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_xor',
"""
Compute the truth value of x1 XOR x2, element-wise.
Parameters
----------
x1, x2 : array_like
Logical XOR is applied to the elements of `x1` and `x2`. They must
be broadcastable to the same shape.
Returns
-------
y : bool or ndarray of bool
Boolean result of the logical XOR operation applied to the elements
of `x1` and `x2`; the shape is determined by whether or not
broadcasting of one or both arrays was required.
See Also
--------
logical_and, logical_or, logical_not, bitwise_xor
Examples
--------
>>> np.logical_xor(True, False)
True
>>> np.logical_xor([True, True, False, False], [True, False, True, False])
array([False, True, True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_xor(x < 1, x > 3)
array([ True, False, False, False, True], dtype=bool)
Simple example showing support of broadcasting
>>> np.logical_xor(0, np.eye(2))
array([[ True, False],
[False, True]], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'maximum',
"""
Element-wise maximum of array elements.
Compare two arrays and returns a new array containing
the element-wise maxima. If one of the elements being
compared is a nan, then that element is returned. If
both elements are nans then the first is returned. The
latter distinction is important for complex nans,
which are defined as at least one of the real or
imaginary parts being a nan. The net effect is that
nans are propagated.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape, or shapes that can be broadcast to a single shape.
Returns
-------
y : {ndarray, scalar}
The maximum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
minimum :
element-wise minimum
fmax :
element-wise maximum that ignores nans unless both inputs are nans.
fmin :
element-wise minimum that ignores nans unless both inputs are nans.
Notes
-----
Equivalent to ``np.where(x1 > x2, x1, x2)`` but faster and does proper
broadcasting.
Examples
--------
>>> np.maximum([2, 3, 4], [1, 5, 2])
array([2, 5, 4])
>>> np.maximum(np.eye(2), [0.5, 2])
array([[ 1. , 2. ],
[ 0.5, 2. ]])
>>> np.maximum([np.nan, 0, np.nan], [0, np.nan, np.nan])
array([ NaN, NaN, NaN])
>>> np.maximum(np.Inf, 1)
inf
""")
# Docstring for np.minimum (NaN-propagating element-wise minimum).
# Fix: the See Also entry for `maximum` previously described it as an
# element-wise *minimum*; it is the element-wise maximum.
add_newdoc('numpy.core.umath', 'minimum',
"""
Element-wise minimum of array elements.
Compare two arrays and returns a new array containing the element-wise
minima. If one of the elements being compared is a nan, then that element
is returned. If both elements are nans then the first is returned. The
latter distinction is important for complex nans, which are defined as at
least one of the real or imaginary parts being a nan. The net effect is
that nans are propagated.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape, or shapes that can be broadcast to a single shape.
Returns
-------
y : {ndarray, scalar}
The minimum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
maximum :
element-wise maximum that propagates nans.
fmax :
element-wise maximum that ignores nans unless both inputs are nans.
fmin :
element-wise minimum that ignores nans unless both inputs are nans.
Notes
-----
The minimum is equivalent to ``np.where(x1 <= x2, x1, x2)`` when neither
x1 nor x2 are nans, but it is faster and does proper broadcasting.
Examples
--------
>>> np.minimum([2, 3, 4], [1, 5, 2])
array([1, 3, 2])
>>> np.minimum(np.eye(2), [0.5, 2]) # broadcasting
array([[ 0.5, 0. ],
[ 0. , 1. ]])
>>> np.minimum([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ NaN, NaN, NaN])
""")
# Docstring for np.fmax (NaN-ignoring element-wise maximum).
# Fixes: the Returns section previously said "The minimum of `x1` and `x2`"
# (copy/paste from fmin), and the first example showed float output
# ``array([ 2., 5., 4.])`` even though integer inputs yield an integer array.
add_newdoc('numpy.core.umath', 'fmax',
"""
Element-wise maximum of array elements.
Compare two arrays and returns a new array containing the element-wise
maxima. If one of the elements being compared is a nan, then the non-nan
element is returned. If both elements are nans then the first is returned.
The latter distinction is important for complex nans, which are defined as
at least one of the real or imaginary parts being a nan. The net effect is
that nans are ignored when possible.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape.
Returns
-------
y : {ndarray, scalar}
The maximum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
fmin :
element-wise minimum that ignores nans unless both inputs are nans.
maximum :
element-wise maximum that propagates nans.
minimum :
element-wise minimum that propagates nans.
Notes
-----
.. versionadded:: 1.3.0
The fmax is equivalent to ``np.where(x1 >= x2, x1, x2)`` when neither
x1 nor x2 are nans, but it is faster and does proper broadcasting.
Examples
--------
>>> np.fmax([2, 3, 4], [1, 5, 2])
array([2, 5, 4])
>>> np.fmax(np.eye(2), [0.5, 2])
array([[ 1. , 2. ],
[ 0.5, 2. ]])
>>> np.fmax([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ 0., 0., NaN])
""")
# Docstring for np.fmin (NaN-ignoring element-wise minimum).
# Fixes: the first two example outputs were copied from fmax and showed the
# element-wise *maxima*. Actual results:
#   np.fmin([2, 3, 4], [1, 5, 2])    -> array([1, 3, 2])
#   np.fmin(np.eye(2), [0.5, 2])     -> [[0.5, 0.], [0., 1.]]
add_newdoc('numpy.core.umath', 'fmin',
"""
fmin(x1, x2[, out])
Element-wise minimum of array elements.
Compare two arrays and returns a new array containing the element-wise
minima. If one of the elements being compared is a nan, then the non-nan
element is returned. If both elements are nans then the first is returned.
The latter distinction is important for complex nans, which are defined as
at least one of the real or imaginary parts being a nan. The net effect is
that nans are ignored when possible.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape.
Returns
-------
y : {ndarray, scalar}
The minimum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
fmax :
element-wise maximum that ignores nans unless both inputs are nans.
maximum :
element-wise maximum that propagates nans.
minimum :
element-wise minimum that propagates nans.
Notes
-----
.. versionadded:: 1.3.0
The fmin is equivalent to ``np.where(x1 <= x2, x1, x2)`` when neither
x1 nor x2 are nans, but it is faster and does proper broadcasting.
Examples
--------
>>> np.fmin([2, 3, 4], [1, 5, 2])
array([1, 3, 2])
>>> np.fmin(np.eye(2), [0.5, 2])
array([[ 0.5, 0. ],
[ 0. , 1. ]])
>>> np.fmin([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ 0., 0., NaN])
""")
add_newdoc('numpy.core.umath', 'modf',
"""
Return the fractional and integral parts of an array, element-wise.
The fractional and integral parts are negative if the given number is
negative.
Parameters
----------
x : array_like
Input array.
Returns
-------
y1 : ndarray
Fractional part of `x`.
y2 : ndarray
Integral part of `x`.
Notes
-----
For integer input the return values are floats.
Examples
--------
>>> np.modf([0, 3.5])
(array([ 0. , 0.5]), array([ 0., 3.]))
>>> np.modf(-0.5)
(-0.5, -0)
""")
add_newdoc('numpy.core.umath', 'multiply',
"""
Multiply arguments element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays to be multiplied.
Returns
-------
y : ndarray
The product of `x1` and `x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to `x1` * `x2` in terms of array broadcasting.
Examples
--------
>>> np.multiply(2.0, 4.0)
8.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.multiply(x1, x2)
array([[ 0., 1., 4.],
[ 0., 4., 10.],
[ 0., 7., 16.]])
""")
add_newdoc('numpy.core.umath', 'negative',
"""
Returns an array with the negative of each element of the original array.
Parameters
----------
x : array_like or scalar
Input array.
Returns
-------
y : ndarray or scalar
Returned array or scalar: `y = -x`.
Examples
--------
>>> np.negative([1.,-1.])
array([-1., 1.])
""")
add_newdoc('numpy.core.umath', 'not_equal',
"""
Return (x1 != x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
out : ndarray, optional
A placeholder the same shape as `x1` to store the result.
See `doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
not_equal : ndarray bool, scalar bool
For each element in `x1, x2`, return True if `x1` is not equal
to `x2` and False otherwise.
See Also
--------
equal, greater, greater_equal, less, less_equal
Examples
--------
>>> np.not_equal([1.,2.], [1., 3.])
array([False, True], dtype=bool)
>>> np.not_equal([1, 2], [[1, 3],[1, 4]])
array([[False, True],
[False, True]], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'ones_like',
"""
Returns an array of ones with the same shape and type as a given array.
Equivalent to ``a.copy().fill(1)``.
Please refer to the documentation for `zeros_like` for further details.
See Also
--------
zeros_like, ones
Examples
--------
>>> a = np.array([[1, 2, 3], [4, 5, 6]])
>>> np.ones_like(a)
array([[1, 1, 1],
[1, 1, 1]])
""")
add_newdoc('numpy.core.umath', 'power',
"""
First array elements raised to powers from second array, element-wise.
Raise each base in `x1` to the positionally-corresponding power in
`x2`. `x1` and `x2` must be broadcastable to the same shape.
Parameters
----------
x1 : array_like
The bases.
x2 : array_like
The exponents.
Returns
-------
y : ndarray
The bases in `x1` raised to the exponents in `x2`.
Examples
--------
Cube each element in a list.
>>> x1 = range(6)
>>> x1
[0, 1, 2, 3, 4, 5]
>>> np.power(x1, 3)
array([ 0, 1, 8, 27, 64, 125])
Raise the bases to different exponents.
>>> x2 = [1.0, 2.0, 3.0, 3.0, 2.0, 1.0]
>>> np.power(x1, x2)
array([ 0., 1., 8., 27., 16., 5.])
The effect of broadcasting.
>>> x2 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]])
>>> x2
array([[1, 2, 3, 3, 2, 1],
[1, 2, 3, 3, 2, 1]])
>>> np.power(x1, x2)
array([[ 0, 1, 8, 27, 16, 5],
[ 0, 1, 8, 27, 16, 5]])
""")
add_newdoc('numpy.core.umath', 'radians',
"""
Convert angles from degrees to radians.
Parameters
----------
x : array_like
Input array in degrees.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding radian values.
See Also
--------
deg2rad : equivalent function
Examples
--------
Convert a degree array to radians
>>> deg = np.arange(12.) * 30.
>>> np.radians(deg)
array([ 0. , 0.52359878, 1.04719755, 1.57079633, 2.0943951 ,
2.61799388, 3.14159265, 3.66519143, 4.1887902 , 4.71238898,
5.23598776, 5.75958653])
>>> out = np.zeros((deg.shape))
>>> ret = np.radians(deg, out)
>>> ret is out
True
""")
add_newdoc('numpy.core.umath', 'deg2rad',
"""
Convert angles from degrees to radians.
Parameters
----------
x : array_like
Angles in degrees.
Returns
-------
y : ndarray
The corresponding angle in radians.
See Also
--------
rad2deg : Convert angles from radians to degrees.
unwrap : Remove large jumps in angle by wrapping.
Notes
-----
.. versionadded:: 1.3.0
``deg2rad(x)`` is ``x * pi / 180``.
Examples
--------
>>> np.deg2rad(180)
3.1415926535897931
""")
add_newdoc('numpy.core.umath', 'reciprocal',
"""
Return the reciprocal of the argument, element-wise.
Calculates ``1/x``.
Parameters
----------
x : array_like
Input array.
Returns
-------
y : ndarray
Return array.
Notes
-----
.. note::
This function is not designed to work with integers.
For integer arguments with absolute value larger than 1 the result is
always zero because of the way Python handles integer division.
For integer zero the result is an overflow.
Examples
--------
>>> np.reciprocal(2.)
0.5
>>> np.reciprocal([1, 2., 3.33])
array([ 1. , 0.5 , 0.3003003])
""")
add_newdoc('numpy.core.umath', 'remainder',
"""
Return element-wise remainder of division.
Computes ``x1 - floor(x1 / x2) * x2``.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : ndarray
The remainder of the quotient ``x1/x2``, element-wise. Returns a scalar
if both `x1` and `x2` are scalars.
See Also
--------
divide, floor
Notes
-----
Returns 0 when `x2` is 0 and both `x1` and `x2` are (arrays of) integers.
Examples
--------
>>> np.remainder([4, 7], [2, 3])
array([0, 1])
>>> np.remainder(np.arange(7), 5)
array([0, 1, 2, 3, 4, 0, 1])
""")
add_newdoc('numpy.core.umath', 'right_shift',
"""
Shift the bits of an integer to the right.
Bits are shifted to the right by removing `x2` bits at the right of `x1`.
Since the internal representation of numbers is in binary format, this
operation is equivalent to dividing `x1` by ``2**x2``.
Parameters
----------
x1 : array_like, int
Input values.
x2 : array_like, int
Number of bits to remove at the right of `x1`.
Returns
-------
out : ndarray, int
Return `x1` with bits shifted `x2` times to the right.
See Also
--------
left_shift : Shift the bits of an integer to the left.
binary_repr : Return the binary representation of the input number
as a string.
Examples
--------
>>> np.binary_repr(10)
'1010'
>>> np.right_shift(10, 1)
5
>>> np.binary_repr(5)
'101'
>>> np.right_shift(10, [1,2,3])
array([5, 2, 1])
""")
add_newdoc('numpy.core.umath', 'rint',
"""
Round elements of the array to the nearest integer.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : {ndarray, scalar}
Output array is same shape and type as `x`.
See Also
--------
ceil, floor, trunc
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.rint(a)
array([-2., -2., -0., 0., 2., 2., 2.])
""")
add_newdoc('numpy.core.umath', 'sign',
"""
Returns an element-wise indication of the sign of a number.
The `sign` function returns ``-1 if x < 0, 0 if x==0, 1 if x > 0``.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
The sign of `x`.
Examples
--------
>>> np.sign([-5., 4.5])
array([-1., 1.])
>>> np.sign(0)
0
""")
add_newdoc('numpy.core.umath', 'signbit',
"""
Returns element-wise True where signbit is set (less than zero).
Parameters
----------
x: array_like
The input value(s).
out : ndarray, optional
Array into which the output is placed. Its type is preserved
and it must be of the right shape to hold the output.
See `doc.ufuncs`.
Returns
-------
result : ndarray of bool
Output array, or reference to `out` if that was supplied.
Examples
--------
>>> np.signbit(-1.2)
True
>>> np.signbit(np.array([1, -2.3, 2.1]))
array([False, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'copysign',
"""
Change the sign of x1 to that of x2, element-wise.
If both arguments are arrays or sequences, they have to be of the same
length. If `x2` is a scalar, its sign will be copied to all elements of
`x1`.
Parameters
----------
x1: array_like
Values to change the sign of.
x2: array_like
The sign of `x2` is copied to `x1`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
out : array_like
The values of `x1` with the sign of `x2`.
Examples
--------
>>> np.copysign(1.3, -1)
-1.3
>>> 1/np.copysign(0, 1)
inf
>>> 1/np.copysign(0, -1)
-inf
>>> np.copysign([-1, 0, 1], -1.1)
array([-1., -0., -1.])
>>> np.copysign([-1, 0, 1], np.arange(3)-1)
array([-1., 0., 1.])
""")
add_newdoc('numpy.core.umath', 'nextafter',
"""
Return the next representable floating-point value after x1 in the direction
of x2 element-wise.
Parameters
----------
x1 : array_like
Values to find the next representable value of.
x2 : array_like
The direction where to look for the next representable value of `x1`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
out : array_like
The next representable values of `x1` in the direction of `x2`.
Examples
--------
>>> eps = np.finfo(np.float64).eps
>>> np.nextafter(1, 2) == eps + 1
True
>>> np.nextafter([1, 2], [2, 1]) == [eps + 1, 2 - eps]
array([ True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'spacing',
"""
Return the distance between x and the nearest adjacent number.
Parameters
----------
x1: array_like
Values to find the spacing of.
Returns
-------
out : array_like
The spacing of values of `x1`.
Notes
-----
It can be considered as a generalization of EPS:
``spacing(np.float64(1)) == np.finfo(np.float64).eps``, and there
should not be any representable number between ``x + spacing(x)`` and
x for any finite x.
Spacing of +- inf and nan is nan.
Examples
--------
>>> np.spacing(1) == np.finfo(np.float64).eps
True
""")
add_newdoc('numpy.core.umath', 'sin',
"""
Trigonometric sine, element-wise.
Parameters
----------
x : array_like
Angle, in radians (:math:`2 \\pi` rad equals 360 degrees).
Returns
-------
y : array_like
The sine of each element of x.
See Also
--------
arcsin, sinh, cos
Notes
-----
The sine is one of the fundamental functions of trigonometry
(the mathematical study of triangles). Consider a circle of radius
1 centered on the origin. A ray comes in from the :math:`+x` axis,
makes an angle at the origin (measured counter-clockwise from that
axis), and departs from the origin. The :math:`y` coordinate of
the outgoing ray's intersection with the unit circle is the sine
of that angle. It ranges from -1 for :math:`x=3\\pi / 2` to
+1 for :math:`\\pi / 2.` The function has zeroes where the angle is
a multiple of :math:`\\pi`. Sines of angles between :math:`\\pi` and
:math:`2\\pi` are negative. The numerous properties of the sine and
related functions are included in any standard trigonometry text.
Examples
--------
Print sine of one angle:
>>> np.sin(np.pi/2.)
1.0
Print sines of an array of angles given in degrees:
>>> np.sin(np.array((0., 30., 45., 60., 90.)) * np.pi / 180. )
array([ 0. , 0.5 , 0.70710678, 0.8660254 , 1. ])
Plot the sine function:
>>> import matplotlib.pylab as plt
>>> x = np.linspace(-np.pi, np.pi, 201)
>>> plt.plot(x, np.sin(x))
>>> plt.xlabel('Angle [rad]')
>>> plt.ylabel('sin(x)')
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'sinh',
"""
Hyperbolic sine, element-wise.
Equivalent to ``1/2 * (np.exp(x) - np.exp(-x))`` or
``-1j * np.sin(1j*x)``.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding hyperbolic sine values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972, pg. 83.
Examples
--------
>>> np.sinh(0)
0.0
>>> np.sinh(np.pi*1j/2)
1j
>>> np.sinh(np.pi*1j) # (exact value is 0)
1.2246063538223773e-016j
>>> # Discrepancy due to vagaries of floating point arithmetic.
>>> # Example of providing the optional output parameter
>>> out2 = np.sinh([0.1], out1)
>>> out2 is out1
True
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.sinh(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'sqrt',
"""
Return the positive square-root of an array, element-wise.
Parameters
----------
x : array_like
The values whose square-roots are required.
out : ndarray, optional
Alternate array object in which to put the result; if provided, it
must have the same shape as `x`
Returns
-------
y : ndarray
An array of the same shape as `x`, containing the positive
square-root of each element in `x`. If any element in `x` is
complex, a complex array is returned (and the square-roots of
negative reals are calculated). If all of the elements in `x`
are real, so is `y`, with negative elements returning ``nan``.
If `out` was provided, `y` is a reference to it.
See Also
--------
lib.scimath.sqrt
A version which returns complex numbers when given negative reals.
Notes
-----
*sqrt* has--consistent with common convention--as its branch cut the
real "interval" [`-inf`, 0), and is continuous from above on it.
(A branch cut is a curve in the complex plane across which a given
complex function fails to be continuous.)
Examples
--------
>>> np.sqrt([1,4,9])
array([ 1., 2., 3.])
>>> np.sqrt([4, -1, -3+4J])
array([ 2.+0.j, 0.+1.j, 1.+2.j])
>>> np.sqrt([4, -1, numpy.inf])
array([ 2., NaN, Inf])
""")
add_newdoc('numpy.core.umath', 'square',
"""
Return the element-wise square of the input.
Parameters
----------
x : array_like
Input data.
Returns
-------
out : ndarray
Element-wise `x*x`, of the same shape and dtype as `x`.
Returns scalar if `x` is a scalar.
See Also
--------
numpy.linalg.matrix_power
sqrt
power
Examples
--------
>>> np.square([-1j, 1])
array([-1.-0.j, 1.+0.j])
""")
add_newdoc('numpy.core.umath', 'subtract',
"""
Subtract arguments, element-wise.
Parameters
----------
x1, x2 : array_like
The arrays to be subtracted from each other.
Returns
-------
y : ndarray
The difference of `x1` and `x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to ``x1 - x2`` in terms of array broadcasting.
Examples
--------
>>> np.subtract(1.0, 4.0)
-3.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.subtract(x1, x2)
array([[ 0., 0., 0.],
[ 3., 3., 3.],
[ 6., 6., 6.]])
""")
add_newdoc('numpy.core.umath', 'tan',
"""
Compute tangent element-wise.
Equivalent to ``np.sin(x)/np.cos(x)`` element-wise.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding tangent values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972.
Examples
--------
>>> from math import pi
>>> np.tan(np.array([-pi,pi/2,pi]))
array([ 1.22460635e-16, 1.63317787e+16, -1.22460635e-16])
>>>
>>> # Example of providing the optional output parameter illustrating
>>> # that what is returned is a reference to said parameter
>>> out2 = np.cos([0.1], out1)
>>> out2 is out1
True
>>>
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.cos(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'tanh',
"""
Compute hyperbolic tangent element-wise.
Equivalent to ``np.sinh(x)/np.cosh(x)`` or
``-1j * np.tan(1j*x)``.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding hyperbolic tangent values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
.. [1] M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972, pg. 83.
http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Hyperbolic function",
http://en.wikipedia.org/wiki/Hyperbolic_function
Examples
--------
>>> np.tanh((0, np.pi*1j, np.pi*1j/2))
array([ 0. +0.00000000e+00j, 0. -1.22460635e-16j, 0. +1.63317787e+16j])
>>> # Example of providing the optional output parameter illustrating
>>> # that what is returned is a reference to said parameter
>>> out2 = np.tanh([0.1], out1)
>>> out2 is out1
True
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.tanh(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'true_divide',
"""
Returns a true division of the inputs, element-wise.
Instead of the Python traditional 'floor division', this returns a true
division. True division adjusts the output type to present the best
answer, regardless of input types.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
Returns
-------
out : ndarray
Result is scalar if both inputs are scalar, ndarray otherwise.
Notes
-----
The floor division operator ``//`` was added in Python 2.2 making ``//``
and ``/`` equivalent operators. The default floor division operation of
``/`` can be replaced by true division with
``from __future__ import division``.
In Python 3.0, ``//`` is the floor division operator and ``/`` the
true division operator. The ``true_divide(x1, x2)`` function is
equivalent to true division in Python.
Examples
--------
>>> x = np.arange(5)
>>> np.true_divide(x, 4)
array([ 0. , 0.25, 0.5 , 0.75, 1. ])
>>> x/4
array([0, 0, 0, 0, 1])
>>> x//4
array([0, 0, 0, 0, 1])
>>> from __future__ import division
>>> x/4
array([ 0. , 0.25, 0.5 , 0.75, 1. ])
>>> x//4
array([0, 0, 0, 0, 1])
""")
| gpl-3.0 |
tmenjo/cinder-2015.1.1 | cinder/backup/drivers/nfs.py | 2 | 6196 | # Copyright (C) 2015 Tom Barron <tpb@dyncloud.net>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of a backup service that uses NFS storage as the backend."""
import os
import os.path
from oslo_config import cfg
from oslo_log import log as logging
from cinder.backup import chunkeddriver
from cinder.brick.remotefs import remotefs as remotefs_brick
from cinder import exception
from cinder.i18n import _
from cinder import utils
LOG = logging.getLogger(__name__)

# Default granularity (bytes) at which incremental-backup change tracking
# hashes are computed (see backup_sha_block_size_bytes below).
SHA_SIZE = 32768
# Multiple of SHA_SIZE, close to a characteristic OS max file system size.
BACKUP_FILE_SIZE = 61035 * 32768

# Configuration options exposed by the NFS backup driver.
nfsbackup_service_opts = [
    cfg.IntOpt('backup_file_size',
               default=BACKUP_FILE_SIZE,
               help='The maximum size in bytes of the files used to hold '
                    'backups. If the volume being backed up exceeds this '
                    'size, then it will be backed up into multiple files. '
                    'backup_file_size must be a multiple of '
                    'backup_sha_block_size_bytes.'),
    cfg.IntOpt('backup_sha_block_size_bytes',
               default=SHA_SIZE,
               help='The size in bytes that changes are tracked '
                    'for incremental backups. backup_file_size '
                    'has to be multiple of backup_sha_block_size_bytes.'),
    cfg.BoolOpt('backup_enable_progress_timer',
                default=True,
                help='Enable or Disable the timer to send the periodic '
                     'progress notifications to Ceilometer when backing '
                     'up the volume to the backend storage. The '
                     'default value is True to enable the timer.'),
    cfg.StrOpt('backup_mount_point_base',
               default='$state_path/backup_mount',
               help='Base dir containing mount point for NFS share.'),
    cfg.StrOpt('backup_share',
               default=None,
               help='NFS share in fqdn:path, ipv4addr:path, '
                    'or "[ipv6addr]:path" format.'),
    cfg.StrOpt('backup_mount_options',
               default=None,
               help=('Mount options passed to the NFS client. See NFS '
                     'man page for details.')),
    cfg.StrOpt('backup_container',
               help='Custom container to use for backups.'),
]

# Register the options so they are available on the global CONF object.
CONF = cfg.CONF
CONF.register_opts(nfsbackup_service_opts)
class NFSBackupDriver(chunkeddriver.ChunkedBackupDriver):
    """Provides backup, restore and delete using NFS supplied repository."""

    def __init__(self, context, db_driver=None):
        # Fail fast if mandatory configuration is missing before wiring up
        # the chunked-driver base class.
        self._check_configuration()
        super(NFSBackupDriver, self).__init__(
            context,
            CONF.backup_file_size,
            CONF.backup_sha_block_size_bytes,
            CONF.backup_container,
            CONF.backup_enable_progress_timer,
            db_driver)
        self.backup_mount_point_base = CONF.backup_mount_point_base
        self.backup_share = CONF.backup_share
        self.mount_options = CONF.backup_mount_options or {}
        # Mount the share and remember where the repository lives.
        self.backup_path = self._init_backup_repo_path()
        LOG.debug("Using NFS backup repository: %s", self.backup_path)

    @staticmethod
    def _check_configuration():
        """Raises error if any required configuration flag is missing."""
        for flag in ('backup_share',):
            if getattr(CONF, flag, None):
                continue
            raise exception.ConfigNotFound(_(
                'Required flag %s is not set') % flag)

    def _init_backup_repo_path(self):
        """Mount the configured NFS share and return its local mount point."""
        client = remotefs_brick.RemoteFsClient(
            'nfs',
            utils.get_root_helper(),
            nfs_mount_point_base=self.backup_mount_point_base,
            nfs_mount_options=self.mount_options)
        client.mount(self.backup_share)
        return client.get_mount_point(self.backup_share)

    def update_container_name(self, backup, container):
        """Return the explicit container, or derive one from the backup id."""
        if container is not None:
            return container
        backup_id = backup['id']
        # Shard backups into <id[0:2]>/<id[2:4]>/<id> subdirectories.
        return os.path.join(backup_id[0:2], backup_id[2:4], backup_id)

    def put_container(self, container):
        """Create the container directory if it does not already exist."""
        path = os.path.join(self.backup_path, container)
        if os.path.exists(path):
            return
        os.makedirs(path)
        os.chmod(path, 0o770)

    def get_container_entries(self, container, prefix):
        """Return entries in the container whose names start with prefix."""
        container_path = os.path.join(self.backup_path, container)
        return [name for name in os.listdir(container_path)
                if name.startswith(prefix)]

    def get_object_writer(self, container, object_name, extra_metadata=None):
        """Open and return a writable file object for a backup object."""
        path = os.path.join(self.backup_path, container, object_name)
        writer = open(path, 'w')
        os.chmod(path, 0o660)
        return writer

    def get_object_reader(self, container, object_name, extra_metadata=None):
        """Open and return a readable file object for a backup object."""
        path = os.path.join(self.backup_path, container, object_name)
        return open(path, 'r')

    def delete_object(self, container, object_name):
        """Remove the named backup object from the repository."""
        # TODO(tbarron): clean up the container path if it is empty
        os.remove(os.path.join(self.backup_path, container, object_name))

    def _generate_object_name_prefix(self, backup):
        """Every object name in this driver shares a fixed 'backup' prefix."""
        return 'backup'

    def get_extra_metadata(self, backup, volume):
        """No driver-specific metadata is stored for NFS backups."""
        return None
def get_backup_driver(context):
    """Return an NFSBackupDriver instance for the given request context."""
    return NFSBackupDriver(context)
| apache-2.0 |
bdestombe/flopy-1 | flopy/utils/mflistfile.py | 1 | 25207 | """
This is a set of classes for reading budget information out of MODFLOW-style
listing files. Cumulative and incremental budgets are returned as numpy
recarrays, which can then be easily plotted.
"""
import collections
import os
import re
import sys
from datetime import timedelta
import numpy as np
from ..utils.utils_def import totim_to_datetime
class ListBudget(object):
    """
    MODFLOW family list file handling

    Parameters
    ----------
    file_name : str
        the list file name
    budgetkey : str
        the text string identifying the budget table. (default is None)
    timeunit : str
        the time unit to return in the recarray. (default is 'days')

    Notes
    -----
    The ListBudget class should not be instantiated directly.  Access is
    through derived classes: MfListBudget (MODFLOW), SwtListBudget (SEAWAT)
    and SwrListBudget (MODFLOW with the SWR process)

    Examples
    --------
    >>> mf_list = MfListBudget("my_model.list")
    >>> incremental, cumulative = mf_list.get_budget()
    >>> df_in, df_out = mf_list.get_dataframes(start_datetime="10-21-2015")

    """

    def __init__(self, file_name, budgetkey=None, timeunit='days'):
        # Set up file reading
        assert os.path.exists(file_name)
        self.file_name = file_name
        if sys.version_info[0] == 2:
            self.f = open(file_name, 'r')
        elif sys.version_info[0] == 3:
            self.f = open(file_name, 'r', encoding='ascii', errors='replace')
        self.tssp_lines = 0

        # Assign the budgetkey, which should have been overridden
        if budgetkey is None:
            self.set_budget_key()
        else:
            self.budgetkey = budgetkey

        self.totim = []
        self.timeunit = timeunit
        self.idx_map = []
        self.entries = []
        self.null_entries = []

        # column offset at which time values start in the time summary table
        self.time_line_idx = 20
        if timeunit.upper() == 'SECONDS':
            self.timeunit = 'S'
            self.time_idx = 0
        elif timeunit.upper() == 'MINUTES':
            self.timeunit = 'M'
            self.time_idx = 1
        elif timeunit.upper() == 'HOURS':
            self.timeunit = 'H'
            self.time_idx = 2
        elif timeunit.upper() == 'DAYS':
            self.timeunit = 'D'
            self.time_idx = 3
        elif timeunit.upper() == 'YEARS':
            self.timeunit = 'Y'
            self.time_idx = 4
        else:
            raise Exception('need to reset time_idxs attribute to '
                            'use units other than days and check usage of '
                            'timedelta')

        # Fill budget recarrays
        self._load()
        self._isvalid = False
        if len(self.idx_map) > 0:
            self._isvalid = True

        # Close the open file
        self.f.close()
        return

    def set_budget_key(self):
        # Subclasses must set self.budgetkey to the header string of the
        # budget table they are looking for.
        raise Exception('Must be overridden...')

    def isvalid(self):
        """
        Get a boolean indicating if budget data are available in the file.

        Returns
        -------
        out : boolean
            Boolean indicating if budget data are available in the file.

        Examples
        --------
        >>> mf_list = MfListBudget('my_model.list')
        >>> valid = mf_list.isvalid()

        """
        return self._isvalid

    def get_record_names(self):
        """
        Get a list of water budget record names in the file.

        Returns
        -------
        out : list of strings
            List of unique text names in the binary file.

        Examples
        --------
        >>> mf_list = MfListBudget('my_model.list')
        >>> names = mf_list.get_record_names()

        """
        if not self._isvalid:
            return None
        return self.inc.dtype.names

    def get_times(self):
        """
        Get a list of unique water budget times in the list file.

        Returns
        -------
        out : list of floats
            List contains unique water budget simulation times (totim) in
            list file.

        Examples
        --------
        >>> mf_list = MfListBudget('my_model.list')
        >>> times = mf_list.get_times()

        """
        if not self._isvalid:
            return None
        return self.inc['totim'].tolist()

    def get_kstpkper(self):
        """
        Get a list of unique stress periods and time steps in the list file
        water budgets.

        Returns
        ----------
        out : list of (kstp, kper) tuples
            List of unique kstp, kper combinations in list file.  kstp and
            kper values are zero-based.

        Examples
        --------
        >>> mf_list = MfListBudget("my_model.list")
        >>> kstpkper = mf_list.get_kstpkper()

        """
        if not self._isvalid:
            return None
        kstpkper = []
        for kstp, kper in zip(self.inc['time_step'],
                              self.inc['stress_period']):
            kstpkper.append((kstp, kper))
        return kstpkper

    def get_incremental(self, names=None):
        """
        Get a recarray with the incremental water budget items in the
        list file.

        Parameters
        ----------
        names : str or list of strings
            Selection of column names to return.  If names is not None then
            totim, time_step, stress_period, and selection(s) will be
            returned. (default is None).

        Returns
        -------
        out : recarray
            Numpy recarray with the water budget items in list file. The
            recarray also includes totim, time_step, and stress_period.

        Examples
        --------
        >>> mf_list = MfListBudget("my_model.list")
        >>> incremental = mf_list.get_incremental()

        """
        if not self._isvalid:
            return None
        if names is None:
            return self.inc
        else:
            if not isinstance(names, list):
                names = [names]
            names.insert(0, 'stress_period')
            names.insert(0, 'time_step')
            names.insert(0, 'totim')
            return self.inc[names].view(np.recarray)

    def get_cumulative(self, names=None):
        """
        Get a recarray with the cumulative water budget items in the
        list file.

        Parameters
        ----------
        names : str or list of strings
            Selection of column names to return.  If names is not None then
            totim, time_step, stress_period, and selection(s) will be
            returned. (default is None).

        Returns
        -------
        out : recarray
            Numpy recarray with the water budget items in list file. The
            recarray also includes totim, time_step, and stress_period.

        Examples
        --------
        >>> mf_list = MfListBudget("my_model.list")
        >>> cumulative = mf_list.get_cumulative()

        """
        if not self._isvalid:
            return None
        if names is None:
            return self.cum
        else:
            if not isinstance(names, list):
                names = [names]
            names.insert(0, 'stress_period')
            names.insert(0, 'time_step')
            names.insert(0, 'totim')
            return self.cum[names].view(np.recarray)

    def get_budget(self, names=None):
        """
        Get the recarrays with the incremental and cumulative water budget
        items in the list file.

        Parameters
        ----------
        names : str or list of strings
            Selection of column names to return.  If names is not None then
            totim, time_step, stress_period, and selection(s) will be
            returned. (default is None).

        Returns
        -------
        out : recarrays
            Numpy recarrays with the water budget items in list file. The
            recarray also includes totim, time_step, and stress_period. A
            separate recarray is returned for the incremental and cumulative
            water budget entries.

        Examples
        --------
        >>> mf_list = MfListBudget("my_model.list")
        >>> budget = mf_list.get_budget()

        """
        if not self._isvalid:
            return None
        if names is None:
            return self.inc, self.cum
        else:
            if not isinstance(names, list):
                names = [names]
            names.insert(0, 'stress_period')
            names.insert(0, 'time_step')
            names.insert(0, 'totim')
            return self.inc[names].view(np.recarray), self.cum[names].view(
                np.recarray)

    def get_data(self, kstpkper=None, idx=None, totim=None, incremental=False):
        """
        Get water budget data from the list file for the specified conditions.

        Parameters
        ----------
        idx : int
            The zero-based record number.  The first record is record 0.
            (default is None).
        kstpkper : tuple of ints
            A tuple containing the time step and stress period (kstp, kper).
            These are zero-based kstp and kper values. (default is None).
        totim : float
            The simulation time. (default is None).
        incremental : bool
            Boolean flag used to determine if incremental or cumulative water
            budget data for the specified conditions will be returned. If
            incremental=True, incremental water budget data will be returned.
            If incremental=False, cumulative water budget data will be
            returned. (default is False).

        Returns
        -------
        data : numpy recarray
            Array has size (number of budget items, 3). Recarray names are
            'index', 'value', 'name'.

        See Also
        --------

        Notes
        -----
        if both kstpkper and totim are None, will return the last entry

        Examples
        --------
        >>> import matplotlib.pyplot as plt
        >>> import flopy
        >>> mf_list = flopy.utils.MfListBudget("my_model.list")
        >>> data = mf_list.get_data(kstpkper=(0,0))
        >>> plt.bar(data['index'], data['value'])
        >>> plt.xticks(data['index'], data['name'], rotation=45, size=6)
        >>> plt.show()

        """
        if not self._isvalid:
            return None
        ipos = None
        if kstpkper is not None:
            try:
                ipos = self.get_kstpkper().index(kstpkper)
            except Exception:
                pass
        elif totim is not None:
            try:
                ipos = self.get_times().index(totim)
            except Exception:
                pass
        elif idx is not None:
            ipos = idx
        else:
            ipos = -1

        if ipos is None:
            print('Could not find specified condition.')
            print('  kstpkper = {}'.format(kstpkper))
            print('  totim = {}'.format(totim))
            return None

        if incremental:
            t = self.inc[ipos]
        else:
            t = self.cum[ipos]

        dtype = np.dtype(
            [('index', np.int32), ('value', np.float32), ('name', '|S25')])
        v = np.recarray(shape=(len(self.inc.dtype.names[3:])), dtype=dtype)
        for i, name in enumerate(self.inc.dtype.names[3:]):
            mult = 1.
            if '_OUT' in name:
                mult = -1.
            v[i]['index'] = i
            v[i]['value'] = mult * t[name]
            v[i]['name'] = name
        return v

    def get_dataframes(self, start_datetime='1-1-1970', diff=False):
        """
        Get pandas dataframes with the incremental and cumulative water
        budget items in the list file.

        Parameters
        ----------
        start_datetime : str
            If start_datetime is passed as None, the rows are indexed on
            totim.  Otherwise, a DatetimeIndex is set.
            (default is 1-1-1970).
        diff : bool
            If True, the matching _IN/_OUT column pairs are collapsed into
            single net (IN minus OUT) columns. (default is False).

        Returns
        -------
        out : panda dataframes
            Pandas dataframes with the incremental and cumulative water
            budget items in list file. A separate pandas dataframe is
            returned for the incremental and cumulative water budget entries.

        Examples
        --------
        >>> mf_list = MfListBudget("my_model.list")
        >>> incrementaldf, cumulativedf = mf_list.get_dataframes()

        """
        try:
            import pandas as pd
        except Exception as e:
            raise Exception(
                "ListBudget.get_dataframe() error import pandas: " + \
                str(e))

        if not self._isvalid:
            return None
        totim = self.get_times()
        if start_datetime is not None:
            totim = totim_to_datetime(totim,
                                      start=pd.to_datetime(start_datetime),
                                      timeunit=self.timeunit)

        df_flux = pd.DataFrame(self.inc, index=totim).loc[:, self.entries]
        df_vol = pd.DataFrame(self.cum, index=totim).loc[:, self.entries]

        if not diff:
            return df_flux, df_vol
        else:
            in_names = [col for col in df_flux.columns if col.endswith("_IN")]

            base_names = [name.replace("_IN", '') for name in in_names]
            for name in base_names:
                in_name = name + "_IN"
                out_name = name + "_OUT"
                df_flux.loc[:, name.lower()] = df_flux.loc[:, in_name] - \
                                               df_flux.loc[:, out_name]
                df_flux.pop(in_name)
                df_flux.pop(out_name)
                df_vol.loc[:, name.lower()] = df_vol.loc[:, in_name] - \
                                              df_vol.loc[:, out_name]
                df_vol.pop(in_name)
                df_vol.pop(out_name)
            cols = list(df_flux.columns)
            cols.sort()
            cols = [col.lower() for col in cols]
            df_flux.columns = cols
            df_vol.columns = cols
            return df_flux, df_vol

    def _build_index(self, maxentries):
        # Cache the (ts, sp, seekpoint) tuples for every budget table found.
        self.idx_map = self._get_index(maxentries)
        return

    def _get_index(self, maxentries):
        # parse through the file looking for matches and parsing ts and sp
        idxs = []
        l_count = 1
        while True:
            seekpoint = self.f.tell()
            line = self.f.readline()
            if line == '':
                break
            if self.budgetkey in line:
                # skip any extra header lines (e.g. SWR budget tables)
                for l in range(self.tssp_lines):
                    line = self.f.readline()
                try:
                    ts, sp = self._get_ts_sp(line)
                except Exception:
                    print('unable to cast ts,sp on line number', l_count,
                          ' line: ', line)
                    break
                # print('info found for timestep stress period',ts,sp)
                idxs.append([ts, sp, seekpoint])

                if maxentries and len(idxs) >= maxentries:
                    break
        return idxs

    def _seek_to_string(self, s):
        """
        Parameters
        ----------
        s : str
            Seek through the file to the next occurrence of s.  Return the
            seek location when found.

        Returns
        -------
        seekpoint : int
            Next location of the string

        """
        while True:
            seekpoint = self.f.tell()
            line = self.f.readline()
            if line == '':
                break
            if s in line:
                break
        return seekpoint

    def _get_ts_sp(self, line):
        """
        From the line string, extract the time step and stress period numbers.

        """
        # Remove commas so that "TIME STEP 1, STRESS PERIOD 2" splits cleanly
        line = line.replace(',', '')
        searchstring = 'TIME STEP'
        idx = line.index(searchstring) + len(searchstring)
        ll = line[idx:].strip().split()
        ts = int(ll[0])
        searchstring = 'STRESS PERIOD'
        idx = line.index(searchstring) + len(searchstring)
        ll = line[idx:].strip().split()
        sp = int(ll[0])
        return ts, sp

    def _set_entries(self):
        # Determine the budget entry names from the first budget table and
        # return fresh (incremental, cumulative) accumulator dicts.
        if len(self.idx_map) < 1:
            return None, None
        if len(self.entries) > 0:
            raise Exception('entries already set:' + str(self.entries))
        if not self.idx_map:
            raise Exception('must call build_index before call set_entries')
        try:
            incdict, cumdict = self._get_sp(self.idx_map[0][0],
                                            self.idx_map[0][1],
                                            self.idx_map[0][2])
        except Exception:
            raise Exception('unable to read budget information from first '
                            'entry in list file')
        # materialize as a list so the entries survive reassignment of
        # incdict below and index cleanly into pandas DataFrames
        self.entries = list(incdict.keys())
        null_entries = collections.OrderedDict()
        incdict = collections.OrderedDict()
        cumdict = collections.OrderedDict()
        for entry in self.entries:
            incdict[entry] = []
            cumdict[entry] = []
            null_entries[entry] = np.nan
        self.null_entries = [null_entries, null_entries]
        return incdict, cumdict

    def _load(self, maxentries=None):
        # Parse every budget table in the file into the inc/cum recarrays.
        self._build_index(maxentries)
        incdict, cumdict = self._set_entries()
        if incdict is None and cumdict is None:
            return
        totim = []
        for ts, sp, seekpoint in self.idx_map:
            tinc, tcum = self._get_sp(ts, sp, seekpoint)
            for entry in self.entries:
                incdict[entry].append(tinc[entry])
                cumdict[entry].append(tcum[entry])

            # Get the time for this record
            seekpoint = self._seek_to_string('TIME SUMMARY AT END')
            tslen, sptim, tt = self._get_totim(ts, sp, seekpoint)
            totim.append(tt)

        # get kstp and kper
        idx_array = np.array(self.idx_map)

        # build dtype for recarray
        dtype_tups = [('totim', np.float32), ("time_step", np.int32),
                      ("stress_period", np.int32)]
        for entry in self.entries:
            dtype_tups.append((entry, np.float32))
        dtype = np.dtype(dtype_tups)

        # create recarray
        nentries = len(incdict[entry])
        self.inc = np.recarray(shape=(nentries,), dtype=dtype)
        self.cum = np.recarray(shape=(nentries,), dtype=dtype)

        # fill each column of the recarray
        for entry in self.entries:
            self.inc[entry] = incdict[entry]
            self.cum[entry] = cumdict[entry]

        # fill the totim, time_step, and stress_period columns for the
        # incremental and cumulative recarrays (zero-based kstp,kper)
        self.inc['totim'] = np.array(totim)[:]
        self.inc["time_step"] = idx_array[:, 0] - 1
        self.inc["stress_period"] = idx_array[:, 1] - 1

        self.cum['totim'] = np.array(totim)[:]
        self.cum["time_step"] = idx_array[:, 0] - 1
        self.cum["stress_period"] = idx_array[:, 1] - 1
        return

    def _get_sp(self, ts, sp, seekpoint):
        self.f.seek(seekpoint)
        # read to the start of the "in" budget information
        while True:
            line = self.f.readline()
            if line == '':
                print(
                    'end of file found while seeking budget information for ts,sp',
                    ts, sp)
                return self.null_entries

            # if there are two '=' in this line, then it is a budget line
            if len(re.findall('=', line)) == 2:
                break

        tag = 'IN'
        incdict = collections.OrderedDict()
        cumdict = collections.OrderedDict()
        while True:
            if line == '':
                print(
                    'end of file found while seeking budget information for ts,sp',
                    ts, sp)
                return self.null_entries
            if len(re.findall('=', line)) == 2:
                try:
                    entry, flux, cumu = self._parse_budget_line(line)
                except Exception:
                    # bug fix: this was "except e:", which raised a NameError
                    # instead of reporting the parse failure
                    print('error parsing budget line in ts,sp', ts, sp)
                    return self.null_entries
                if flux is None:
                    print(
                        'error casting in flux for', entry,
                        ' to float in ts,sp',
                        ts, sp)
                    return self.null_entries
                if cumu is None:
                    print(
                        'error casting in cumu for', entry,
                        ' to float in ts,sp',
                        ts, sp)
                    return self.null_entries
                if entry.endswith(tag.upper()):
                    if ' - ' in entry.upper():
                        key = entry.replace(' ', '')
                    else:
                        key = entry.replace(' ', '_')
                elif 'PERCENT DISCREPANCY' in entry.upper():
                    key = entry.replace(' ', '_')
                else:
                    key = '{}_{}'.format(entry.replace(' ', '_'), tag)
                incdict[key] = flux
                cumdict[key] = cumu
            else:
                # the "OUT:" header flips which section we are tagging
                if 'OUT:' in line.upper():
                    tag = 'OUT'
            line = self.f.readline()
            if entry.upper() == 'PERCENT DISCREPANCY':
                break

        return incdict, cumdict

    def _parse_budget_line(self, line):
        # get the budget item name
        entry = line.strip().split('=')[0].strip()

        # get the cumulative string
        idx = line.index('=') + 1
        line2 = line[idx:]
        ll = line2.strip().split()
        cu_str = ll[0]

        idx = line2.index('=') + 1
        fx_str = line2[idx:].strip()

        flux, cumu = None, None
        try:
            cumu = float(cu_str)
        except Exception:
            if 'NAN' in cu_str.strip().upper():
                cumu = np.nan
        try:
            flux = float(fx_str)
        except Exception:
            if 'NAN' in fx_str.strip().upper():
                flux = np.nan
        return entry, flux, cumu

    def _get_totim(self, ts, sp, seekpoint):
        self.f.seek(seekpoint)
        # read header lines
        ihead = 0
        while True:
            line = self.f.readline()
            ihead += 1
            if line == '':
                print(
                    'end of file found while seeking time information for ts,sp',
                    ts, sp)
                # bug fix: was "np.Nan", which is not a numpy attribute and
                # raised AttributeError instead of returning missing values
                return np.nan, np.nan, np.nan
            elif ihead == 2 and 'SECONDS     MINUTES      HOURS       DAYS        YEARS' not in line:
                break
            elif '-----------------------------------------------------------' in line:
                line = self.f.readline()
                break
        tslen = self._parse_time_line(line)
        if tslen is None:
            print('error parsing tslen for ts,sp', ts, sp)
            return np.nan, np.nan, np.nan

        sptim = self._parse_time_line(self.f.readline())
        if sptim is None:
            print('error parsing sptim for ts,sp', ts, sp)
            return np.nan, np.nan, np.nan

        totim = self._parse_time_line(self.f.readline())
        if totim is None:
            print('error parsing totim for ts,sp', ts, sp)
            return np.nan, np.nan, np.nan
        return tslen, sptim, totim

    def _parse_time_line(self, line):
        if line == '':
            print('end of file found while parsing time information')
            return None
        try:
            time_str = line[self.time_line_idx:]
            raw = time_str.split()
            idx = self.time_idx
            # catch case where itmuni is undefined
            # in this case, the table format is different
            try:
                v = float(raw[0])
            except Exception:
                time_str = line[45:]
                raw = time_str.split()
                idx = 0
            tval = float(raw[idx])
        except Exception:
            print('error parsing tslen information', time_str)
            return None
        return tval
class SwtListBudget(ListBudget):
    """ListBudget subclass keyed on the SEAWAT mass budget table."""
    def set_budget_key(self):
        # SEAWAT listing files report a mass (not volumetric) budget.
        self.budgetkey = 'MASS BUDGET FOR ENTIRE MODEL'
        return
class MfListBudget(ListBudget):
    """ListBudget subclass keyed on the MODFLOW volumetric budget table."""
    def set_budget_key(self):
        self.budgetkey = 'VOLUMETRIC BUDGET FOR ENTIRE MODEL'
        return
class MfusgListBudget(ListBudget):
    """ListBudget subclass keyed on the volumetric budget table (MfUSG)."""
    def set_budget_key(self):
        # same budget table header as MfListBudget
        self.budgetkey = 'VOLUMETRIC BUDGET FOR ENTIRE MODEL'
        return
class SwrListBudget(ListBudget):
    """ListBudget subclass keyed on the SWR surface water budget table."""
    def set_budget_key(self):
        self.budgetkey = 'VOLUMETRIC SURFACE WATER BUDGET FOR ENTIRE MODEL'
        # one extra line is skipped after the budget header before the
        # time step / stress period line is parsed (see _get_index)
        self.tssp_lines = 1
        return
| bsd-3-clause |
thiagopnts/servo | components/script/dom/bindings/codegen/parser/tests/test_exposed_extended_attribute.py | 127 | 6466 | import WebIDL
def WebIDLTest(parser, harness):
    """Exercise the [Exposed] extended attribute.

    Checks that exposure sets propagate correctly from interfaces, partial
    interfaces and mixins to their members, and that invalid [Exposed]
    values or inconsistent member/interface exposure are rejected.
    """

    def check_invalid(test_parser, idl, message):
        # Parse `idl` on the given (freshly reset) parser and assert that
        # finishing raises.  Replaces the repeated try/except boilerplate;
        # also replaces the Python-2-only "except Exception,x" syntax.
        threw = False
        try:
            test_parser.parse(idl)
            test_parser.finish()
        except Exception:
            threw = True
        harness.ok(threw, message)

    parser.parse("""
        [PrimaryGlobal] interface Foo {};
        [Global=(Bar1,Bar2)] interface Bar {};
        [Global=Baz2] interface Baz {};

        [Exposed=(Foo,Bar1)]
        interface Iface {
          void method1();

          [Exposed=Bar1]
          readonly attribute any attr;
        };

        [Exposed=Foo]
        partial interface Iface {
          void method2();
        };
    """)
    results = parser.finish()

    harness.check(len(results), 5, "Should know about five things");
    iface = results[3]
    harness.ok(isinstance(iface, WebIDL.IDLInterface),
               "Should have an interface here");
    members = iface.members
    harness.check(len(members), 3, "Should have three members")

    harness.ok(members[0].exposureSet == set(["Foo", "Bar"]),
               "method1 should have the right exposure set")
    harness.ok(members[0]._exposureGlobalNames == set(["Foo", "Bar1"]),
               "method1 should have the right exposure global names")

    harness.ok(members[1].exposureSet == set(["Bar"]),
               "attr should have the right exposure set")
    harness.ok(members[1]._exposureGlobalNames == set(["Bar1"]),
               "attr should have the right exposure global names")

    harness.ok(members[2].exposureSet == set(["Foo"]),
               "method2 should have the right exposure set")
    harness.ok(members[2]._exposureGlobalNames == set(["Foo"]),
               "method2 should have the right exposure global names")

    harness.ok(iface.exposureSet == set(["Foo", "Bar"]),
               "Iface should have the right exposure set")
    harness.ok(iface._exposureGlobalNames == set(["Foo", "Bar1"]),
               "Iface should have the right exposure global names")

    # An interface with no [Exposed] defaults to the primary global.
    parser = parser.reset()
    parser.parse("""
        [PrimaryGlobal] interface Foo {};
        [Global=(Bar1,Bar2)] interface Bar {};
        [Global=Baz2] interface Baz {};

        interface Iface2 {
          void method3();
        };
    """)
    results = parser.finish()

    harness.check(len(results), 4, "Should know about four things");
    iface = results[3]
    harness.ok(isinstance(iface, WebIDL.IDLInterface),
               "Should have an interface here");
    members = iface.members
    harness.check(len(members), 1, "Should have one member")

    harness.ok(members[0].exposureSet == set(["Foo"]),
               "method3 should have the right exposure set")
    harness.ok(members[0]._exposureGlobalNames == set(["Foo"]),
               "method3 should have the right exposure global names")

    harness.ok(iface.exposureSet == set(["Foo"]),
               "Iface2 should have the right exposure set")
    harness.ok(iface._exposureGlobalNames == set(["Foo"]),
               "Iface2 should have the right exposure global names")

    # Members implemented from a mixin keep the mixin's exposure.
    parser = parser.reset()
    parser.parse("""
        [PrimaryGlobal] interface Foo {};
        [Global=(Bar1,Bar2)] interface Bar {};
        [Global=Baz2] interface Baz {};

        [Exposed=Foo]
        interface Iface3 {
          void method4();
        };

        [Exposed=(Foo,Bar1)]
        interface Mixin {
          void method5();
        };

        Iface3 implements Mixin;
    """)
    results = parser.finish()
    harness.check(len(results), 6, "Should know about six things");
    iface = results[3]
    harness.ok(isinstance(iface, WebIDL.IDLInterface),
               "Should have an interface here");
    members = iface.members
    harness.check(len(members), 2, "Should have two members")

    harness.ok(members[0].exposureSet == set(["Foo"]),
               "method4 should have the right exposure set")
    harness.ok(members[0]._exposureGlobalNames == set(["Foo"]),
               "method4 should have the right exposure global names")

    harness.ok(members[1].exposureSet == set(["Foo", "Bar"]),
               "method5 should have the right exposure set")
    harness.ok(members[1]._exposureGlobalNames == set(["Foo", "Bar1"]),
               "method5 should have the right exposure global names")

    # Negative cases: invalid [Exposed] values and inconsistent exposure.
    parser = parser.reset()
    check_invalid(parser, """
        [Exposed=Foo]
        interface Bar {
        };
    """, "Should have thrown on invalid Exposed value on interface.")

    parser = parser.reset()
    check_invalid(parser, """
        interface Bar {
          [Exposed=Foo]
          readonly attribute bool attr;
        };
    """, "Should have thrown on invalid Exposed value on attribute.")

    parser = parser.reset()
    check_invalid(parser, """
        interface Bar {
          [Exposed=Foo]
          void operation();
        };
    """, "Should have thrown on invalid Exposed value on operation.")

    parser = parser.reset()
    check_invalid(parser, """
        interface Bar {
          [Exposed=Foo]
          const long constant = 5;
        };
    """, "Should have thrown on invalid Exposed value on constant.")

    parser = parser.reset()
    check_invalid(parser, """
        [Global] interface Foo {};
        [Global] interface Bar {};

        [Exposed=Foo]
        interface Baz {
          [Exposed=Bar]
          void method();
        };
    """, "Should have thrown on member exposed where its interface is not.")

    parser = parser.reset()
    check_invalid(parser, """
        [Global] interface Foo {};
        [Global] interface Bar {};

        [Exposed=Foo]
        interface Baz {
          void method();
        };

        [Exposed=Bar]
        interface Mixin {};

        Baz implements Mixin;
    """, "Should have thrown on LHS of implements being exposed where RHS is not.")
| mpl-2.0 |
ericdill/travis-little-helper | template.py | 1 | 3049 | from argparse import ArgumentParser
import yaml
from jinja2 import Environment, FileSystemLoader
import os
TEMPLATE_DIR = os.path.join(os.path.dirname(__file__), 'travis-template')
def main():
    """Entry point: parse the command line and generate the travis file."""
    arg_parser = ArgumentParser()
    arg_parser.add_argument(
        "-tc", "--travis-config",
        help="The yaml file specifying the configuration details for the travis yaml file",
        nargs="?",
    )
    arg_parser.add_argument(
        "-o", "--output-dir",
        help="The location to output the completed .travis.yml file. Will be output to \"output-dir/.travis.yml\"",
        nargs="?",
        default="."
    )
    arg_parser.set_defaults(func=execute)
    namespace = arg_parser.parse_args()
    # Dispatch directly to execute with the parsed arguments.
    execute(namespace, arg_parser)
def execute(args, p):
    """Unpack the parsed CLI arguments and run the template generation.

    args : argparse.Namespace with ``output_dir`` and ``travis_config``.
    p : the ArgumentParser instance; accepted for interface symmetry but
        unused here.
    """
    output_dir = args.output_dir
    input_config_yaml = args.travis_config
    execute_programmatically(input_config_yaml, output_dir)
def nest_all_the_loops(iterable, matrix=None, matrices=None):
    """Build the outer product of a list of (library, versions) pairs.

    Parameters
    ----------
    iterable : list of (str, list) tuples
        Each entry pairs a library name with the versions to test.
    matrix : dict, optional
        Base mapping included in every generated combination.
    matrices : list, optional
        Existing list to which the combinations are appended (and which is
        then returned).

    Returns
    -------
    list of dict
        One dict per combination, mapping each library name to one of its
        versions, in the same order the original recursive implementation
        produced (the first pair varies slowest).  Fixes the original's
        top-level empty-input case, which fell into the recursion base case
        and returned None instead of the accumulated list.
    """
    if matrices is None:
        matrices = []
    # Start from a copy of the base matrix so the caller's dict is not
    # mutated (the recursive version mutated it as a side effect).
    combos = [{} if matrix is None else dict(matrix)]
    # Expand one (library, versions) pair at a time; iterating existing
    # combos in the outer loop preserves the recursive ordering.
    for lib, versions in iterable:
        expanded = []
        for combo in combos:
            for version in versions:
                item = dict(combo)
                item[lib] = version
                expanded.append(item)
        combos = expanded
    matrices.extend(combos)
    return matrices
def execute_programmatically(input_config_yaml, output_dir):
    """Render ``output_dir/.travis.yml`` from a travis configuration yaml.

    Parameters
    ----------
    input_config_yaml : str
        Path to the yaml file with the travis configuration details.
    output_dir : str
        Directory to write the rendered ".travis.yml" into; created if it
        does not already exist.
    """
    print("input_config_yaml = %s" % input_config_yaml)
    print("output_directory = %s" % output_dir)
    # safe_load: the config only needs plain yaml, and yaml.load without an
    # explicit Loader can execute arbitrary python tags (and is deprecated).
    # Use a context manager so the config file handle is closed.
    with open(input_config_yaml, 'r') as config_file:
        travis_config = yaml.safe_load(config_file)
    print('travis_config = %s' % travis_config)
    # turn the env section of the travis config into the outer product of
    # environments
    env = travis_config.get('env', {})
    # bug fix: the original passed `env` as a second positional argument to
    # print() instead of %-formatting it into the message
    print('env from yaml = %s' % env)
    env_list = [(k, v) for k, v in env.items()]
    print('library matrix = %s' % env_list)
    if env_list:
        env_outer_prod = nest_all_the_loops(env_list.copy())
        matrix = []
        for mat in env_outer_prod:
            # python is handled separately by travis itself, so skip it here
            repos = ' '.join(['%s="{%s}"' % (k.upper(), k)
                              for k in sorted(mat.keys()) if k != 'python'])
            matrix.append(('%s' % repos).format(**mat))
        print('env matrix = %s' % matrix)
        travis_config['matrix'] = matrix
        travis_config['env'] = {k.lower(): k.upper() for k in env.keys()}
    # explicitly format the allow_failures section
    allow_failures = travis_config.get('allow_failures', {})
    allow_failure_rows = ["%s: %s" % (k, v)
                          for row in allow_failures for k, v in row.items()]
    travis_config['allow_failure_rows'] = allow_failure_rows
    # create the jinja environment and render the template
    jinja_env = Environment(loader=FileSystemLoader(TEMPLATE_DIR))
    template = jinja_env.get_template('nsls2.tmpl')
    # exist_ok replaces the original try/except FileExistsError dance
    os.makedirs(output_dir, exist_ok=True)
    travis_yml = template.render(**travis_config)
    travis_fname = os.path.join(output_dir, '.travis.yml')
    with open(travis_fname, 'w') as f:
        f.write(travis_yml)
main()
| gpl-3.0 |
renner/spacewalk | client/debian/packages-already-in-debian/rhn-client-tools/src/bin/rhn_check.py | 17 | 13876 | #!/usr/bin/python
#
# Python client for checking periodically for posted actions
# on the Red Hat Network servers.
#
# Copyright (c) 2000--2012 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the
# OpenSSL library under certain conditions as described in each
# individual source file, and distribute linked combinations
# including the two.
# You must obey the GNU General Public License in all respects
# for all of the code used other than OpenSSL. If you modify
# file(s) with this exception, you may extend this exception to your
# version of the file(s), but you are not obligated to do so. If you
# do not wish to do so, delete this exception statement from your
# version. If you delete this exception statement from all source
# files in the program, then also delete it here.
import os
import sys
import socket
from OpenSSL import SSL
sys.path.append("/usr/share/rhn/")
from up2date_client import getMethod
from up2date_client import up2dateErrors
from up2date_client import up2dateAuth
from up2date_client import up2dateLog
from up2date_client import rpcServer
from up2date_client import config
from up2date_client import clientCaps
from up2date_client import capabilities
from up2date_client import rhncli, rhnserver
from rhn import rhnLockfile
import xmlrpclib
# Shared up2date configuration and logger used by all handlers below.
cfg = config.initUp2dateConfig()
log = up2dateLog.initLog()

# action version we understand
ACTION_VERSION = 2

# lock file to check if we're disabled at the server's request
DISABLE_FILE = "/etc/sysconfig/rhn/disable"

# Actions that will run each time we execute.
LOCAL_ACTIONS = [("packages.checkNeedUpdate", ("rhnsd=1",))]
class CheckCli(rhncli.RhnCli):
def __init__(self):
super(CheckCli, self).__init__()
self.rhns_ca_cert = cfg['sslCACert']
self.server = None
def main(self):
""" Process all the actions we have in the queue. """
CheckCli.__check_instance_lock()
CheckCli.__check_rhn_disabled()
CheckCli.__check_has_system_id()
self.server = CheckCli.__get_server()
CheckCli.__update_system_id()
self.__run_remote_actions()
CheckCli.__run_local_actions()
s = rhnserver.RhnServer()
if s.capabilities.hasCapability('staging_content', 1) and cfg['stagingContent'] != 0:
self.__check_future_actions()
sys.exit(0)
def __get_action(self, status_report):
try:
action = self.server.queue.get(up2dateAuth.getSystemId(),
ACTION_VERSION, status_report)
return action
except xmlrpclib.Fault, f:
if f.faultCode == -31:
raise up2dateErrors.InsuffMgmntEntsError(f.faultString), None, sys.exc_info()[2]
else:
print "Could not retrieve action item from server %s" % self.server
print "Error code: %d%s" % (f.faultCode, f.faultString)
sys.exit(-1)
# XXX: what if no SSL in socket?
except socket.sslerror:
print "ERROR: SSL handshake to %s failed" % self.server
print """
This could signal that you are *NOT* talking to a server
whose certificate was signed by a Certificate Authority
listed in the %s file or that the
RHNS-CA-CERT file is invalid.""" % self.rhns_ca_cert
sys.exit(-1)
except socket.error:
print "Could not retrieve action from %s.\n"\
"Possible networking problem?" % str(self.server)
sys.exit(-1)
except up2dateErrors.ServerCapabilityError, e:
print e
sys.exit(1)
except SSL.Error, e:
print "ERROR: SSL errors detected"
print "%s" % e
sys.exit(-1)
def __query_future_actions(self, time_window):
try:
actions = self.server.queue.get_future_actions(up2dateAuth.getSystemId(),
time_window)
return actions
except xmlrpclib.Fault, f:
if f.faultCode == -31:
raise up2dateErrors.InsuffMgmntEntsError(f.faultString), None, sys.exc_info()[2]
else:
print "Could not retrieve action item from server %s" % self.server
print "Error code: %d%s" % (f.faultCode, f.faultString)
sys.exit(-1)
# XXX: what if no SSL in socket?
except socket.sslerror:
print "ERROR: SSL handshake to %s failed" % self.server
print """
This could signal that you are *NOT* talking to a server
whose certificate was signed by a Certificate Authority
listed in the %s file or that the
RHNS-CA-CERT file is invalid.""" % self.rhns_ca_cert
sys.exit(-1)
except socket.error:
print "Could not retrieve action from %s.\n"\
"Possible networking problem?" % str(self.server)
sys.exit(-1)
except up2dateErrors.ServerCapabilityError, e:
print e
sys.exit(1)
except SSL.Error, e:
print "ERROR: SSL errors detected"
print "%s" % e
sys.exit(-1)
def __fetch_future_action(self, action):
""" Fetch one specific action from rhnParent """
# TODO
pass
def __check_future_actions(self):
""" Retrieve scheduled actions and cache them if possible """
time_window = cfg['stagingContentWindow'] or 24;
actions = self.__query_future_actions(time_window)
for action in actions:
self.handle_action(action, cache_only=1)
def __run_remote_actions(self):
# the list of caps the client needs
caps = capabilities.Capabilities()
status_report = CheckCli.__build_status_report()
action = self.__get_action(status_report)
while action != "" and action != {}:
self.__verify_server_capabilities(caps)
if self.is_valid_action(action):
try:
up2dateAuth.updateLoginInfo()
except up2dateErrors.ServerCapabilityError, e:
print e
sys.exit(1)
self.handle_action(action)
action = self.__get_action(status_report)
def __verify_server_capabilities(self, caps):
response_headers = self.server.get_response_headers()
caps.populate(response_headers)
# do we actually want to validte here?
try:
caps.validate()
except up2dateErrors.ServerCapabilityError, e:
print e
sys.exit(1)
def __parse_action_data(self, action):
""" Parse action data and returns (method, params) """
data = action['action']
parser, decoder = xmlrpclib.getparser()
parser.feed(data.encode("utf-8"))
parser.close()
params = decoder.close()
method = decoder.getmethodname()
return (method, params)
def submit_response(self, action_id, status, message, data):
""" Submit a response for an action_id. """
# get a new server object with fresh headers
self.server = CheckCli.__get_server()
try:
ret = self.server.queue.submit(up2dateAuth.getSystemId(),
action_id, status, message, data)
except xmlrpclib.Fault, f:
print "Could not submit results to server %s" % self.server
print "Error code: %d%s" % (f.faultCode, f.faultString)
sys.exit(-1)
# XXX: what if no SSL in socket?
except socket.sslerror:
print "ERROR: SSL handshake to %s failed" % self.server
print """
This could signal that you are *NOT* talking to a server
whose certificate was signed by a Certificate Authority
listed in the %s file or that the
RHNS-CA-CERT file is invalid.""" % self.rhns_ca_cert
sys.exit(-1)
except socket.error:
print "Could not submit to %s.\n"\
"Possible networking problem?" % str(self.server)
sys.exit(-1)
return ret
def handle_action(self, action, cache_only=None):
""" Wrapper handler for the action we're asked to do. """
log.log_debug("handle_action", action)
log.log_debug("handle_action actionid = %s, version = %s" % (
action['id'], action['version']))
(method, params) = self.__parse_action_data(action)
(status, message, data) = CheckCli.__run_action(method, params, {'cache_only': cache_only})
ret = 0
if not cache_only:
log.log_debug("Sending back response", (status, message, data))
ret = self.submit_response(action['id'], status, message, data)
return ret
def is_valid_action(self, action):
log.log_debug("check_action", action)
# be very paranoid of what we get back
if type(action) != type({}):
print "Got unparseable action response from server"
sys.exit(-1)
for key in ['id', 'version', 'action']:
if not action.has_key(key):
print "Got invalid response - missing '%s'" % key
sys.exit(-1)
try:
ver = int(action['version'])
except ValueError:
ver = -1
if ver > ACTION_VERSION or ver < 0:
print "Got unknown action version %d" % ver
print action
# the -99 here is kind of magic
self.submit_response(action["id"],
xmlrpclib.Fault(-99, "Can not handle this version"))
return False
return True
@staticmethod
def __get_server():
""" Initialize a server connection and set up capability info. """
server = rpcServer.getServer()
# load the new client caps if they exist
clientCaps.loadLocalCaps()
headerlist = clientCaps.caps.headerFormat()
for (headerName, value) in headerlist:
server.add_header(headerName, value)
return server
@staticmethod
def __update_system_id():
try:
up2dateAuth.maybeUpdateVersion()
except up2dateErrors.CommunicationError, e:
print e
sys.exit(1)
@staticmethod
def __build_status_report():
status_report = {}
status_report["uname"] = os.uname()
if os.access("/proc/uptime", os.R_OK):
uptime = open("/proc/uptime", "r").read().split()
try:
status_report["uptime"] = map(int, map(float, uptime))
except (TypeError, ValueError):
status_report["uptime"] = map(lambda a: a[:-3], uptime)
except:
pass
return status_report
@staticmethod
def __run_local_actions():
"""
Hit any actions that we want to always run.
If we want to run any actions everytime rhnsd runs rhn_check,
we can add them to the list LOCAL_ACTIONS
"""
for method_params in LOCAL_ACTIONS:
method = method_params[0]
params = method_params[1]
(status, message, data) = CheckCli.__run_action(method, params)
log.log_debug("local action status: ", (status, message, data))
@staticmethod
def __do_call(method, params, kwargs={}):
log.log_debug("do_call ", method, params, kwargs)
method = getMethod.getMethod(method, "/usr/share/rhn/", "actions")
retval = method(*params, **kwargs)
return retval
@staticmethod
def __run_action(method, params, kwargs={}):
try:
(status, message, data) = CheckCli.__do_call(method, params, kwargs)
except getMethod.GetMethodException:
log.log_debug("Attempt to call an unsupported action", method,
params)
status = 6
message = "Invalid function call attempted"
data = {}
except:
log.log_exception(*sys.exc_info())
# The action code failed in some way. let's let the server know.
status = 6,
message = "Fatal error in Python code occurred"
data = {}
return (status, message, data)
@staticmethod
def __check_rhn_disabled():
""" If we're disabled, go down (almost) quietly. """
if os.path.exists(DISABLE_FILE):
print "RHN service is disabled. Check %s" % DISABLE_FILE
sys.exit(0)
@staticmethod
def __check_has_system_id():
""" Retrieve the system_id. This is required. """
if not up2dateAuth.getSystemId():
print "ERROR: unable to read system id."
sys.exit(-1)
@staticmethod
def __check_instance_lock():
lock = None
try:
lock = rhnLockfile.Lockfile('/var/run/rhn_check.pid')
except rhnLockfile.LockfileLockedException, e:
sys.stderr.write(rhncli.utf8_encode(_("Attempting to run more than one instance of rhn_check. Exiting.\n")))
sys.exit(0)
# Script entry point: construct the client and run it (run() is provided
# by the rhncli.RhnCli base class -- presumably it dispatches to main()).
if __name__ == "__main__":
    cli = CheckCli()
    cli.run()
| gpl-2.0 |
glaubitz/fs-uae-debian | launcher/amitools/fs/validate/DirScan.py | 1 | 6950 | from __future__ import absolute_import
from __future__ import print_function
from .BlockScan import BlockScan
from amitools.fs.FSString import FSString
from amitools.fs.FileName import FileName
from amitools.fs.validate.Log import Log
import amitools.fs.DosType as DosType
class DirChainEntry:
    """A single link in a directory hash-table chain.

    Wraps the block info of one directory entry together with the
    validation flags accumulated while the chain is scanned.
    """

    def __init__(self, blk_info):
        # Block info of this entry; all validation flags start cleared.
        self.blk_info = blk_info
        self.parent_ok = False
        self.fn_hash_ok = False
        self.valid = False
        self.end = False
        self.orphaned = False
        self.sub = None

    def __str__(self):
        """Render as "[DCE @<blk_num> '<name>': <set flags>]"."""
        flag_names = [name for is_set, name in (
            (self.parent_ok, "parent_ok"),
            (self.fn_hash_ok, "fn_hash_ok"),
            (self.valid, "valid"),
            (self.end, "end"),
            (self.orphaned, "orphaned"),
        ) if is_set]
        return "[DCE @%d '%s': %s]" % \
            (self.blk_info.blk_num, self.blk_info.name, " ".join(flag_names))
class DirChain:
    """One hash-slot bucket of a directory: an ordered chain of entries."""

    def __init__(self, hash_val):
        self.hash_val = hash_val   # hash slot index this chain belongs to
        self.chain = []            # DirChainEntry objects in chain order

    def add(self, dce):
        """Append an entry to the end of the chain."""
        self.chain.append(dce)

    def get_entries(self):
        """Return the chain's entry list (the live list, not a copy)."""
        return self.chain

    def __str__(self):
        return "{{DirChain +{0}: #{1}}}".format(self.hash_val, len(self.chain))
class DirInfo:
    """Aggregated scan information for one directory block."""

    def __init__(self, blk_info):
        self.blk_info = blk_info   # block info of the directory itself
        self.chains = {}           # hash value -> DirChain
        self.children = []         # infos of scanned sub directories

    def add(self, dc):
        """Register a DirChain under its hash value."""
        self.chains[dc.hash_val] = dc

    def add_child(self, c):
        """Record a child (sub directory) info object."""
        self.children.append(c)

    def get(self, hash_val):
        """Return the DirChain for hash_val, or None if the slot is empty."""
        return self.chains.get(hash_val)

    def get_chains(self):
        """Return the hash value -> DirChain mapping."""
        return self.chains

    def __str__(self):
        info = self.blk_info
        return "<DirInfo @%d '%s' #%d parent:%d child:#%d>" % (
            info.blk_num, info.name, len(self.chains),
            info.parent_blk, len(self.children))
class DirScan:
  """directory tree scanner

  Walks an Amiga filesystem directory tree starting at the root block,
  building DirInfo/DirChain/DirChainEntry structures and logging any
  inconsistency (re-used blocks, bad parent links, wrong name hashes,
  invalid block types) to the supplied log.
  """
  def __init__(self, block_scan, log):
    # log: validation Log used for error reporting
    # block_scan: BlockScan giving access to decoded filesystem blocks
    self.log = log
    self.block_scan = block_scan
    self.root_di = None
    # international mode changes the hash function (see FileName)
    self.intl = DosType.is_intl(block_scan.dos_type)
    self.files = []   # DirChainEntry objects of all file headers found
    self.dirs = []    # DirInfo objects of all directories found

  def scan_tree(self, root_blk_num, progress=None):
    """scan the root tree

    Returns the root DirInfo, or None if the root block is missing.
    progress (optional) must provide begin(name)/add()/end().
    """
    # get root block info
    root_bi = self.block_scan.get_block(root_blk_num)
    if root_bi == None:
      self.log.msg(Log.ERROR,"Root block not found?!",root_blk_num)
      return None
    # do tree scan
    if progress != None:
      progress.begin("dir")
    self.root_di = self.scan_dir(root_bi, progress)
    if progress != None:
      progress.end()
    return self.root_di

  def scan_dir(self, dir_bi, progress):
    """check a directory by scanning through the hash table entries and follow the chains

    Returns (all_chains_ok, dir_obj)
    """
    # create new dir info
    di = DirInfo(dir_bi)
    self.dirs.append(di)

    # run through hash_table of directory and build chains
    chains = {}
    hash_val = 0
    for blk_num in dir_bi.hash_table:
      if blk_num != 0:
        # build chain starting at this non-empty hash slot
        chain = DirChain(hash_val)
        self.build_chain(chain, dir_bi, blk_num, progress)
        di.add(chain)
      hash_val += 1

    return di

  def build_chain(self, chain, dir_blk_info, blk_num, progress):
    """build a block chain

    Follows next_blk links starting at blk_num, appending one
    DirChainEntry per visited block and validating each; recurses
    into sub directories.  Each detected error terminates the chain
    (dce.end = True) and returns.
    """
    dir_blk_num = dir_blk_info.blk_num
    dir_name = dir_blk_info.name
    hash_val = chain.hash_val

    # make sure entry block is first used
    # NOTE(review): despite the name, a True result from
    # is_block_available() is treated as "already used" below -- confirm
    # semantics against BlockScan.
    block_used = self.block_scan.is_block_available(blk_num)

    # get entry block
    blk_info = self.block_scan.read_block(blk_num)

    # create dir chain entry
    dce = DirChainEntry(blk_info)
    chain.add(dce)

    # account
    if progress != None:
      progress.add()

    # block already used?
    if block_used:
      # NOTE(review): the message below has an unbalanced quote after %s
      # ("'%s (%d)") -- kept as-is here.
      self.log.msg(Log.ERROR, "dir block already used in chain #%d of dir '%s (%d)" % (hash_val, dir_name, dir_blk_num), blk_num)
      dce.end = True
      return

    # self reference?
    if blk_num == dir_blk_num:
      self.log.msg(Log.ERROR, "dir block in its own chain #%d of dir '%s' (%d)" % (hash_val, dir_name, dir_blk_num), blk_num)
      dce.end = True
      return

    # not a block in range
    if blk_info == None:
      self.log.msg(Log.ERROR, "out-of-range block terminates chain #%d of dir '%s' (%d)" % (hash_val, dir_name, dir_blk_num), blk_num)
      dce.end = True
      return

    # check type of entry block: only dirs and file headers may appear
    b_type = blk_info.blk_type
    if b_type not in (BlockScan.BT_DIR, BlockScan.BT_FILE_HDR):
      self.log.msg(Log.ERROR, "invalid block terminates chain #%d of dir '%s' (%d)" % (hash_val, dir_name, dir_blk_num), blk_num)
      dce.end = True
      return

    # check referenceed block type in chain
    blk_type = blk_info.blk_type
    if blk_type in (BlockScan.BT_ROOT, BlockScan.BT_FILE_LIST, BlockScan.BT_FILE_DATA):
      self.log.msg(Log.ERROR, "invalid block type %d terminates chain #%d of dir '%s' (%d)" % (blk_type, hash_val, dir_name, dir_blk_num), blk_num)
      dce.end = True
      return

    # all following are ok
    dce.valid = True

    # check parent of block: must point back at the containing dir
    name = blk_info.name
    dce.parent_ok = (blk_info.parent_blk == dir_blk_num)
    if not dce.parent_ok:
      self.log.msg(Log.ERROR, "invalid parent in '%s' chain #%d of dir '%s' (%d)" % (name, hash_val, dir_name, dir_blk_num), blk_num)

    # check name hash: entry must live in the chain of its own hash
    fn = FileName(name, self.intl)
    fn_hash = fn.hash()
    dce.fn_hash_ok = (fn_hash == hash_val)
    if not dce.fn_hash_ok:
      self.log.msg(Log.ERROR, "invalid name hash in '%s' chain #%d of dir '%s' (%d)" % (name, hash_val, dir_name, dir_blk_num), blk_num)

    # recurse into dir?
    if blk_type == BlockScan.BT_DIR:
      dce.sub = self.scan_dir(blk_info, progress)
    elif blk_type == BlockScan.BT_FILE_HDR:
      self.files.append(dce)

    # check next block in chain
    next_blk = blk_info.next_blk
    if next_blk != 0:
      self.build_chain(chain, dir_blk_info, next_blk, progress)
    else:
      dce.end = True

  def get_all_file_hdr_blk_infos(self):
    """return all file chain entries"""
    result = []
    for f in self.files:
      result.append(f.blk_info)
    return result

  def get_all_dir_infos(self):
    """return all dir infos"""
    return self.dirs

  def dump(self):
    """dump whole dir info structure"""
    self.dump_dir_info(self.root_di, 0)

  def dump_dir_info(self, di, indent):
    """dump a single dir info structure and its sub dirs"""
    istr = "  " * indent
    print(istr, di)
    for hash_value in sorted(di.get_chains().keys()):
      dc = di.get(hash_value)
      print(istr, " ", dc)
      for dce in dc.get_entries():
        print(istr, "  ", dce)
        # descend only into entries that really are directories
        sub = dce.sub
        if sub != None and dce.blk_info.blk_type == BlockScan.BT_DIR:
          self.dump_dir_info(sub, indent+1)
| gpl-2.0 |
zjurelinac/Linker | utils.py | 1 | 2492 | """Module containing utility functions"""
import base64
import hashlib
import random
import re
import string
from datetime import datetime
from math import *
def hashfunc( text ):
    """Return the SHA-512 hash of a string, encoded in base64.

    Parameters:
        text (str): the string to hash.

    Returns:
        str: an ASCII, base64-encoded SHA-512 digest (88 characters).
    """
    # Parameter renamed from ``str`` to avoid shadowing the builtin.
    return base64.b64encode( hashlib.sha512( text.encode() ).digest() ).decode( 'ascii' )
def markup_to_html( text ):
    """Transform a simple markup string into HTML.

    Supported markup: bold as '**bold part**' and italic as
    '*italicized part*'.  Bold is substituted first so the italic
    pattern does not consume the '**' delimiters.

    Parameters:
        text (str): the marked-up input.

    Returns:
        str: the input with markup replaced by <b>/<i> tags.
    """
    # Parameter renamed from ``str`` to avoid shadowing the builtin.
    text = re.sub( r'\*\*(.*?)\*\*', r'<b>\1</b>', text )
    text = re.sub( r'\*(.*?)\*', r'<i>\1</i>', text )
    return text
def pretty_date( time = False ):
    """Return a human-friendly, relative description of *time*.

    Accepts a ``datetime`` instance and renders strings such as
    ``'just now'``, ``'an hour ago'``, ``'Yesterday'`` or
    ``'3 months ago'``.  ``datetime.min`` renders as ``'Never'``,
    a future datetime yields ``''``, and any non-datetime argument
    is handed back unchanged.
    """
    now = datetime.now()
    if not isinstance( time, datetime ):
        # Non-datetime inputs (including the default False) pass through.
        return time
    delta = now - time
    if time == datetime.min:
        return "Never"

    seconds = delta.seconds
    days = delta.days

    if days < 0:
        return ''

    if days == 0:
        # Sub-day granularity: seconds, then minutes, then hours.
        if seconds < 10:
            return "just now"
        elif seconds < 60:
            return str( seconds ) + " seconds ago"
        elif seconds < 120:
            return "a minute ago"
        elif seconds < 3600:
            return str( round( seconds / 60 ) ) + " minutes ago"
        elif seconds < 7200:
            return "an hour ago"
        else:
            return str( round( seconds / 3600 ) ) + " hours ago"

    if days == 1:
        return "Yesterday"
    elif days < 7:
        return str( days ) + " days ago"
    elif days < 31:
        return str( round( days / 7 ) ) + " weeks ago"
    elif days < 365:
        return str( round( days / 30 ) ) + " months ago"
    return str( round( days / 365 ) ) + " years ago"
def random_string( length ):
    """Return a random string of the given length.

    Characters are drawn from ASCII letters, digits and '$%'.

    NOTE(review): uses the ``random`` module, which is not
    cryptographically secure -- switch to ``secrets.choice`` if these
    strings are used as security tokens.
    """
    # Parameter renamed from ``len`` to avoid shadowing the builtin.
    alphabet = string.ascii_letters + string.digits + '$%'
    return ''.join( random.choice( alphabet ) for _ in range( length ) )
def shorten_array( a, n ):
    """Truncate *a* to at most *n* elements.

    When elements are cut off, a trailing string of the form
    '<count> others' is appended.  Always returns a new list.
    """
    if len( a ) <= n:
        return a[ :n ]
    return a[ :n ] + [ str( len( a ) - n ) + ' others' ]
| mit |
npotenza/Stino | stino/pyarduino/arduino_compiler.py | 8 | 22618 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
# 1. Copyright
# 2. Lisence
# 3. Author
"""
Documents
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import threading
import subprocess
import re
import time
from . import base
from . import arduino_info
from . import arduino_target_params
from . import arduino_project
from . import arduino_src
class Compiler(object):
def __init__(self, path, console=None):
self.need_to_build = True
self.message_queue = base.message_queue.MessageQueue(console)
target_params_info = arduino_target_params.TargetParamsInfo()
self.params = target_params_info.get_params()
self.arduino_info = arduino_info.get_arduino_info()
self.project = arduino_project.Project(path)
project_name = self.project.get_name()
build_path = get_build_path()
build_path = os.path.join(build_path, project_name)
self.set_build_path(build_path)
self.done_build = False
self.error_occured = False
self.settings = base.settings.get_arduino_settings()
self.bare_gcc = self.settings.get('bare_gcc', False)
self.is_big_project = self.settings.get('big_project', False)
def set_build_path(self, build_path):
self.build_path = build_path
if not os.path.isdir(self.build_path):
os.makedirs(self.build_path)
def build(self):
self.message_queue.start_print()
build_thread = threading.Thread(target=self.start_build)
build_thread.start()
def start_build(self):
target_board = \
self.arduino_info.get_target_board_info().get_target_board()
if not target_board:
text = 'No board exists. Please Select Arduino Application Folder '
text += 'or Change Arduino Sketchbook Folder in Arduino Menu -> '
text += 'Preferences.\\n'
self.message_queue.put(text)
return
start_time = time.time()
self.check_new_build()
self.prepare_project_src_files()
if self.need_to_build:
project_name = self.project.get_name()
self.message_queue.put(
'[Stino - Start building "{0}"...]\\n', project_name)
self.prepare_core_src_files()
self.prepare_params()
self.prepare_cmds()
self.exec_build_cmds()
if not self.error_occured:
self.show_size_info()
end_time = time.time()
diff_time = end_time - start_time
diff_time = '%.1f' % diff_time
self.message_queue.put(
'[Stino - Done building "{0}" in {1}s.]\\n',
project_name, diff_time)
else:
self.error_occured = True
self.done_build = True
time.sleep(20)
self.message_queue.stop_print()
def check_new_build(self):
self.is_new_build = False
ide_path = self.arduino_info.get_ide_dir().get_path()
sketchbook_path = self.arduino_info.get_sketchbook_dir().get_path()
target_board = \
self.arduino_info.get_target_board_info().get_target_board()
target_board_id = target_board.get_id()
target_sub_boards = \
self.arduino_info.get_target_board_info().get_target_sub_boards()
target_sub_board_ids = [sb.get_id() for sb in target_sub_boards]
last_build_path = os.path.join(self.build_path, 'last_build.txt')
last_build_file = base.settings.Settings(last_build_path)
last_bare_gcc = last_build_file.get('bare_gcc')
last_big_project = last_build_file.get('big_project')
last_ide_path = last_build_file.get('ide_path')
last_sketchbook_path = last_build_file.get('sketchbook_path')
last_board_id = last_build_file.get('board_id')
last_sub_board_ids = last_build_file.get('sub_board_ids')
full_compilation = self.settings.get('full_compilation', False)
bare_gcc = self.settings.get('bare_gcc', False)
big_project = self.settings.get('big_project', False)
if full_compilation or ide_path != last_ide_path or \
sketchbook_path != last_sketchbook_path or \
target_board_id != last_board_id or \
target_sub_board_ids != last_sub_board_ids or \
bare_gcc != last_bare_gcc or big_project != last_big_project:
last_build_file.set('ide_path', ide_path)
last_build_file.set('sketchbook_path', sketchbook_path)
last_build_file.set('board_id', target_board_id)
last_build_file.set('sub_board_ids', target_sub_board_ids)
last_build_file.set('bare_gcc', bare_gcc)
last_build_file.set('big_project', big_project)
self.is_new_build = True
def prepare_project_src_files(self):
self.project_src_changed = False
self.project_cpp_obj_pairs = []
self.project_obj_paths = []
ino_files = self.project.list_ino_files()
if ino_files and not self.bare_gcc:
combined_file_name = self.project.get_name() + '.ino.cpp'
combined_file_path = os.path.join(
self.build_path, combined_file_name)
combined_file = base.abs_file.File(combined_file_path)
combined_obj_path = combined_file_path + '.o'
self.project_obj_paths.append(combined_obj_path)
ino_changed = check_ino_change(ino_files, combined_file)
if self.is_new_build or ino_changed:
core_path = self.params.get('build.core.path', '')
main_cxx_path = os.path.join(core_path, 'main.cxx')
if os.path.isfile(main_cxx_path):
main_cxx_file = base.abs_file.File(main_cxx_path)
ino_files.append(main_cxx_file)
combined_src = arduino_src.combine_ino_files(
core_path, ino_files)
combined_file.write(combined_src)
cpp_obj_pair = (combined_file_path, combined_obj_path)
self.project_cpp_obj_pairs.append(cpp_obj_pair)
sub_dir_name = self.project.get_name()
cpp_files = self.project.list_cpp_files(self.is_big_project)
self.project_obj_paths += gen_obj_paths(
self.project.get_path(), self.build_path, sub_dir_name, cpp_files)
cpp_obj_pairs = gen_cpp_obj_pairs(self.project.get_path(),
self.build_path, sub_dir_name,
cpp_files, self.is_new_build)
self.project_cpp_obj_pairs += cpp_obj_pairs
if self.project_cpp_obj_pairs:
self.project_src_changed = True
self.need_to_build = bool(self.project_obj_paths)
def prepare_lib_src_files(self):
ino_files = []
if not self.bare_gcc:
ino_files = self.project.list_ino_files()
cpp_files = self.project.list_cpp_files(self.is_big_project)
h_files = self.project.list_h_files(self.is_big_project)
src_files = ino_files + cpp_files + h_files
self.libraries = arduino_src.list_libraries(
src_files, self.arduino_info)
last_build_path = os.path.join(self.build_path, 'last_build.txt')
last_build_file = base.settings.Settings(last_build_path)
last_lib_paths = last_build_file.get('lib_paths', [])
lib_paths = [lib.get_path() for lib in self.libraries]
self.library_src_changed = (lib_paths != last_lib_paths)
last_build_file.set('lib_paths', lib_paths)
def prepare_core_src_files(self):
self.core_obj_paths = []
self.core_cpp_obj_pairs = []
self.core_src_changed = False
self.prepare_lib_src_files()
target_arch = \
self.arduino_info.get_target_board_info().get_target_arch()
for library in self.libraries:
library_path = library.get_path()
library_name = library.get_name()
sub_dir_name = 'lib_' + library_name
lib_cpp_files = library.list_cpp_files(target_arch)
lib_obj_paths = gen_obj_paths(library_path, self.build_path,
sub_dir_name, lib_cpp_files)
lib_cpp_obj_pairs = gen_cpp_obj_pairs(
library_path, self.build_path, sub_dir_name, lib_cpp_files,
self.is_new_build)
self.core_obj_paths += lib_obj_paths
self.core_cpp_obj_pairs += lib_cpp_obj_pairs
self.core_paths = []
if not self.bare_gcc:
core_path = self.params.get('build.core.path')
cores_path = os.path.dirname(core_path)
common_core_path = os.path.join(cores_path, 'Common')
varient_path = self.params.get('build.variant.path')
build_hardware = self.params.get('build.hardware', '')
core_paths = [core_path, common_core_path, varient_path]
if build_hardware:
platform_path = self.params.get('runtime.platform.path', '')
hardware_path = os.path.join(platform_path, build_hardware)
core_paths.append(hardware_path)
core_paths = [p for p in core_paths if os.path.isdir(p)]
for core_path in core_paths:
if core_path not in self.core_paths:
self.core_paths.append(core_path)
for core_path in self.core_paths:
core_obj_paths, core_cpp_obj_pairs = gen_core_objs(
core_path, 'core_', self.build_path, self.is_new_build)
self.core_obj_paths += core_obj_paths
self.core_cpp_obj_pairs += core_cpp_obj_pairs
if self.core_cpp_obj_pairs:
self.core_src_changed = True
def prepare_params(self):
self.archive_file_name = 'core.a'
self.params['build.path'] = self.build_path
self.params['build.project_name'] = self.project.get_name()
self.params['archive_file'] = self.archive_file_name
extra_flag = ' ' + self.settings.get('extra_flag', '')
c_flags = self.params.get('compiler.c.flags', '') + extra_flag
cpp_flags = self.params.get('compiler.cpp.flags', '') + extra_flag
S_flags = self.params.get('compiler.S.flags', '') + extra_flag
self.params['compiler.c.flags'] = c_flags
self.params['compiler.cpp.flags'] = cpp_flags
self.params['compiler.S.flags'] = S_flags
project_path = self.project.get_path()
include_paths = [project_path] + self.core_paths
target_arch = \
self.arduino_info.get_target_board_info().get_target_arch()
for lib in self.libraries:
src_dirs = lib.list_src_dirs(target_arch)
include_paths += [d.get_path() for d in src_dirs]
includes = ['"-I%s"' % path for path in include_paths]
self.params['includes'] = ' '.join(includes)
ide_path = self.arduino_info.get_ide_dir().get_path()
if not 'compiler.path' in self.params:
compiler_path = '{runtime.ide.path}/hardware/tools/avr/bin/'
self.params['compiler.path'] = compiler_path
compiler_path = self.params.get('compiler.path')
compiler_path = compiler_path.replace('{runtime.ide.path}', ide_path)
if not os.path.isdir(compiler_path):
self.params['compiler.path'] = ''
self.params = arduino_target_params.replace_param_values(self.params)
def prepare_cmds(self):
compile_c_cmd = self.params.get('recipe.c.o.pattern', '')
compile_cpp_cmd = self.params.get('recipe.cpp.o.pattern', '')
compile_asm_cmd = self.params.get('recipe.S.o.pattern', '')
ar_cmd = self.params.get('recipe.ar.pattern', '')
combine_cmd = self.params.get('recipe.c.combine.pattern', '')
eep_cmd = self.params.get('recipe.objcopy.eep.pattern', '')
hex_cmd = self.params.get('recipe.objcopy.hex.pattern', '')
self.build_files = []
self.file_cmds_dict = {}
for cpp_path, obj_path in (self.project_cpp_obj_pairs +
self.core_cpp_obj_pairs):
cmd = compile_cpp_cmd
ext = os.path.splitext(cpp_path)[1]
if ext == '.c':
cmd = compile_c_cmd
elif ext == '.S':
cmd = compile_asm_cmd
cmd = cmd.replace('{source_file}', cpp_path)
cmd = cmd.replace('{object_file}', obj_path)
self.build_files.append(obj_path)
self.file_cmds_dict[obj_path] = [cmd]
core_changed = False
core_archive_path = os.path.join(self.build_path,
self.archive_file_name)
if (self.library_src_changed or self.core_src_changed) and \
os.path.isfile(core_archive_path):
os.remove(core_archive_path)
if not os.path.isfile(core_archive_path):
core_changed = True
cmds = []
for obj_path in self.core_obj_paths:
cmd = ar_cmd.replace('{object_file}', obj_path)
cmds.append(cmd)
self.build_files.append(core_archive_path)
self.file_cmds_dict[core_archive_path] = cmds
project_file_base_path = os.path.join(self.build_path,
self.project.get_name())
elf_file_path = project_file_base_path + '.elf'
if self.project_src_changed or core_changed:
if os.path.isfile(elf_file_path):
os.remove(elf_file_path)
if not os.path.isfile(elf_file_path):
obj_paths = ' '.join(['"%s"' % p for p in self.project_obj_paths])
cmd = combine_cmd.replace('{object_files}', obj_paths)
if not self.core_obj_paths:
core_archive_path = \
self.build_path + '/' + self.archive_file_name
text = '"' + core_archive_path + '"'
cmd = cmd.replace(text, '')
self.build_files.append(elf_file_path)
self.file_cmds_dict[elf_file_path] = [cmd]
if eep_cmd:
eep_file_path = project_file_base_path + '.eep'
self.build_files.append(eep_file_path)
self.file_cmds_dict[eep_file_path] = [eep_cmd]
if hex_cmd:
ext = '.bin'
if '.hex' in hex_cmd:
ext = '.hex'
hex_file_path = project_file_base_path + ext
self.build_files.append(hex_file_path)
self.file_cmds_dict[hex_file_path] = [hex_cmd]
def exec_build_cmds(self):
show_compilation_output = self.settings.get('build_verbose', False)
self.working_dir = self.arduino_info.get_ide_dir().get_path()
error_occured = False
total_file_number = len(self.build_files)
for index, build_file in enumerate(self.build_files):
percent = str(int(100 * (index + 1) / total_file_number )).rjust(3)
self.message_queue.put('[{1}%] Creating {0}...\\n',
build_file, percent)
cmds = self.file_cmds_dict.get(build_file)
error_occured = exec_cmds(self.working_dir, cmds,
self.message_queue,
show_compilation_output)
if error_occured:
self.error_occured = True
break
def show_size_info(self):
size_cmd = self.params.get('recipe.size.pattern', '')
return_code, stdout, stderr = exec_cmd(self.working_dir, size_cmd)
if stderr:
self.message_queue.put(stderr + '\n')
self.print_size(stdout)
def print_size(self, text):
size_total = int(self.params.get('upload.maximum_size'))
size_data_total = int(self.params.get('upload.maximum_data_size'))
size_regex = self.params.get('recipe.size.regex')
pattern = re.compile(size_regex, re.M)
result = pattern.findall(text)
if result:
try:
int(result[0])
except TypeError:
result = result[0][:2]
size = sum(int(n) for n in result)
size_percent = size / size_total * 100
size = regular_numner(size)
size_total = regular_numner(size_total)
size_percent = '%.1f' % size_percent
txt = 'Sketch uses {0} bytes ({1}%) '
txt += 'of program storage space. Maximum is {2} bytes.\\n'
self.message_queue.put(txt, size, size_percent, size_total)
size_regex_data = self.params.get('recipe.size.regex.data', '')
if size_regex_data and size_data_total:
pattern = re.compile(size_regex_data, re.M)
result = pattern.findall(text)
if result:
try:
int(result[0])
except TypeError:
result = result[0][1:]
size_data = sum(int(n) for n in result)
size_data_percent = size_data / size_data_total * 100
size_data_remain = size_data_total - size_data
size_data = regular_numner(size_data)
size_data_remain = regular_numner(size_data_remain)
size_data_total = regular_numner(size_data_total)
size_data_percent = '%.1f' % size_data_percent
txt = 'Global variables use {0} bytes ({1}%) of dynamic memory, '
txt += 'leaving {2} bytes for local variables. '
txt += 'Maximum is {3} bytes.\\n'
self.message_queue.put(txt, size_data, size_data_percent,
size_data_remain, size_data_total)
def is_finished(self):
return self.done_build
def has_error(self):
    """Return True if any build command exited with a non-zero status."""
    return self.error_occured
def get_params(self):
    """Return the build parameter dictionary for this compilation."""
    return self.params
def get_ide_path(self):
    """Return the filesystem path of the configured Arduino IDE."""
    return self.arduino_info.get_ide_dir().get_path()
def get_build_path():
    """Return the build output directory, creating it when necessary.

    Uses the user-configured 'build_path' setting when present; otherwise
    falls back to a 'Stino_build' folder inside the system temp directory.
    """
    settings = base.settings.get_arduino_settings()
    build_path = settings.get('build_path', '')
    if not build_path:
        build_path = os.path.join(base.sys_path.get_tmp_path(),
                                  'Stino_build')
    if not os.path.isdir(build_path):
        os.makedirs(build_path)
    return build_path
def check_ino_change(ino_files, combined_file):
    """Return True when any .ino source is newer than the combined object.

    Args:
        ino_files: iterable of source File objects.
        combined_file: the combined sketch File; its '.o' sibling is the
            build product whose mtime is compared against.
    """
    obj_file = base.abs_file.File(combined_file.get_path() + '.o')
    return any(ino.get_mtime() > obj_file.get_mtime()
               for ino in ino_files)
def gen_cpp_obj_pairs(src_path, build_path, sub_dir,
                      cpp_files, new_build=False):
    """Pair each source path with its object path when a rebuild is needed.

    A pair is emitted for a source file when new_build is set or when the
    source is newer than its object file.
    """
    obj_paths = gen_obj_paths(src_path, build_path, sub_dir, cpp_files)
    obj_files = [base.abs_file.File(p) for p in obj_paths]
    return [(src.get_path(), obj.get_path())
            for src, obj in zip(cpp_files, obj_files)
            if new_build or src.get_mtime() > obj.get_mtime()]
def gen_obj_paths(src_path, build_path, sub_dir, cpp_files):
    """Map each source under src_path to an .o path under build_path/sub_dir.

    The source tree layout is mirrored beneath the output directory, and
    every required parent directory is created as a side effect.
    """
    out_dir = os.path.join(build_path, sub_dir)
    obj_paths = []
    for cpp_file in cpp_files:
        # Relative path of the source inside src_path, with '.o' appended.
        rel_path = cpp_file.get_path().replace(src_path, '')[1:] + '.o'
        obj_path = os.path.join(out_dir, rel_path)
        obj_paths.append(obj_path)
        parent_dir = os.path.dirname(obj_path)
        if not os.path.isdir(parent_dir):
            os.makedirs(parent_dir)
    return obj_paths
def exec_cmds(working_dir, cmds, message_queue, is_verbose=False):
    """Run the given commands in order, stopping at the first failure.

    stderr is always forwarded to the queue; the command line and its
    stdout are forwarded only when is_verbose is set.

    Returns:
        True when a command exited with a non-zero status, else False.
    """
    for cmd in cmds:
        return_code, stdout, stderr = exec_cmd(working_dir, cmd)
        if is_verbose:
            message_queue.put(cmd + '\n')
            if stdout:
                message_queue.put(stdout + '\n')
        if stderr:
            message_queue.put(stderr + '\n')
        if return_code != 0:
            message_queue.put(
                '[Stino - Exit with error code {0}.]\\n', return_code)
            return True
    return False
def exec_cmd(working_dir, cmd):
    """Run one build command through the shell and capture its output.

    Args:
        working_dir: IDE/toolchain root; on the avr path it is used to
            build the absolute path to the avr-* binaries.
        cmd: the command line to execute; an empty/None cmd is a no-op.

    Returns:
        (return_code, stdout, stderr) with the streams decoded using the
        detected system encoding; (0, '', '') when cmd is empty.
    """
    # Silence cygwin's DOS-path warning when the toolchain runs under cygwin.
    os.environ['CYGWIN'] = 'nodosfilewarning'
    if cmd:
        # NOTE(review): chdir to the filesystem root is a process-wide side
        # effect shared with the rest of the plugin -- confirm callers do
        # not rely on the previous working directory.
        os.chdir("/")
        if "avr-" in cmd:
            # Rewrite bare avr-* tool names into absolute, quoted paths
            # under <working_dir>\hardware\tools\avr\bin (Windows-style
            # separators; formatCommand() normalizes them below).
            cmd = cmd.replace('"','',1)
            avr = '"%s\\hardware\\tools\\avr' % working_dir
            cmd = avr + '\\bin\\' + cmd
            cmd = cmd.replace("{runtime.tools.avrdude.path}", avr)
        cmd = formatCommand(cmd)
        # NOTE(review): shell=True with a string command -- acceptable here
        # because the command comes from local build recipes, not user input.
        compile_proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                        stderr=subprocess.PIPE, shell=True)
        result = compile_proc.communicate()
        return_code = compile_proc.returncode
        stdout = result[0].decode(base.sys_info.get_sys_encoding())
        stderr = result[1].decode(base.sys_info.get_sys_encoding())
    else:
        return_code = 0
        stdout = ''
        stderr = ''
    return (return_code, stdout, stderr)
def formatCommand(cmd):
    """Normalize a command line before it is handed to the shell.

    '::' is an internal placeholder for a space; backslashes are converted
    to forward slashes, and on Windows under Python 2 the whole line is
    wrapped in quotes.
    """
    if '::' in cmd:
        cmd = cmd.replace('::', ' ')
    cmd = cmd.replace('\\', '/')
    os_name = base.sys_info.get_os_name()
    python_version = base.sys_info.get_python_version()
    if os_name == 'windows' and python_version < 3:
        cmd = '"%s"' % cmd
    return cmd
def regular_numner(num):
    """Format an integer with thousands separators (1234567 -> '1,234,567').

    (Misspelled name kept for compatibility with existing callers.)

    Args:
        num: the integer value to format.

    Returns:
        The grouped decimal string, with a correctly placed sign.

    The previous reversal-based implementation emitted a stray comma after
    the sign (e.g. '-,123' for -123) whenever a negative number's digit
    count was a multiple of three; the sign is now handled separately.
    """
    txt = str(num)
    sign = ''
    if txt.startswith('-'):
        sign, txt = '-', txt[1:]
    groups = []
    while len(txt) > 3:
        groups.insert(0, txt[-3:])
        txt = txt[:-3]
    groups.insert(0, txt)
    return sign + ','.join(groups)
def gen_core_objs(core_path, folder_prefix, build_path, is_new_build):
    """Collect object paths and (cpp, obj) rebuild pairs for a core dir.

    The core's 'libraries' subfolder is excluded from the source scan.

    Returns:
        (all_obj_paths, cpp_obj_pairs_needing_rebuild)
    """
    core_dir = base.abs_file.Dir(core_path)
    cpp_files = core_dir.recursive_list_files(
        arduino_src.CPP_EXTS, ['libraries'])
    sub_dir = folder_prefix + core_dir.get_name()
    obj_paths = gen_obj_paths(core_path, build_path, sub_dir, cpp_files)
    cpp_obj_pairs = gen_cpp_obj_pairs(
        core_path, build_path, sub_dir, cpp_files, is_new_build)
    return (obj_paths, cpp_obj_pairs)
| mit |
thomashaw/SecGen | modules/utilities/unix/ctf/metactf/files/repository/src_angr/16_angr_arbitrary_write/generate.py | 2 | 1362 | #!/usr/bin/env python
import sys, random, os, tempfile
from templite import Templite
def generate(argv):
    """Build the 16_angr_arbitrary_write challenge binary.

    Renders the C template with a seed-derived userdef string and links the
    .data section at a randomized printable address, then compiles with gcc.
    Python 2 only (print statement, xrange, str.encode('hex')).

    Args:
        argv: sys.argv-style list: [program, seed, output_file].
    """
    if len(argv) != 3:
        print 'Usage: pypy generate.py [seed] [output_file]'
        sys.exit()
    seed = argv[1]
    output_file = argv[2]
    random.seed(seed)
    # First three address bytes are 'A'-'Z'; the last byte is shifted down
    # by 0x2e so the resulting section address stays in the range the
    # challenge expects.
    rodata_tail_modifier = 0x2e
    rodata_parts = ''.join([ chr(random.randint(ord('A'), ord('Z'))) for _ in xrange(3) ]
        + [ chr(random.randint(ord('A') - rodata_tail_modifier, ord('Z') - rodata_tail_modifier)) ])
    # NOTE(review): named 'rodata' but the linker flag below relocates .data.
    rodata_address = '0x' + rodata_parts.encode('hex')
    userdef_charset = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    userdef = ''.join(random.choice(userdef_charset) for _ in range(8))
    description = ''
    with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'description.txt'), 'r') as desc_file:
        # Escape the text so it can be embedded in a C string literal.
        description = desc_file.read().encode('string_escape').replace('\"', '\\\"')
    template = open(os.path.join(os.path.dirname(os.path.realpath(__file__)), '16_angr_arbitrary_write.c.templite'), 'r').read()
    c_code = Templite(template).render(description=description, userdef=userdef)
    with tempfile.NamedTemporaryFile(delete=False, suffix='.c') as temp:
        temp.write(c_code)
        # seek() flushes the write buffer so gcc sees the complete file.
        temp.seek(0)
        # NOTE(review): delete=False leaks the temporary .c file, and the
        # gcc command is built by concatenation -- fine for a local
        # challenge generator with trusted arguments.
        os.system('gcc -m32 -fno-stack-protector -Wl,--section-start=.data=' + rodata_address + ' -o ' + output_file + ' ' + temp.name)
if __name__ == '__main__':
    # Script entry point: forward the raw command line to generate().
    generate(sys.argv)
| gpl-3.0 |
Crystalnix/BitPop | chrome/test/functional/autofill.py | 2 | 41065 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import pickle
import re
import simplejson
import autofill_dataset_converter
import autofill_dataset_generator
import pyauto_functional # Must be imported before pyauto
import pyauto
import test_utils
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from webdriver_pages import settings
class AutofillTest(pyauto.PyUITest):
"""Tests that autofill works correctly"""
def setUp(self):
  """Start the browser session and create a WebDriver handle for it."""
  pyauto.PyUITest.setUp(self)
  self._driver = self.NewWebDriver()
def Debug(self):
  """Test method for experimentation.

  This method will not run automatically.
  """
  # Interactive helper: dumps the current Autofill profile each time the
  # developer hits <enter>; loops until interrupted.
  while True:
    raw_input('Hit <enter> to dump info.. ')
    self.pprint(self.GetAutofillProfile())
def testFillProfile(self):
  """Test filling profiles and overwriting with new profiles."""
  profiles = [{'NAME_FIRST': ['Bob',],
               'NAME_LAST': ['Smith',], 'ADDRESS_HOME_ZIP': ['94043',],},
              {'EMAIL_ADDRESS': ['sue@example.com',],
               'COMPANY_NAME': ['Company X',],}]
  credit_cards = [{'CREDIT_CARD_NUMBER': '6011111111111117',
                   'CREDIT_CARD_EXP_MONTH': '12',
                   'CREDIT_CARD_EXP_4_DIGIT_YEAR': '2011'},
                  {'CREDIT_CARD_NAME': 'Bob C. Smith'}]
  self.FillAutofillProfile(profiles=profiles, credit_cards=credit_cards)
  profile = self.GetAutofillProfile()
  self.assertEqual(profiles, profile['profiles'])
  self.assertEqual(credit_cards, profile['credit_cards'])
  # Filling profiles again should replace the stored address entries while
  # leaving the previously saved credit cards untouched.
  profiles = [ {'NAME_FIRST': ['Larry']}]
  self.FillAutofillProfile(profiles=profiles)
  profile = self.GetAutofillProfile()
  self.assertEqual(profiles, profile['profiles'])
  self.assertEqual(credit_cards, profile['credit_cards'])
def testFillProfileMultiValue(self):
  """Test filling a profile with multi-value data."""
  expected_profiles = [{'NAME_FIRST': ['Bob', 'Joe'],
                        'NAME_LAST': ['Smith', 'Jones'],
                        'ADDRESS_HOME_ZIP': ['94043',],},]
  self.FillAutofillProfile(profiles=expected_profiles)
  stored = self.GetAutofillProfile()
  # Both values of each multi-valued field must round-trip unchanged.
  self.assertEqual(expected_profiles, stored['profiles'])
def testFillProfileCrazyCharacters(self):
  """Test filling profiles with unicode strings and crazy characters."""
  # Adding autofill profiles.
  file_path = os.path.join(self.DataDir(), 'autofill', 'functional',
                           'crazy_autofill.txt')
  profiles = self.EvalDataFrom(file_path)
  self.FillAutofillProfile(profiles=profiles)
  self.assertEqual(profiles, self.GetAutofillProfile()['profiles'],
                   msg='Autofill profile data does not match.')
  # Adding credit cards.  The fixture supplies both the raw 'input' values
  # and the normalized 'expected' values Autofill should persist.
  file_path = os.path.join(self.DataDir(), 'autofill', 'functional',
                           'crazy_creditcards.txt')
  test_data = self.EvalDataFrom(file_path)
  credit_cards_input = test_data['input']
  self.FillAutofillProfile(credit_cards=credit_cards_input)
  self.assertEqual(test_data['expected'],
                   self.GetAutofillProfile()['credit_cards'],
                   msg='Autofill credit card data does not match.')
def testGetProfilesEmpty(self):
  """Verify a fresh profile store has no addresses or credit cards."""
  stored = self.GetAutofillProfile()
  self.assertEqual([], stored['profiles'])
  self.assertEqual([], stored['credit_cards'])
def testAutofillInvalid(self):
  """Test filling in invalid values for profiles are saved as-is.

  Phone information entered into the prefs UI is not validated or rejected
  except for duplicates.
  """
  # First try profiles with invalid ZIP input.
  without_invalid = {'NAME_FIRST': ['Will',],
                     'ADDRESS_HOME_CITY': ['Sunnyvale',],
                     'ADDRESS_HOME_STATE': ['CA',],
                     'ADDRESS_HOME_ZIP': ['my_zip',],
                     'ADDRESS_HOME_COUNTRY': ['United States',]}
  # Add invalid data for phone field.
  with_invalid = without_invalid.copy()
  with_invalid['PHONE_HOME_WHOLE_NUMBER'] = ['Invalid_Phone_Number',]
  self.FillAutofillProfile(profiles=[with_invalid])
  # If prefs had validated (stripped) the phone, the stored profile would
  # equal the copy without it; assert that is not what happened.
  self.assertNotEqual(
      [without_invalid], self.GetAutofillProfile()['profiles'],
      msg='Phone data entered into prefs UI is validated.')
def testAutofillPrefsStringSavedAsIs(self):
  """Test invalid credit card numbers typed in prefs should be saved as-is."""
  card = {'CREDIT_CARD_NUMBER': 'Not_0123-5Checked'}
  self.FillAutofillProfile(credit_cards=[card])
  stored_cards = self.GetAutofillProfile()['credit_cards']
  self.assertEqual([card], stored_cards,
                   msg='Credit card number in prefs not saved as-is.')
def _WaitForWebpageFormReadyToFillIn(self, form_profile, tab_index, windex):
  """Waits until an autofill form on a webpage is ready to be filled in.

  A call to NavigateToURL() may return before all form elements on the page
  are ready to be accessed. This function waits until they are ready to be
  filled in.

  Args:
    form_profile: A dictionary representing an autofill profile in which the
        keys are strings corresponding to webpage element IDs.
    tab_index: The index of the tab containing the webpage form to check.
    windex: The index of the window containing the webpage form to check.
  """
  # One getElementById() existence check per expected form field.
  field_check_code = ''.join(
      ['if (!document.getElementById("%s")) ready = "false";' %
       key for key in form_profile.keys()])
  # The injected script reports 'true' only when #testform and every
  # profile field already exist in the DOM.
  js = """
      var ready = 'true';
      if (!document.getElementById("testform"))
        ready = 'false';
      %s
      window.domAutomationController.send(ready);
      """ % field_check_code
  self.assertTrue(
      self.WaitUntil(lambda: self.ExecuteJavascript(js, tab_index, windex),
                     expect_retval='true'),
      msg='Timeout waiting for webpage form to be ready to be filled in.')
def _FillFormAndSubmit(self, datalist, filename, tab_index=0, windex=0):
  """Navigate to the form, input values into the fields, and submit the form.

  If multiple profile dictionaries are specified as input, this function will
  repeatedly navigate to the form, fill it out, and submit it, once for each
  specified profile dictionary.

  Args:
    datalist: A list of dictionaries, where each dictionary represents the
        key/value pairs for profiles or credit card values.
    filename: HTML form website file. The file is the basic file name and not
        the path to the file. File is assumed to be located in
        autofill/functional directory of the data folder.
    tab_index: Integer index of the tab to work on; defaults to 0 (first tab).
    windex: Integer index of the browser window to work on; defaults to 0
        (first window).
  """
  url = self.GetHttpURLForDataPath('autofill', 'functional', filename)
  for profile in datalist:
    self.NavigateToURL(url)
    self._WaitForWebpageFormReadyToFillIn(profile, tab_index, windex)
    # Fill in and submit the form via injected JavaScript; profile keys
    # double as the DOM element ids on the fixture pages.
    js = ''.join(['document.getElementById("%s").value = "%s";' %
                  (key, value) for key, value in profile.iteritems()])
    js += 'document.getElementById("testform").submit();'
    self.SubmitAutofillForm(js, tab_index=tab_index, windex=windex)
def _LuhnCreditCardNumberValidator(self, number):
"""Validates whether a number is valid or invalid using the Luhn test.
Validation example:
1. Example number: 49927398716
2. Reverse the digits: 61789372994
3. Sum the digits in the odd-numbered position for s1:
6 + 7 + 9 + 7 + 9 + 4 = 42
4. Take the digits in the even-numbered position: 1, 8, 3, 2, 9
4.1. Two times each digit in the even-numbered position: 2, 16, 6, 4, 18
4.2. For each resulting value that is now 2 digits, add the digits
together: 2, 7, 6, 4, 9
(0 + 2 = 2, 1 + 6 = 7, 0 + 6 = 6, 0 + 4 = 4, 1 + 8 = 9)
4.3. Sum together the digits for s2: 2 + 7 + 6 + 4 + 9 = 28
5. Sum together s1 + s2 and if the sum ends in zero, the number passes the
Luhn test: 42 + 28 = 70 which is a valid credit card number.
Args:
number: the credit card number being validated, as a string.
Returns:
boolean whether the credit card number is valid or not.
"""
# Filters out non-digit characters.
number = re.sub('[^0-9]', '', number)
reverse = [int(ch) for ch in str(number)][::-1]
# The divmod of the function splits a number into two digits, ready for
# summing.
return ((sum(reverse[0::2]) + sum(sum(divmod(d*2, 10))
for d in reverse[1::2])) % 10 == 0)
def testInvalidCreditCardNumberIsNotAggregated(self):
  """Test credit card info with an invalid number is not aggregated.

  When filling out a form with an invalid credit card number (one that
  does not pass the Luhn test) the credit card info should not be saved into
  Autofill preferences.
  """
  invalid_cc_info = {'CREDIT_CARD_NAME': 'Bob Smith',
                     'CREDIT_CARD_NUMBER': '4408 0412 3456 7890',
                     'CREDIT_CARD_EXP_MONTH': '12',
                     'CREDIT_CARD_EXP_4_DIGIT_YEAR': '2014'}
  cc_number = invalid_cc_info['CREDIT_CARD_NUMBER']
  self._FillFormAndSubmit([invalid_cc_info], 'autofill_creditcard_form.html',
                          tab_index=0, windex=0)
  # Sanity check: the fixture number must really fail the Luhn test.
  self.assertFalse(self._LuhnCreditCardNumberValidator(cc_number),
                   msg='This test requires an invalid credit card number.')
  # No save-credit-card infobar should have appeared.
  cc_infobar = self.GetBrowserInfo()['windows'][0]['tabs'][0]['infobars']
  self.assertFalse(
      cc_infobar, msg='Save credit card infobar offered to save CC info.')
def testWhitespacesAndSeparatorCharsStrippedForValidCCNums(self):
  """Test whitespaces and separator chars are stripped for valid CC numbers.

  The credit card numbers used in this test pass the Luhn test.
  For reference: http://www.merriampark.com/anatomycc.htm
  """
  credit_card_info = [{'CREDIT_CARD_NAME': 'Bob Smith',
                       'CREDIT_CARD_NUMBER': '4408 0412 3456 7893',
                       'CREDIT_CARD_EXP_MONTH': '12',
                       'CREDIT_CARD_EXP_4_DIGIT_YEAR': '2014'},
                      {'CREDIT_CARD_NAME': 'Jane Doe',
                       'CREDIT_CARD_NUMBER': '4417-1234-5678-9113',
                       'CREDIT_CARD_EXP_MONTH': '10',
                       'CREDIT_CARD_EXP_4_DIGIT_YEAR': '2013'}]
  url = self.GetHttpURLForDataPath(
      'autofill', 'functional', 'autofill_creditcard_form.html')
  for cc_info in credit_card_info:
    # Sanity check: only Luhn-valid numbers get aggregated at all.
    self.assertTrue(
        self._LuhnCreditCardNumberValidator(cc_info['CREDIT_CARD_NUMBER']),
        msg='This test requires a valid credit card number.')
    self.NavigateToURL(url)
    self._WaitForWebpageFormReadyToFillIn(cc_info, 0, 0)
    # Fill in and submit the form.
    js = ''.join(['document.getElementById("%s").value = "%s";' %
                  (key, value) for key, value in cc_info.iteritems()])
    js += 'document.getElementById("testform").submit();'
    self.SubmitAutofillForm(js, tab_index=0, windex=0)
  # Verify the filled-in credit card number against the aggregated number.
  aggregated_cc_1 = (
      self.GetAutofillProfile()['credit_cards'][0]['CREDIT_CARD_NUMBER'])
  aggregated_cc_2 = (
      self.GetAutofillProfile()['credit_cards'][1]['CREDIT_CARD_NUMBER'])
  self.assertFalse((' ' in aggregated_cc_1 or ' ' in aggregated_cc_2 or
                    '-' in aggregated_cc_1 or '-' in aggregated_cc_2),
                   msg='Whitespaces or separator chars not stripped.')
def testAggregatesMinValidProfile(self):
  """Test that Autofill aggregates a minimum valid profile.

  The minimum required address fields must be specified: First Name,
  Last Name, Address Line 1, City, Zip Code, and State.
  """
  minimal_profile = {'NAME_FIRST': 'Bob',
                     'NAME_LAST': 'Smith',
                     'ADDRESS_HOME_LINE1': '1234 H St.',
                     'ADDRESS_HOME_CITY': 'Mountain View',
                     'ADDRESS_HOME_STATE': 'CA',
                     'ADDRESS_HOME_ZIP': '95110'}
  self._FillFormAndSubmit(
      [minimal_profile], 'duplicate_profiles_test.html', tab_index=0,
      windex=0)
  aggregated = self.GetAutofillProfile()['profiles']
  self.assertTrue(aggregated,
                  msg='Profile with minimum address values not aggregated.')
def testProfilesNotAggregatedWithNoAddress(self):
  """Test Autofill does not aggregate profiles with no address info.

  The minimum required address fields must be specified: First Name,
  Last Name, Address Line 1, City, Zip Code, and State.
  """
  # Deliberately omits ADDRESS_HOME_LINE1, _STATE and _ZIP.
  profile = {'NAME_FIRST': 'Bob',
             'NAME_LAST': 'Smith',
             'EMAIL_ADDRESS': 'bsmith@example.com',
             'COMPANY_NAME': 'Company X',
             'ADDRESS_HOME_CITY': 'Mountain View',
             'PHONE_HOME_WHOLE_NUMBER': '650-555-4567',}
  self._FillFormAndSubmit(
      [profile], 'duplicate_profiles_test.html', tab_index=0, windex=0)
  self.assertFalse(self.GetAutofillProfile()['profiles'],
                   msg='Profile with no address info was aggregated.')
def testProfilesNotAggregatedWithInvalidEmail(self):
  """Test Autofill does not aggregate profiles with an invalid email."""
  # 'garbage' is not a syntactically valid email address.
  profile = {'NAME_FIRST': 'Bob',
             'NAME_LAST': 'Smith',
             'EMAIL_ADDRESS': 'garbage',
             'ADDRESS_HOME_LINE1': '1234 H St.',
             'ADDRESS_HOME_CITY': 'San Jose',
             'ADDRESS_HOME_STATE': 'CA',
             'ADDRESS_HOME_ZIP': '95110',
             'COMPANY_NAME': 'Company X',
             'PHONE_HOME_WHOLE_NUMBER': '408-871-4567',}
  self._FillFormAndSubmit(
      [profile], 'duplicate_profiles_test.html', tab_index=0, windex=0)
  self.assertFalse(self.GetAutofillProfile()['profiles'],
                   msg='Profile with invalid email was aggregated.')
def testComparePhoneNumbers(self):
  """Test phone fields parse correctly from a given profile.

  The high level key presses execute the following: Select the first text
  field, invoke the autofill popup list, select the first profile within the
  list, and commit to the profile to populate the form.
  """
  # Input profiles and the values they are expected to produce in the
  # form's split phone fields live in separate fixture files.
  profile_path = os.path.join(self.DataDir(), 'autofill', 'functional',
                              'phone_pinput_autofill.txt')
  profile_expected_path = os.path.join(
      self.DataDir(), 'autofill', 'functional',
      'phone_pexpected_autofill.txt')
  profiles = self.EvalDataFrom(profile_path)
  profiles_expected = self.EvalDataFrom(profile_expected_path)
  self.FillAutofillProfile(profiles=profiles)
  url = self.GetHttpURLForDataPath(
      'autofill', 'functional', 'form_phones.html')
  for profile_expected in profiles_expected:
    self.NavigateToURL(url)
    self.assertTrue(self.AutofillPopulateForm('NAME_FIRST'),
                    msg='Autofill form could not be populated.')
    # Read each populated field back out of the DOM and compare it to the
    # expected parsed value.
    form_values = {}
    for key, value in profile_expected.iteritems():
      js_returning_field_value = (
          'var field_value = document.getElementById("%s").value;'
          'window.domAutomationController.send(field_value);'
      ) % key
      form_values[key] = self.ExecuteJavascript(
          js_returning_field_value, 0, 0)
      self.assertEqual(
          [form_values[key]], value,
          msg=('Original profile not equal to expected profile at key: "%s"\n'
               'Expected: "%s"\nReturned: "%s"' % (
                   key, value, [form_values[key]])))
def testProfileSavedWithValidCountryPhone(self):
  """Test profile is saved if phone number is valid in selected country.

  The data file contains two profiles with valid phone numbers and two
  profiles with invalid phone numbers from their respective country.
  """
  data_path = os.path.join(self.DataDir(), 'autofill', 'functional',
                           'phonechecker.txt')
  profiles_list = self.EvalDataFrom(data_path)
  self._FillFormAndSubmit(profiles_list, 'autofill_test_form.html',
                          tab_index=0, windex=0)
  # Only the two country-valid numbers should survive aggregation.
  num_profiles = len(self.GetAutofillProfile()['profiles'])
  self.assertEqual(2, num_profiles,
                   msg='Expected 2 profiles, but got %d.' % num_profiles)
def testCharsStrippedForAggregatedPhoneNumbers(self):
  """Test aggregated phone numbers are standardized (not saved "as-is")."""
  profiles_list = self.EvalDataFrom(
      os.path.join(self.DataDir(), 'autofill', 'functional',
                   'phonecharacters.txt'))
  self._FillFormAndSubmit(profiles_list, 'autofill_test_form.html',
                          tab_index=0, windex=0)
  # First aggregated profile holds the US number, the second the German one.
  us_phone = self.GetAutofillProfile()[
      'profiles'][0]['PHONE_HOME_WHOLE_NUMBER']
  de_phone = self.GetAutofillProfile()[
      'profiles'][1]['PHONE_HOME_WHOLE_NUMBER']
  self.assertEqual(
      ['+1 408-871-4567',], us_phone,
      msg='Aggregated US phone number %s not standardized.' % us_phone)
  self.assertEqual(
      ['+49 40/808179000',], de_phone,
      msg='Aggregated Germany phone number %s not standardized.' % de_phone)
def testAppendCountryCodeForAggregatedPhones(self):
  """Test Autofill appends country codes to aggregated phone numbers.

  The country code is added for the following case:
    The phone number contains the correct national number size and
    is a valid format.
  """
  profile = {'NAME_FIRST': 'Bob',
             'NAME_LAST': 'Smith',
             'ADDRESS_HOME_LINE1': '1234 H St.',
             'ADDRESS_HOME_CITY': 'San Jose',
             'ADDRESS_HOME_STATE': 'CA',
             'ADDRESS_HOME_ZIP': '95110',
             'ADDRESS_HOME_COUNTRY': 'Germany',
             'PHONE_HOME_WHOLE_NUMBER': '(08) 450 777-777',}
  self._FillFormAndSubmit(
      [profile], 'autofill_test_form.html', tab_index=0, windex=0)
  de_phone = self.GetAutofillProfile()[
      'profiles'][0]['PHONE_HOME_WHOLE_NUMBER']
  # Only the '+49' prefix is checked; number formatting is covered by
  # testCharsStrippedForAggregatedPhoneNumbers.
  self.assertEqual(
      '+49', de_phone[0][:3],
      msg='Country code missing from phone number %s.' % de_phone)
def testCCInfoNotStoredWhenAutocompleteOff(self):
  """Test CC info not offered to be saved when autocomplete=off for CC field.

  If the credit card number field has autocomplete turned off, then the credit
  card infobar should not offer to save the credit card info. The credit card
  number must be a valid Luhn number.
  """
  credit_card_info = {'CREDIT_CARD_NAME': 'Bob Smith',
                      'CREDIT_CARD_NUMBER': '4408041234567893',
                      'CREDIT_CARD_EXP_MONTH': '12',
                      'CREDIT_CARD_EXP_4_DIGIT_YEAR': '2014'}
  self._FillFormAndSubmit(
      [credit_card_info], 'cc_autocomplete_off_test.html',
      tab_index=0, windex=0)
  # Despite the valid number, no save-credit-card infobar should appear.
  cc_infobar = self.GetBrowserInfo()['windows'][0]['tabs'][0]['infobars']
  self.assertFalse(cc_infobar,
                   msg='Save credit card infobar offered to save CC info.')
def testNoAutofillForReadOnlyFields(self):
  """Test that Autofill does not fill in read-only fields."""
  profile = {'NAME_FIRST': ['Bob',],
             'NAME_LAST': ['Smith',],
             'EMAIL_ADDRESS': ['bsmith@gmail.com',],
             'ADDRESS_HOME_LINE1': ['1234 H St.',],
             'ADDRESS_HOME_CITY': ['San Jose',],
             'ADDRESS_HOME_STATE': ['CA',],
             'ADDRESS_HOME_ZIP': ['95110',],
             'COMPANY_NAME': ['Company X',],
             'PHONE_HOME_WHOLE_NUMBER': ['408-871-4567',],}
  self.FillAutofillProfile(profiles=[profile])
  url = self.GetHttpURLForDataPath(
      'autofill', 'functional', 'read_only_field_test.html')
  self.NavigateToURL(url)
  self.assertTrue(self.AutofillPopulateForm('firstname'),
                  msg='Autofill form could not be populated.')
  # Per the fixture page, 'email' is presumably the read-only input and
  # 'address' a regular writable field used as the positive control.
  js_return_readonly_field = (
      'var field_value = document.getElementById("email").value;'
      'window.domAutomationController.send(field_value);')
  readonly_field_value = self.ExecuteJavascript(
      js_return_readonly_field, 0, 0)
  js_return_addrline1_field = (
      'var field_value = document.getElementById("address").value;'
      'window.domAutomationController.send(field_value);')
  addrline1_field_value = self.ExecuteJavascript(
      js_return_addrline1_field, 0, 0)
  self.assertNotEqual(
      readonly_field_value, profile['EMAIL_ADDRESS'][0],
      'Autofill filled in value "%s" for a read-only field.'
      % readonly_field_value)
  self.assertEqual(
      addrline1_field_value, profile['ADDRESS_HOME_LINE1'][0],
      'Unexpected value "%s" in the Address field.' % addrline1_field_value)
def testFormFillableOnReset(self):
  """Test form is fillable from a profile after form was reset.

  Steps:
    1. Fill form using a saved profile.
    2. Reset the form.
    3. Fill form using a saved profile.
  """
  profile = {'NAME_FIRST': ['Bob',],
             'NAME_LAST': ['Smith',],
             'EMAIL_ADDRESS': ['bsmith@gmail.com',],
             'ADDRESS_HOME_LINE1': ['1234 H St.',],
             'ADDRESS_HOME_CITY': ['San Jose',],
             'PHONE_HOME_WHOLE_NUMBER': ['4088714567',],}
  self.FillAutofillProfile(profiles=[profile])
  url = self.GetHttpURLForDataPath(
      'autofill', 'functional', 'autofill_test_form.html')
  self.NavigateToURL(url)
  # Fill form using an address profile.
  self.assertTrue(self.AutofillPopulateForm('NAME_FIRST'),
                  msg='Autofill form could not be populated.')
  # Reset the form via JavaScript.
  self.ExecuteJavascript('document.getElementById("testform").reset();'
                         'window.domAutomationController.send("done");',
                         0, 0)
  # Fill in the form using an Autofill profile a second time.
  self.assertTrue(self.AutofillPopulateForm('NAME_FIRST'),
                  msg='Autofill form could not be populated.')
  # Verify value in fields match value in the profile dictionary.
  form_values = {}
  for key, value in profile.iteritems():
    js_returning_field_value = (
        'var field_value = document.getElementById("%s").value;'
        'window.domAutomationController.send(field_value);'
    ) % key
    form_values[key] = self.ExecuteJavascript(
        js_returning_field_value, 0, 0)
    self.assertEqual(
        [form_values[key]], value,
        msg=('Original profile not equal to expected profile at key: "%s"\n'
             'Expected: "%s"\nReturned: "%s"' % (
                 key, value, [form_values[key]])))
def testDistinguishMiddleInitialWithinName(self):
  """Test Autofill distinguishes a middle initial in a name."""
  profile = {'NAME_FIRST': ['Bob',],
             'NAME_MIDDLE': ['Leo',],
             'NAME_LAST': ['Smith',],
             'EMAIL_ADDRESS': ['bsmith@gmail.com',],
             'ADDRESS_HOME_LINE1': ['1234 H St.',],
             'ADDRESS_HOME_CITY': ['San Jose',],
             'PHONE_HOME_WHOLE_NUMBER': ['4088714567',],}
  # First character of the middle name, i.e. 'L'.
  middle_initial = profile['NAME_MIDDLE'][0][0]
  self.FillAutofillProfile(profiles=[profile])
  url = self.GetHttpURLForDataPath(
      'autofill', 'functional', 'autofill_middleinit_form.html')
  self.NavigateToURL(url)
  # Fill form using an address profile.
  self.assertTrue(self.AutofillPopulateForm('NAME_FIRST'),
                  msg='Autofill form could not be populated.')
  # The middle-initial form field should receive just the initial, not
  # the full middle name.
  js_return_middleinit_field = (
      'var field_value = document.getElementById("NAME_MIDDLE").value;'
      'window.domAutomationController.send(field_value);')
  middleinit_field_value = self.ExecuteJavascript(
      js_return_middleinit_field, 0, 0)
  self.assertEqual(middleinit_field_value, middle_initial,
                   msg=('Middle initial "%s" not distinguished from "%s".' %
                        (middleinit_field_value, profile['NAME_MIDDLE'])))
def testMultipleEmailFilledByOneUserGesture(self):
  """Test forms with multiple email addresses are filled properly.

  Entire form should be filled with one user gesture.
  """
  profile = {'NAME_FIRST': ['Bob',],
             'NAME_LAST': ['Smith',],
             'EMAIL_ADDRESS': ['bsmith@gmail.com',],
             'PHONE_HOME_WHOLE_NUMBER': ['4088714567',],}
  self.FillAutofillProfile(profiles=[profile])
  url = self.GetHttpURLForDataPath(
      'autofill', 'functional', 'autofill_confirmemail_form.html')
  self.NavigateToURL(url)
  # Fill form using an address profile.
  self.assertTrue(self.AutofillPopulateForm('NAME_FIRST'),
                  msg='Autofill form could not be populated.')
  # The confirmation email field must have been filled by the same single
  # autofill gesture as the primary email field.
  js_return_confirmemail_field = (
      'var field_value = document.getElementById("EMAIL_CONFIRM").value;'
      'window.domAutomationController.send(field_value);')
  confirmemail_field_value = self.ExecuteJavascript(
      js_return_confirmemail_field, 0, 0)
  self.assertEqual([confirmemail_field_value], profile['EMAIL_ADDRESS'],
                   msg=('Confirmation Email address "%s" not equal to Email\n'
                        'address "%s".' % ([confirmemail_field_value],
                                           profile['EMAIL_ADDRESS'])))
def testProfileWithEmailInOtherFieldNotSaved(self):
  """Test profile not aggregated if email found in non-email field."""
  # An email address placed in ADDRESS_HOME_LINE1 marks the submission as
  # suspect, so nothing should be aggregated.
  profile = {'NAME_FIRST': 'Bob',
             'NAME_LAST': 'Smith',
             'ADDRESS_HOME_LINE1': 'bsmith@gmail.com',
             'ADDRESS_HOME_CITY': 'San Jose',
             'ADDRESS_HOME_STATE': 'CA',
             'ADDRESS_HOME_ZIP': '95110',
             'COMPANY_NAME': 'Company X',
             'PHONE_HOME_WHOLE_NUMBER': '408-871-4567',}
  self._FillFormAndSubmit(
      [profile], 'duplicate_profiles_test.html', tab_index=0, windex=0)
  self.assertFalse(self.GetAutofillProfile()['profiles'],
                   msg='Profile with email in a non-email field was '
                       'aggregated.')
def FormFillLatencyAfterSubmit(self):
  """Test latency time on form submit with lots of stored Autofill profiles.

  This test verifies when a profile is selected from the Autofill dictionary
  that consists of thousands of profiles, the form does not hang after being
  submitted.

  The high level key presses execute the following: Select the first text
  field, invoke the autofill popup list, select the first profile within the
  list, and commit to the profile to populate the form.

  This test is partially automated. The bulk of the work is done, such as
  generating 1500 plus profiles, inserting those profiles into Autofill,
  selecting a profile from the list. The tester will need to click on the
  submit button and check if the browser hangs.
  """
  # HTML file needs to be run from a http:// url.
  url = self.GetHttpURLForDataPath(
      'autofill', 'functional', 'latency_after_submit_test.html')
  # Run the generator script to generate the dictionary list needed for the
  # profiles.
  gen = autofill_dataset_generator.DatasetGenerator(
      logging_level=logging.ERROR)
  list_of_dict = gen.GenerateDataset(num_of_dict_to_generate=1501)
  self.FillAutofillProfile(profiles=list_of_dict)
  self.NavigateToURL(url)
  self.assertTrue(self.AutofillPopulateForm('NAME_FIRST'),
                  msg='Autofill form could not be populated.')
  # TODO(dyu): add automated form hang or crash verification.
  raw_input(
      'Verify the test manually. Test hang time after submitting the form.')
def AutofillCrowdsourcing(self):
  """Test able to send POST request of web form to Autofill server.

  The Autofill server processes the data offline, so it can take a few days
  for the result to be detectable. Manual verification is required.
  """
  # HTML file needs to be run from a specific http:// url to be able to verify
  # the results a few days later by visiting the same url.
  url = 'http://www.corp.google.com/~dyu/autofill/crowdsourcing-test.html'
  # Adding crowdsourcing Autofill profile.
  file_path = os.path.join(self.DataDir(), 'autofill', 'functional',
                           'crowdsource_autofill.txt')
  profiles = self.EvalDataFrom(file_path)
  self.FillAutofillProfile(profiles=profiles)
  # Autofill server captures 2.5% of the data posted.
  # Looping 1000 times is a safe minimum to exceed the server's threshold or
  # noise.  (The loop index itself is unused.)
  for i in range(1000):
    fname = self.GetAutofillProfile()['profiles'][0]['NAME_FIRST'][0]
    lname = self.GetAutofillProfile()['profiles'][0]['NAME_LAST'][0]
    email = self.GetAutofillProfile()['profiles'][0]['EMAIL_ADDRESS'][0]
    # Submit form to collect crowdsourcing data for Autofill.
    self.NavigateToURL(url, 0, 0)
    profile = {'fn': fname, 'ln': lname, 'em': email}
    self._WaitForWebpageFormReadyToFillIn(profile, 0, 0)
    js = ''.join(['document.getElementById("%s").value = "%s";' %
                  (key, value) for key, value in profile.iteritems()])
    js += 'document.getElementById("testform").submit();'
    self.SubmitAutofillForm(js, tab_index=0, windex=0)
def testSameAddressProfilesAddInPrefsDontMerge(self):
  """Test profiles added through prefs with same address do not merge."""
  # Two distinct people sharing one postal address; merging (which applies
  # only to form-aggregated data) must not collapse prefs-entered profiles.
  profileA = {'NAME_FIRST': ['John',],
              'NAME_LAST': ['Doe',],
              'ADDRESS_HOME_LINE1': ['123 Cherry St',],
              'ADDRESS_HOME_CITY': ['Mountain View',],
              'ADDRESS_HOME_STATE': ['CA',],
              'ADDRESS_HOME_ZIP': ['94043',],
              'PHONE_HOME_WHOLE_NUMBER': ['650-555-1234',],}
  profileB = {'NAME_FIRST': ['Jane',],
              'NAME_LAST': ['Smith',],
              'ADDRESS_HOME_LINE1': ['123 Cherry St',],
              'ADDRESS_HOME_CITY': ['Mountain View',],
              'ADDRESS_HOME_STATE': ['CA',],
              'ADDRESS_HOME_ZIP': ['94043',],
              'PHONE_HOME_WHOLE_NUMBER': ['650-253-1234',],}
  profiles_list = [profileA, profileB]
  self.FillAutofillProfile(profiles=profiles_list)
  self.assertEqual(2, len(self.GetAutofillProfile()['profiles']),
                   msg='Profiles in prefs with same address merged.')
def testMergeAggregatedProfilesWithSameAddress(self):
  """Test that profiles merge for aggregated data with same address.

  The criterion for when two profiles are expected to be merged is when their
  'Address Line 1' and 'City' data match. When two profiles are merged, any
  remaining address fields are expected to be overwritten. Any non-address
  fields should accumulate multi-valued data.
  """
  self._AggregateProfilesIntoAutofillPrefs('dataset_2.txt')
  # The 14 input profiles must collapse to 3 distinct addresses.
  merged_count = len(self.GetAutofillProfile()['profiles'])
  self.assertEqual(3, merged_count,
                   msg='Aggregated profiles did not merge correctly.')
def testProfilesNotMergedWhenNoMinAddressData(self):
  """Test profiles are not merged without mininum address values.

  Mininum address values needed during aggregation are: address line 1, city,
  state, and zip code.

  Profiles are merged when data for address line 1 and city match.
  """
  self._AggregateProfilesIntoAutofillPrefs('dataset_no_address.txt')
  stored_profiles = self.GetAutofillProfile()['profiles']
  self.assertFalse(stored_profiles,
                   msg='Profile with no min address data was merged.')
def MergeAggregatedDuplicatedProfiles(self):
"""Test Autofill ability to merge duplicate profiles and throw away junk."""
num_of_profiles = self._AggregateProfilesIntoAutofillPrefs('dataset.txt')
# Verify total number of inputted profiles is greater than the final number
# of profiles after merging.
self.assertTrue(
num_of_profiles > len(self.GetAutofillProfile()['profiles']))
  def _AggregateProfilesIntoAutofillPrefs(self, data):
    """Aggregate profiles from forms into Autofill preferences.

    Each profile in the data set is typed into the test form via injected
    JavaScript and submitted, letting the browser's Autofill aggregation
    logic decide what to store/merge.

    Args:
      data: Name of the data set file.

    Returns:
      Number of profiles in the dictionary list.
    """
    # HTML file needs to be run from a http:// url.
    url = self.GetHttpURLForDataPath(
        'autofill', 'functional', 'duplicate_profiles_test.html')
    # Run the parser script to generate the dictionary list needed for the
    # profiles.
    c = autofill_dataset_converter.DatasetConverter(
        os.path.abspath(
            os.path.join(self.DataDir(), 'autofill', 'functional', data)),
        logging_level=logging.INFO)  # Set verbosity to INFO, WARNING, ERROR.
    list_of_dict = c.Convert()

    for profile in list_of_dict:
      self.NavigateToURL(url)
      # Wait until the form is fillable before injecting values.
      self._WaitForWebpageFormReadyToFillIn(profile, 0, 0)
      # Build one JS statement per field, then submit (iteritems: Python 2).
      js = ''.join(['document.getElementById("%s").value = "%s";' %
                    (key, value) for key, value in profile.iteritems()])
      js += 'document.getElementById("testform").submit();'
      self.SubmitAutofillForm(js, tab_index=0, windex=0)
    return len(list_of_dict)
def _SelectOptionXpath(self, value):
"""Returns an xpath query used to select an item from a dropdown list.
Args:
value: Option selected for the drop-down list field.
Returns:
The value of the xpath query.
"""
return '//option[@value="%s"]' % value
  def testPostalCodeAndStateLabelsBasedOnCountry(self):
    """Verify postal code and state labels based on selected country."""
    # Expected labels per country code live in a JSON-formatted data file.
    data_file = os.path.join(self.DataDir(), 'autofill', 'functional',
                             'state_zip_labels.txt')
    test_data = simplejson.loads(open(data_file).read())

    page = settings.AutofillEditAddressDialog.FromNavigation(self._driver)
    # Initial check of State and ZIP labels (US defaults).
    self.assertEqual('State', page.GetStateLabel())
    self.assertEqual('ZIP code', page.GetPostalCodeLabel())

    for country_code in test_data:
      page.Fill(country_code=country_code)

      # Compare postal code labels.
      actual_postal_label = page.GetPostalCodeLabel()
      self.assertEqual(
          test_data[country_code]['postalCodeLabel'],
          actual_postal_label,
          msg=('Postal code label "%s" does not match Country "%s"' %
               (actual_postal_label, country_code)))

      # Compare state labels.
      actual_state_label = page.GetStateLabel()
      self.assertEqual(
          test_data[country_code]['stateLabel'],
          actual_state_label,
          msg=('State label "%s" does not match Country "%s"' %
               (actual_state_label, country_code)))
  def testNoDuplicatePhoneNumsInPrefs(self):
    """Test duplicate phone numbers entered in prefs are removed."""
    page = settings.AutofillEditAddressDialog.FromNavigation(self._driver)
    non_duplicates = ['111-1111', '222-2222']
    duplicates = ['111-1111']  # repeats the first non-duplicate entry
    page.Fill(phones=non_duplicates + duplicates)
    # Only the unique numbers should have been persisted.
    self.assertEqual(non_duplicates, page.GetPhones(),
                     msg='Duplicate phone number in prefs unexpectedly saved.')
  def testDisplayLineItemForEntriesWithNoCCNum(self):
    """Verify Autofill creates a line item for CC entries with no CC number."""
    self.NavigateToURL('chrome://settings-frame/autofillEditCreditCard')
    # Fill only the cardholder name and expiry; the card number is left blank
    # on purpose.
    self._driver.find_element_by_id('name-on-card').send_keys('Jane Doe')
    query_month = self._SelectOptionXpath('12')
    query_year = self._SelectOptionXpath('2014')
    self._driver.find_element_by_id('expiration-month').find_element_by_xpath(
        query_month).click()
    self._driver.find_element_by_id('expiration-year').find_element_by_xpath(
        query_year).click()
    self._driver.find_element_by_id(
        'autofill-edit-credit-card-apply-button').click()
    # Refresh the page to ensure the UI is up-to-date.
    self._driver.refresh()
    list_entry = self._driver.find_element_by_class_name('autofill-list-item')
    self.assertTrue(list_entry.is_displayed)
    self.assertEqual('Jane Doe', list_entry.text,
                     msg='Saved CC line item not same as what was entered.')
def _GetElementList(self, container_elem, fields_to_select):
"""Returns all sub elements of specific characteristics.
Args:
container_elem: An element that contains other elements.
fields_to_select: A list of fields to select with strings that
help create an xpath string, which in turn identifies
the elements needed.
For example: ['input', 'button']
['div[@id]', 'button[@disabled]']
['*[class="example"]']
Returns:
List of all subelements found in the container element.
"""
self.assertTrue(fields_to_select, msg='No fields specified for selection.')
fields_to_select = ['.//' + field for field in fields_to_select]
xpath_arg = ' | '.join(fields_to_select)
field_elems = container_elem.find_elements_by_xpath(xpath_arg)
return field_elems
def _GetElementInfo(self, element):
"""Returns visual comprehensive info about an element.
This function identifies the text of the correspoinding label when tab
ordering fails.
This info consists of:
The labels, buttons, ids, placeholder attribute values, or the element id.
Args:
element: The target element.
Returns:
A string that identifies the element in the page.
"""
element_info = ''
if element.tag_name == 'button':
element_info = element.text
element_info = (element_info or element.get_attribute('id') or
element.get_attribute('placeholder') or
element.get_attribute('class') or element.id)
return '%s: %s' % (element.tag_name, element_info)
  def _LoadPageAndGetFieldList(self):
    """Navigate to autofillEditAddress page and finds the elements with focus.

    These elements are of input, select, and button types.

    Returns:
      A list with all elements that can receive focus.
    """
    url = 'chrome://settings-frame/autofillEditAddress'
    self._driver.get(url)
    container_elem = self._driver.find_element_by_id(
        'autofill-edit-address-overlay')
    # The container element contains input, select and button fields. Some of
    # the buttons are disabled so they are ignored.
    field_list = self._GetElementList(container_elem,
                                      ['input', 'select',
                                       'button[not(@disabled)]'])
    self.assertTrue(field_list, 'No fields found in "%s".' % url)
    return field_list
  def testTabOrderForEditAddress(self):
    """Verify the TAB ordering for Edit Address page is correct."""
    tab_press = ActionChains(self._driver).send_keys(Keys.TAB)
    field_list = self._LoadPageAndGetFieldList()
    # Creates a dictionary where a field key returns the value of the next field
    # in the field list. The last field of the field list is mapped to the first
    # field of the field list.
    field_nextfield_dict = dict(
        zip(field_list, field_list[1:] + field_list[:1]))
    # Tab pressed for the first time.
    tab_press.perform()
    # Wait until a field of |field_list| has received the focus.
    # (Comparison is by WebDriver element id, not Python identity.)
    self.WaitUntil(lambda:
        self._driver.switch_to_active_element().id in
        [f.id for f in field_list])
    # The first field is expected to receive the focus.
    self.assertEqual(self._driver.switch_to_active_element().id,
                     field_list[0].id,
                     msg='The first field did not receive tab focus.')
    for field in field_list:
      tab_press.perform()
      # Wait until a field of |field_list|, other than the current field, has
      # received the focus.
      self.WaitUntil(lambda:
          self._driver.switch_to_active_element().id != field.id and
          self._driver.switch_to_active_element().id in
          [f.id for f in field_list])
      # Focus must have moved to exactly the next field in tab order.
      self.assertEqual(self._driver.switch_to_active_element().id,
                       field_nextfield_dict[field].id,
                       msg=('The TAB ordering is broken. Previous field: "%s"\n'
                            'Field expected to receive focus: "%s"\n'
                            'Field that received focus instead: "%s"')
                           % (self._GetElementInfo(field),
                              self._GetElementInfo(field_nextfield_dict[field]),
                              self._GetElementInfo(
                                  self._driver.switch_to_active_element())))
if __name__ == '__main__':
  # Standard pyauto functional-test entry point.
  pyauto_functional.Main()
| bsd-3-clause |
atpohjal/or-tools | examples/python/data/nonogram_regular/nonogram_nonunique.py | 74 | 1268 | # Copyright 2010 Hakan Kjellerstrand hakank@bonetmail.com
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Nonogram problem from Gecode: Nonunique
# There are 43 solutions to this nonogram.
# http://www.gecode.org/gecode-doc-latest/classNonogram.html
#
# Puzzle grid: 15 rows by 11 columns.
rows = 15
# Every row clue is left-padded with zeros to exactly row_rule_len entries;
# non-zero values are the lengths of consecutive filled runs in that row.
row_rule_len = 4
row_rules = [
[0,0,2,2],
[0,0,2,2],
[0,0,0,4],
[0,0,1,1],
[0,0,1,1],
[1,1,1,1],
[0,0,1,1],
[0,0,1,4],
[0,1,1,1],
[0,1,1,4],
[0,0,1,3],
[0,0,1,2],
[0,0,0,5],
[0,0,2,2],
[0,0,3,3]
]

cols = 11
# Column clues, padded the same way to col_rule_len entries.
col_rule_len = 5
col_rules = [
[0,0,0,0,5],
[0,0,1,2,4],
[0,0,2,1,3],
[0,2,2,1,1],
[0,1,1,1,1],
[0,0,0,1,5],
[2,1,1,3,2],
[2,1,1,1,1],
[0,0,1,4,1],
[0,0,0,1,1],
[0,0,0,0,1]
]
| apache-2.0 |
F5Networks/f5-ansible-modules | ansible_collections/f5networks/f5_modules/tests/unit/modules/network/f5/test_bigip_device_license.py | 2 | 7500 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.f5networks.f5_modules.plugins.modules.bigip_device_license import (
ModuleParameters, ModuleManager, ArgumentSpec
)
from ansible_collections.f5networks.f5_modules.tests.unit.compat import unittest
from ansible_collections.f5networks.f5_modules.tests.unit.compat.mock import Mock, patch
from ansible_collections.f5networks.f5_modules.tests.unit.modules.utils import set_module_args
# Directory holding the fixture files used by these tests.
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
# Cache of already-loaded fixtures, keyed by absolute file path.
fixture_data = {}
def load_fixture(name):
    """Read a fixture file, parsing JSON when possible, and cache the result.

    Subsequent calls for the same file return the cached value.  Files that
    fail to parse as JSON are returned as raw text.
    """
    path = os.path.join(fixture_path, name)
    if path in fixture_data:
        return fixture_data[path]

    with open(path) as fh:
        contents = fh.read()

    try:
        contents = json.loads(contents)
    except Exception:
        # Not JSON; keep the raw string contents.
        pass

    fixture_data[path] = contents
    return contents
class TestParameters(unittest.TestCase):
    """Checks that module arguments map onto ModuleParameters properties."""

    def test_module_parameters(self):
        args = dict(
            license_key='xxxx-yyyy-zzzz',
            license_server='foo-license.f5.com',
            state='latest',
            accept_eula=True
        )

        p = ModuleParameters(params=args)
        # Each input argument should be exposed unchanged as a property.
        assert p.license_key == 'xxxx-yyyy-zzzz'
        assert p.license_server == 'foo-license.f5.com'
        assert p.state == 'latest'
        assert p.accept_eula is True
class TestModuleManager(unittest.TestCase):
    """Exercises ModuleManager licensing flows with all device I/O mocked."""

    def setUp(self):
        self.spec = ArgumentSpec()
        # Patch time.sleep so any retry/wait loops return instantly.
        self.patcher1 = patch('time.sleep')
        self.patcher1.start()
        # Stub out telemetry so no network calls are attempted.
        self.p2 = patch('ansible_collections.f5networks.f5_modules.plugins.modules.bigip_device_license.send_teem')
        self.m2 = self.p2.start()
        self.m2.return_value = True

    def tearDown(self):
        self.patcher1.stop()
        self.p2.stop()

    def test_create(self, *args):
        """Device is unlicensed (exists: False -> True): a license is installed."""
        set_module_args(
            dict(
                license_key='xxxx-yyyy-zzzz',
                license_server='foo-license.f5.com',
                accept_eula=True,
                state='latest',
                provider=dict(
                    server='localhost',
                    password='password',
                    user='admin'
                )
            )
        )

        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode,
            required_if=self.spec.required_if
        )
        mm = ModuleManager(module=module)

        # Override methods to force specific logic in the module to happen
        mm.exists = Mock(side_effect=[False, True])
        mm.read_dossier_from_device = Mock(return_value=True)
        mm.generate_license_from_remote = Mock(return_value=True)
        mm.upload_license_to_device = Mock(return_value=True)
        mm.upload_eula_to_device = Mock(return_value=True)
        mm.reload_license = Mock(return_value=True)
        mm._is_mcpd_ready_on_device = Mock(return_value=True)

        results = mm.exec_module()
        assert results['changed'] is True

    def test_renewal(self, *args):
        """License exists but is invalid: it should be renewed (changed)."""
        set_module_args(
            dict(
                license_key='xxxx-yyyy-zzzz',
                license_server='foo-license.f5.com',
                accept_eula=True,
                state='latest',
                provider=dict(
                    server='localhost',
                    password='password',
                    user='admin'
                )
            )
        )

        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode,
            required_if=self.spec.required_if
        )
        mm = ModuleManager(module=module)

        # Override methods to force specific logic in the module to happen
        mm.exists = Mock(side_effect=[True, True])
        mm.is_revoked = Mock(return_value=False)
        mm.license_valid = Mock(return_value=False)
        mm.read_dossier_from_device = Mock(return_value=True)
        mm.generate_license_from_remote = Mock(return_value=True)
        mm.upload_license_to_device = Mock(return_value=True)
        mm.upload_eula_to_device = Mock(return_value=True)
        mm.reload_license = Mock(return_value=True)
        mm._is_mcpd_ready_on_device = Mock(return_value=True)

        results = mm.exec_module()
        assert results['changed'] is True

    def test_no_renewal(self, *args):
        """License exists and is valid: nothing to do (not changed)."""
        set_module_args(
            dict(
                license_key='xxxx-yyyy-zzzz',
                license_server='foo-license.f5.com',
                accept_eula=True,
                state='latest',
                provider=dict(
                    server='localhost',
                    password='password',
                    user='admin'
                )
            )
        )

        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode,
            required_if=self.spec.required_if
        )
        mm = ModuleManager(module=module)

        # Override methods to force specific logic in the module to happen
        mm.exists = Mock(side_effect=[True, True])
        mm.is_revoked = Mock(return_value=False)
        mm.license_valid = Mock(return_value=True)
        mm.read_dossier_from_device = Mock(return_value=True)
        mm.generate_license_from_remote = Mock(return_value=True)
        mm.upload_license_to_device = Mock(return_value=True)
        mm.upload_eula_to_device = Mock(return_value=True)
        mm.reload_license = Mock(return_value=True)
        mm._is_mcpd_ready_on_device = Mock(return_value=True)

        results = mm.exec_module()
        assert results['changed'] is False

    def test_force_renewal(self, *args):
        """force=True: renew even though the current license is valid."""
        set_module_args(
            dict(
                license_key='xxxx-yyyy-zzzz',
                license_server='foo-license.f5.com',
                accept_eula=True,
                state='latest',
                force=True,
                provider=dict(
                    server='localhost',
                    password='password',
                    user='admin'
                )
            )
        )

        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode,
            required_if=self.spec.required_if
        )
        mm = ModuleManager(module=module)

        # Override methods to force specific logic in the module to happen
        mm.exists = Mock(side_effect=[True, True])
        mm.is_revoked = Mock(return_value=False)
        mm.license_valid = Mock(return_value=True)
        mm.read_dossier_from_device = Mock(return_value=True)
        mm.generate_license_from_remote = Mock(return_value=True)
        mm.upload_license_to_device = Mock(return_value=True)
        mm.upload_eula_to_device = Mock(return_value=True)
        mm.reload_license = Mock(return_value=True)
        mm._is_mcpd_ready_on_device = Mock(return_value=True)

        results = mm.exec_module()
        assert results['changed'] is True
| mit |
hirokiky/django-webcommands | webcommands/forms.py | 1 | 1779 | from django import forms
from django.utils import six
from webcommands import utils as webcommands_utils
def field_for_option(option):
    """Build a Django form field matching a management-command option.

    Args:
        option: an optparse-style option object with ``type`` (and, for
            'choice' options, ``choices``) attributes.

    Returns:
        A ``django.forms.Field`` instance appropriate for the option type;
        unknown types fall back to a CharField.
    """
    if option.type == 'string':
        # max_length must be an int; it was previously the string '255',
        # which is inconsistent with the int used in the fallback branch.
        field = forms.CharField(label=str(option), max_length=255)
    elif option.type in ('int', 'long'):
        # 'int' and 'long' are handled identically.
        field = forms.IntegerField(label=str(option))
    elif option.type == 'choice':
        # Materialize the pairs so they can be iterated more than once
        # (zip() is a one-shot iterator on Python 3).
        choices = [(choice.upper(), choice) for choice in option.choices]
        field = forms.ChoiceField(label=str(option),
                                  choices=choices)
    else:
        field = forms.CharField(label=str(option), max_length=255)
    return field
class CommandFormMetaClass(type):
    """Metaclass that derives form fields from a command's option list.

    When a subclass declares a ``command_class`` attribute, one form field is
    created per entry of ``command_class.option_list`` (keyed by the option's
    string representation) and installed as ``base_fields``, which is what
    ``django.forms.BaseForm`` uses to construct the form.
    """
    def __new__(cls, name, bases, attrs):
        super_new = super(CommandFormMetaClass, cls).__new__
        new_class = super_new(cls, name, bases, attrs)

        if 'command_class' in attrs:
            command_class = attrs['command_class']
            # Map each option to a matching Django form field.
            fields = {str(option): field_for_option(option)
                      for option in command_class.option_list}
        else:
            # No command attached (e.g. the abstract CommandForm base).
            fields = {}
        new_class.base_fields = fields

        return new_class
class BaseCommandForm(forms.BaseForm):
    """Base form for web-exposed management commands."""
    def execute(self):
        # NOTE(review): appears to be a placeholder hook; subclasses are
        # presumably expected to run the underlying command here -- confirm.
        pass
class CommandForm(six.with_metaclass(CommandFormMetaClass, BaseCommandForm)):
    """Concrete command form; fields are generated by CommandFormMetaClass."""
    pass
def commandform_factory(command_class):
    """Build a CommandForm subclass bound to the given command class.

    The generated class name is derived from the last dotted component of the
    command's module name, e.g. module ``my_command`` yields
    ``MyCommandCommandForm``.
    """
    # The command's module name (last dotted component) names the command.
    command_name = command_class.__module__.rsplit('.', 1)[-1]
    command_name = webcommands_utils.funcname_to_classname(command_name)
    attrs = {'command_class': command_class}
    return type(command_name + str('CommandForm'), (CommandForm,), attrs)
| mit |
40223208/2015cdb_g4 | static/Brython3.1.1-20150328-091302/Lib/random.py | 518 | 26080 | """Random variable generators.
integers
--------
uniform within range
sequences
---------
pick random element
pick random sample
generate random permutation
distributions on the real line:
------------------------------
uniform
triangular
normal (Gaussian)
lognormal
negative exponential
gamma
beta
pareto
Weibull
distributions on the circle (angles 0 to 2pi)
---------------------------------------------
circular uniform
von Mises
General notes on the underlying Mersenne Twister core generator:
* The period is 2**19937-1.
* It is one of the most extensively tested generators in existence.
* The random() method is implemented in C, executes in a single Python step,
and is, therefore, threadsafe.
"""
# Module adapted for Brython : remove expensive imports
#from warnings import warn as _warn
def _warn(msg):
print(msg)
from types import MethodType as _MethodType, BuiltinMethodType as _BuiltinMethodType
from math import log as _log, exp as _exp, pi as _pi, e as _e, ceil as _ceil
from math import sqrt as _sqrt, acos as _acos, cos as _cos, sin as _sin
from browser import window
def _randint(a, b):
    # Inclusive-range random integer in [a, b], backed by the browser's
    # JavaScript Math.random() (this is a Brython port).
    return int(window.Math.random()*(b-a+1)+a)
#from os import urandom as _urandom
#from os import urandom as _urandom
def _urandom(n):
    """urandom(n) -> str

    Return n random bytes suitable for cryptographic use."""
    # NOTE(review): despite the docstring, this is backed by JS Math.random()
    # via _randint and is NOT cryptographically secure; it only needs to seed
    # the Mersenne Twister here, but do not use it for secrets.
    randbytes= [_randint(0,255) for i in range(n)]
    return bytes(randbytes)
#from collections.abc import Set as _Set, Sequence as _Sequence
_Set = set
_Sequence = [str, list]
from hashlib import sha512 as _sha512
__all__ = ["Random","seed","random","uniform","randint","choice","sample",
"randrange","shuffle","normalvariate","lognormvariate",
"expovariate","vonmisesvariate","gammavariate","triangular",
"gauss","betavariate","paretovariate","weibullvariate",
"getstate","setstate", "getrandbits",
"SystemRandom"]
# Kinderman-Monahan rejection constant used by normalvariate().
NV_MAGICCONST = 4 * _exp(-0.5)/_sqrt(2.0)
TWOPI = 2.0*_pi
LOG4 = _log(4.0)
# Cheng's constant used by gammavariate() for alpha > 1.
SG_MAGICCONST = 1.0 + _log(4.5)
BPF = 53        # Number of bits in a float
RECIP_BPF = 2**-BPF
# Translated by Guido van Rossum from C source provided by
# Adrian Baddeley. Adapted by Raymond Hettinger for use with
# the Mersenne Twister and os.urandom() core generators.
import _random
class Random(_random.Random):
"""Random number generator base class used by bound module functions.
Used to instantiate instances of Random to get generators that don't
share state.
Class Random can also be subclassed if you want to use a different basic
generator of your own devising: in that case, override the following
methods: random(), seed(), getstate(), and setstate().
Optionally, implement a getrandbits() method so that randrange()
can cover arbitrarily large ranges.
"""
VERSION = 3 # used by getstate/setstate
    def __init__(self, x=None):
        """Initialize an instance.

        Optional argument x controls seeding, as for Random.seed().
        """
        self.seed(x)
        # Cached second value for gauss(); None means "no cached value".
        self.gauss_next = None
    def seed(self, a=None, version=2):
        """Initialize internal state from hashable object.

        None or no argument seeds from current time or from an operating
        system specific randomness source if available.

        For version 2 (the default), all of the bits are used if *a* is a str,
        bytes, or bytearray.  For version 1, the hash() of *a* is used instead.

        If *a* is an int, all bits are used.
        """
        if a is None:
            # No seed given: prefer OS entropy; fall back to the wall clock
            # when urandom is unavailable.
            try:
                a = int.from_bytes(_urandom(32), 'big')
            except NotImplementedError:
                import time
                a = int(time.time() * 256) # use fractional seconds

        if version == 2:
            if isinstance(a, (str, bytes, bytearray)):
                if isinstance(a, str):
                    a = a.encode()
                # Append a SHA-512 digest so similar strings still yield
                # well-separated integer seeds.
                a += _sha512(a).digest()
                a = int.from_bytes(a, 'big')

        super().seed(a)
        # Invalidate any cached gauss() value from the previous stream.
        self.gauss_next = None
def getstate(self):
"""Return internal state; can be passed to setstate() later."""
return self.VERSION, super().getstate(), self.gauss_next
    def setstate(self, state):
        """Restore internal state from object returned by getstate()."""
        version = state[0]
        if version == 3:
            version, internalstate, self.gauss_next = state
            super().setstate(internalstate)
        elif version == 2:
            version, internalstate, self.gauss_next = state
            # In version 2, the state was saved as signed ints, which causes
            #   inconsistencies between 32/64-bit systems. The state is
            #   really unsigned 32-bit ints, so we convert negative ints from
            #   version 2 to positive longs for version 3.
            try:
                internalstate = tuple(x % (2**32) for x in internalstate)
            except ValueError as e:
                raise TypeError from e
            super().setstate(internalstate)
        else:
            # Unknown/unsupported pickle version.
            raise ValueError("state with version %s passed to "
                             "Random.setstate() of version %s" %
                             (version, self.VERSION))
## ---- Methods below this point do not need to be overridden when
## ---- subclassing for the purpose of using a different core generator.
## -------------------- pickle support -------------------
    def __getstate__(self): # for pickle
        # Pickle the full (VERSION, internal state, gauss_next) tuple.
        return self.getstate()
    def __setstate__(self, state):  # for pickle
        # Restore from the tuple produced by __getstate__.
        self.setstate(state)
    def __reduce__(self):
        # Reconstruct via the class with no args, then restore state.
        return self.__class__, (), self.getstate()
## -------------------- integer methods -------------------
    def randrange(self, start, stop=None, step=1, _int=int):
        """Choose a random item from range(start, stop[, step]).

        This fixes the problem with randint() which includes the
        endpoint; in Python this is usually not what you want.

        Raises ValueError for non-integer arguments, a zero step, or an
        empty resulting range.
        """

        # This code is a bit messy to make it fast for the
        # common case while still doing adequate error checking.
        istart = _int(start)
        if istart != start:
            raise ValueError("non-integer arg 1 for randrange()")
        if stop is None:
            # One-argument form: randrange(stop).
            if istart > 0:
                return self._randbelow(istart)
            raise ValueError("empty range for randrange()")

        # stop argument supplied.
        istop = _int(stop)
        if istop != stop:
            raise ValueError("non-integer stop for randrange()")
        width = istop - istart
        if step == 1 and width > 0:
            return istart + self._randbelow(width)
        if step == 1:
            raise ValueError("empty range for randrange() (%d,%d, %d)" % (istart, istop, width))

        # Non-unit step argument supplied.
        istep = _int(step)
        if istep != step:
            raise ValueError("non-integer step for randrange()")
        # n = number of representable values in the stepped range.
        if istep > 0:
            n = (width + istep - 1) // istep
        elif istep < 0:
            n = (width + istep + 1) // istep
        else:
            raise ValueError("zero step for randrange()")

        if n <= 0:
            raise ValueError("empty range for randrange()")

        return istart + istep*self._randbelow(n)
def randint(self, a, b):
"""Return random integer in range [a, b], including both end points.
"""
return self.randrange(a, b+1)
    def _randbelow(self, n, int=int, maxsize=1<<BPF, type=type,
                   Method=_MethodType, BuiltinMethod=_BuiltinMethodType):
        "Return a random int in the range [0,n).  Raises ValueError if n==0."

        getrandbits = self.getrandbits
        # Only call self.getrandbits if the original random() builtin method
        # has not been overridden or if a new getrandbits() was supplied.
        if type(self.random) is BuiltinMethod or type(getrandbits) is Method:
            k = n.bit_length()  # don't use (n-1) here because n can be 1
            r = getrandbits(k)          # 0 <= r < 2**k
            # Rejection sampling keeps the distribution exactly uniform.
            while r >= n:
                r = getrandbits(k)
            return r
        # There's an overridden random() method but no new getrandbits() method,
        # so we can only use random() from here.
        random = self.random
        if n >= maxsize:
            _warn("Underlying random() generator does not supply \n"
                "enough bits to choose from a population range this large.\n"
                "To remove the range limitation, add a getrandbits() method.")
            return int(random() * n)
        rem = maxsize % n
        # Reject values in the final partial bucket to avoid modulo bias.
        limit = (maxsize - rem) / maxsize   # int(limit * maxsize) % n == 0
        r = random()
        while r >= limit:
            r = random()
        return int(r*maxsize) % n
## -------------------- sequence methods -------------------
def choice(self, seq):
"""Choose a random element from a non-empty sequence."""
try:
i = self._randbelow(len(seq))
except ValueError:
raise IndexError('Cannot choose from an empty sequence')
return seq[i]
    def shuffle(self, x, random=None):
        """x, random=random.random -> shuffle list x in place; return None.

        Optional arg random is a 0-argument function returning a random
        float in [0.0, 1.0); by default, the standard random.random.

        Uses the Fisher-Yates algorithm in both branches.
        """

        if random is None:
            # Use the bias-free integer helper when no custom source is given.
            randbelow = self._randbelow
            for i in reversed(range(1, len(x))):
                # pick an element in x[:i+1] with which to exchange x[i]
                j = randbelow(i+1)
                x[i], x[j] = x[j], x[i]
        else:
            _int = int
            for i in reversed(range(1, len(x))):
                # pick an element in x[:i+1] with which to exchange x[i]
                j = _int(random() * (i+1))
                x[i], x[j] = x[j], x[i]
    def sample(self, population, k):
        """Chooses k unique random elements from a population sequence or set.

        Returns a new list containing elements from the population while
        leaving the original population unchanged.  The resulting list is
        in selection order so that all sub-slices will also be valid random
        samples.  This allows raffle winners (the sample) to be partitioned
        into grand prize and second place winners (the subslices).

        Members of the population need not be hashable or unique.  If the
        population contains repeats, then each occurrence is a possible
        selection in the sample.

        To choose a sample in a range of integers, use range as an argument.
        This is especially fast and space efficient for sampling from a
        large population:   sample(range(10000000), 60)
        """

        # Sampling without replacement entails tracking either potential
        # selections (the pool) in a list or previous selections in a set.

        # When the number of selections is small compared to the
        # population, then tracking selections is efficient, requiring
        # only a small set and an occasional reselection.  For
        # a larger number of selections, the pool tracking method is
        # preferred since the list takes less space than the
        # set and it doesn't suffer from frequent reselections.

        if isinstance(population, _Set):
            population = tuple(population)
        if not isinstance(population, _Sequence):
            raise TypeError("Population must be a sequence or set.  For dicts, use list(d).")
        randbelow = self._randbelow
        n = len(population)
        if not 0 <= k <= n:
            raise ValueError("Sample larger than population")
        result = [None] * k
        # Heuristic crossover point between the two strategies (measured in
        # CPython; setsize approximates the memory cost of the tracking set).
        setsize = 21        # size of a small set minus size of an empty list
        if k > 5:
            setsize += 4 ** _ceil(_log(k * 3, 4)) # table size for big sets
        if n <= setsize:
            # An n-length list is smaller than a k-length set
            pool = list(population)
            for i in range(k):         # invariant:  non-selected at [0,n-i)
                j = randbelow(n-i)
                result[i] = pool[j]
                pool[j] = pool[n-i-1]   # move non-selected item into vacancy
        else:
            # Track selected indices; re-draw on collision.
            selected = set()
            selected_add = selected.add
            for i in range(k):
                j = randbelow(n)
                while j in selected:
                    j = randbelow(n)
                selected_add(j)
                result[i] = population[j]
        return result
## -------------------- real-valued distributions -------------------
## -------------------- uniform distribution -------------------
def uniform(self, a, b):
"Get a random number in the range [a, b) or [a, b] depending on rounding."
return a + (b-a) * self.random()
## -------------------- triangular --------------------
    def triangular(self, low=0.0, high=1.0, mode=None):
        """Triangular distribution.

        Continuous distribution bounded by given lower and upper limits,
        and having a given mode value in-between.  When mode is None the
        midpoint is used (symmetric triangle).

        http://en.wikipedia.org/wiki/Triangular_distribution
        """
        u = self.random()
        c = 0.5 if mode is None else (mode - low) / (high - low)
        if u > c:
            # Reflect into the left branch of the inverse CDF.
            u = 1.0 - u
            c = 1.0 - c
            low, high = high, low
        return low + (high - low) * (u * c) ** 0.5
## -------------------- normal distribution --------------------
    def normalvariate(self, mu, sigma):
        """Normal distribution.

        mu is the mean, and sigma is the standard deviation.
        """
        # mu = mean, sigma = standard deviation

        # Uses Kinderman and Monahan method. Reference: Kinderman,
        # A.J. and Monahan, J.F., "Computer generation of random
        # variables using the ratio of uniform deviates", ACM Trans
        # Math Software, 3, (1977), pp257-260.

        random = self.random
        while 1:
            u1 = random()
            u2 = 1.0 - random()
            z = NV_MAGICCONST*(u1-0.5)/u2
            zz = z*z/4.0
            # Accept z when it falls under the normal density envelope.
            if zz <= -_log(u2):
                break
        return mu + z*sigma
## -------------------- lognormal distribution --------------------
def lognormvariate(self, mu, sigma):
"""Log normal distribution.
If you take the natural logarithm of this distribution, you'll get a
normal distribution with mean mu and standard deviation sigma.
mu can have any value, and sigma must be greater than zero.
"""
return _exp(self.normalvariate(mu, sigma))
## -------------------- exponential distribution --------------------
def expovariate(self, lambd):
"""Exponential distribution.
lambd is 1.0 divided by the desired mean. It should be
nonzero. (The parameter would be called "lambda", but that is
a reserved word in Python.) Returned values range from 0 to
positive infinity if lambd is positive, and from negative
infinity to 0 if lambd is negative.
"""
# lambd: rate lambd = 1/mean
# ('lambda' is a Python reserved word)
# we use 1-random() instead of random() to preclude the
# possibility of taking the log of zero.
return -_log(1.0 - self.random())/lambd
## -------------------- von Mises distribution --------------------
    def vonmisesvariate(self, mu, kappa):
        """Circular data distribution.

        mu is the mean angle, expressed in radians between 0 and 2*pi, and
        kappa is the concentration parameter, which must be greater than or
        equal to zero.  If kappa is equal to zero, this distribution reduces
        to a uniform random angle over the range 0 to 2*pi.
        """
        # mu:    mean angle (in radians between 0 and 2*pi)
        # kappa: concentration parameter kappa (>= 0)
        # if kappa = 0 generate uniform random angle

        # Based upon an algorithm published in: Fisher, N.I.,
        # "Statistical Analysis of Circular Data", Cambridge
        # University Press, 1993.

        # Thanks to Magnus Kessler for a correction to the
        # implementation of step 4.

        random = self.random
        if kappa <= 1e-6:
            # Effectively zero concentration: uniform angle.
            return TWOPI * random()

        s = 0.5 / kappa
        r = s + _sqrt(1.0 + s * s)

        while 1:
            u1 = random()
            z = _cos(_pi * u1)

            d = z / (r + z)
            u2 = random()
            # Acceptance test for the wrapped-Cauchy envelope.
            if u2 < 1.0 - d * d or u2 <= (1.0 - d) * _exp(d):
                break

        q = 1.0 / r
        f = (q + z) / (1.0 + q * z)
        u3 = random()
        # u3 decides on which side of the mean the angle falls.
        if u3 > 0.5:
            theta = (mu + _acos(f)) % TWOPI
        else:
            theta = (mu - _acos(f)) % TWOPI

        return theta
## -------------------- gamma distribution --------------------
    def gammavariate(self, alpha, beta):
        """Gamma distribution.  Not the gamma function!

        Conditions on the parameters are alpha > 0 and beta > 0.

        The probability distribution function is:

                    x ** (alpha - 1) * math.exp(-x / beta)
          pdf(x) =  --------------------------------------
                      math.gamma(alpha) * beta ** alpha

        Raises ValueError unless alpha > 0 and beta > 0.
        """

        # alpha > 0, beta > 0, mean is alpha*beta, variance is alpha*beta**2

        # Warning: a few older sources define the gamma distribution in terms
        # of alpha > -1.0
        if alpha <= 0.0 or beta <= 0.0:
            raise ValueError('gammavariate: alpha and beta must be > 0.0')

        random = self.random
        if alpha > 1.0:

            # Uses R.C.H. Cheng, "The generation of Gamma
            # variables with non-integral shape parameters",
            # Applied Statistics, (1977), 26, No. 1, p71-74

            ainv = _sqrt(2.0 * alpha - 1.0)
            bbb = alpha - LOG4
            ccc = alpha + ainv

            while 1:
                u1 = random()
                # Clamp u1 away from 0 and 1 to keep the logit finite.
                if not 1e-7 < u1 < .9999999:
                    continue
                u2 = 1.0 - random()
                v = _log(u1/(1.0-u1))/ainv
                x = alpha*_exp(v)
                z = u1*u1*u2
                r = bbb+ccc*v-x
                # Quick acceptance (SG_MAGICCONST bound), then exact test.
                if r + SG_MAGICCONST - 4.5*z >= 0.0 or r >= _log(z):
                    return x * beta

        elif alpha == 1.0:
            # expovariate(1)
            u = random()
            while u <= 1e-7:
                u = random()
            return -_log(u) * beta

        else:   # alpha is between 0 and 1 (exclusive)

            # Uses ALGORITHM GS of Statistical Computing - Kennedy & Gentle

            while 1:
                u = random()
                b = (_e + alpha)/_e
                p = b*u
                if p <= 1.0:
                    x = p ** (1.0/alpha)
                else:
                    x = -_log((b-p)/alpha)
                u1 = random()
                if p > 1.0:
                    if u1 <= x ** (alpha - 1.0):
                        break
                elif u1 <= _exp(-x):
                    break
            return x * beta
## -------------------- Gauss (faster alternative) --------------------

def gauss(self, mu, sigma):
    """Gaussian distribution.

    mu is the mean, and sigma is the standard deviation.  This is
    slightly faster than the normalvariate() function.

    Not thread-safe without a lock around calls.

    """
    # Box-Muller (Lambert Meertens; corrected version, bug discovered by
    # Mike Miller): for x, y uniform on [0, 1),
    #
    #     cos(2*pi*x) * sqrt(-2*log(1-y))
    #     sin(2*pi*x) * sqrt(-2*log(1-y))
    #
    # are two *independent* standard normal variates.  We hand out one
    # per call and stash the other in gauss_next for the next call.
    #
    # Multithreading note: two threads calling simultaneously may get
    # the same return value; wrap calls in a lock to avoid that (the
    # serial case is not slowed down by taking a lock here).
    rnd = self.random
    z, self.gauss_next = self.gauss_next, None
    if z is None:
        angle = rnd() * TWOPI
        radius = _sqrt(-2.0 * _log(1.0 - rnd()))
        z = _cos(angle) * radius
        self.gauss_next = _sin(angle) * radius

    return mu + z * sigma
## -------------------- beta --------------------
## See
## http://mail.python.org/pipermail/python-bugs-list/2001-January/003752.html
## for Ivan Frohne's insightful analysis of why the original implementation:
##
##    def betavariate(self, alpha, beta):
##        # Discrete Event Simulation in C, pp 87-88.
##
##        y = self.expovariate(alpha)
##        z = self.expovariate(1.0/beta)
##        return z/(y+z)
##
## was dead wrong, and how it probably got that way.

def betavariate(self, alpha, beta):
    """Beta distribution.

    Conditions on the parameters are alpha > 0 and beta > 0.
    Returned values range between 0 and 1.

    """
    # Janne Sinkkonen's version: the ratio of two gamma variates is
    # beta-distributed; matches all the std texts (e.g., Knuth Vol 2
    # Ed 3 pg 134 "the beta distribution").
    y = self.gammavariate(alpha, 1.)
    if y:
        return y / (y + self.gammavariate(beta, 1.))
    return 0.0
## -------------------- Pareto --------------------

def paretovariate(self, alpha):
    """Pareto distribution.  alpha is the shape parameter."""
    # Jain, pg. 495: invert the CDF applied to a uniform variate on
    # (0, 1] (the 1-random() keeps the argument away from zero).
    x = 1.0 - self.random()
    return 1.0 / x ** (1.0 / alpha)
## -------------------- Weibull --------------------

def weibullvariate(self, alpha, beta):
    """Weibull distribution.

    alpha is the scale parameter and beta is the shape parameter.

    """
    # Jain, pg. 499; bug fix courtesy Bill Arms.  Invert the CDF applied
    # to a uniform variate on (0, 1].
    x = 1.0 - self.random()
    return alpha * (-_log(x)) ** (1.0 / beta)
## --------------- Operating System Random Source  ------------------

class SystemRandom(Random):
    """Alternate random number generator using sources provided
    by the operating system (such as /dev/urandom on Unix or
    CryptGenRandom on Windows).

     Not available on all systems (see os.urandom() for details).
    """

    def random(self):
        """Get the next random number in the range [0.0, 1.0)."""
        # 7 bytes give 56 random bits; shifting out 3 leaves 53, which
        # matches the precision of a C double mantissa.
        return (int.from_bytes(_urandom(7), 'big') >> 3) * RECIP_BPF

    def getrandbits(self, k):
        """getrandbits(k) -> x.  Generates an int with k random bits."""
        if k <= 0:
            raise ValueError('number of bits must be greater than zero')
        if k != int(k):
            raise TypeError('number of bits should be an integer')
        nbytes = (k + 7) // 8                   # bits / 8, rounded up
        raw = int.from_bytes(_urandom(nbytes), 'big')
        return raw >> (nbytes * 8 - k)          # trim excess bits

    def seed(self, *args, **kwds):
        "Stub method.  Not used for a system random number generator."
        return None

    def _notimplemented(self, *args, **kwds):
        "Method should not be called for a system random number generator."
        raise NotImplementedError('System entropy source does not have state.')
    getstate = setstate = _notimplemented
## -------------------- test program --------------------
def _test_generator(n, func, args):
import time
print(n, 'times', func.__name__)
total = 0.0
sqsum = 0.0
smallest = 1e10
largest = -1e10
t0 = time.time()
for i in range(n):
x = func(*args)
total += x
sqsum = sqsum + x*x
smallest = min(x, smallest)
largest = max(x, largest)
t1 = time.time()
print(round(t1-t0, 3), 'sec,', end=' ')
avg = total/n
stddev = _sqrt(sqsum/n - avg*avg)
print('avg %g, stddev %g, min %g, max %g' % \
(avg, stddev, smallest, largest))
def _test(N=2000):
    """Smoke-test every module-level distribution with N samples apiece."""
    _test_generator(N, random, ())
    _test_generator(N, normalvariate, (0.0, 1.0))
    _test_generator(N, lognormvariate, (0.0, 1.0))
    _test_generator(N, vonmisesvariate, (0.0, 1.0))
    _test_generator(N, gammavariate, (0.01, 1.0))
    _test_generator(N, gammavariate, (0.1, 1.0))
    _test_generator(N, gammavariate, (0.1, 2.0))
    _test_generator(N, gammavariate, (0.5, 1.0))
    _test_generator(N, gammavariate, (0.9, 1.0))
    _test_generator(N, gammavariate, (1.0, 1.0))
    _test_generator(N, gammavariate, (2.0, 1.0))
    _test_generator(N, gammavariate, (20.0, 1.0))
    _test_generator(N, gammavariate, (200.0, 1.0))
    _test_generator(N, gauss, (0.0, 1.0))
    _test_generator(N, betavariate, (3.0, 3.0))
    _test_generator(N, triangular, (0.0, 1.0, 1.0/3.0))
# Create one instance, seeded from current time, and export its methods
# as module-level functions.  The functions share state across all uses
# (both in the user's code and in the Python libraries), but that's fine
# for most programs and is easier for the casual user than making them
# instantiate their own Random() instance.
_inst = Random()
seed = _inst.seed
random = _inst.random
uniform = _inst.uniform
triangular = _inst.triangular
randint = _inst.randint
choice = _inst.choice
randrange = _inst.randrange
sample = _inst.sample
shuffle = _inst.shuffle
normalvariate = _inst.normalvariate
lognormvariate = _inst.lognormvariate
expovariate = _inst.expovariate
vonmisesvariate = _inst.vonmisesvariate
gammavariate = _inst.gammavariate
gauss = _inst.gauss
betavariate = _inst.betavariate
paretovariate = _inst.paretovariate
weibullvariate = _inst.weibullvariate
getstate = _inst.getstate
setstate = _inst.setstate
getrandbits = _inst.getrandbits
# Run the distribution smoke tests when executed as a script.
if __name__ == '__main__':
_test()
| gpl-3.0 |
YuriIvanov/qBittorrent | src/searchengine/nova/engines/torrentz.py | 18 | 5585 | #VERSION: 2.16
#AUTHORS: Diego de las Heras (diegodelasheras@gmail.com)
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from novaprinter import prettyPrinter
from helpers import retrieve_url, download_file
from HTMLParser import HTMLParser
from urllib import urlencode
class torrentz(object):
    """Search-engine plugin for torrentz.eu (legacy qBittorrent nova API).

    NOTE(review): this plugin targets the Python 2 nova runtime
    (HTMLParser module, urllib.urlencode, str.decode) -- confirm before
    running it under Python 3.
    """
    # mandatory properties
    url = 'https://torrentz.eu'
    name = 'Torrentz'
    supported_categories = {'all': ''}

    # trackers appended to every magnet link built from an info-hash
    trackers_list = ['udp://open.demonii.com:1337/announce',
                     'udp://tracker.openbittorrent.com:80/announce',
                     'udp://tracker.leechers-paradise.org:6969',
                     'udp://tracker.coppersurfer.tk:6969',
                     'udp://9.rarbg.com:2710/announce']

    class MyHtmlParser(HTMLParser):
        """Stateful parser for torrentz.eu result pages.

        Each result row starts with an <a href="/infohash"> anchor
        followed by <span> cells; td_counter tracks which cell the
        parser is currently inside (None means "not in a result row").
        """

        def __init__(self, results, url, trackers):
            HTMLParser.__init__(self)
            self.results = results    # shared list; one entry appended per row
            self.url = url            # engine base url, used for desc_link
            self.trackers = trackers  # pre-encoded '&tr=...' magnet suffix
            self.td_counter = None
            self.current_item = None

        def handle_starttag(self, tag, attrs):
            params = dict(attrs)
            if tag == 'a':
                if 'href' in params:
                    # an anchor opens a new result row; its href holds
                    # the info-hash used to build the magnet link
                    self.current_item = {}
                    self.td_counter = 0
                    self.current_item['link'] = 'magnet:?xt=urn:btih:' + \
                        params['href'].strip(' /') + self.trackers
                    self.current_item['desc_link'] = self.url + params['href'].strip()
            elif tag == 'span':
                if self.td_counter is not None:
                    self.td_counter += 1
                    if 'class' in params and params['class'] == 'pe':  # hack to avoid Pending
                        self.td_counter += 2
                    if self.td_counter > 6:  # safety
                        self.td_counter = None

        def handle_data(self, data):
            if self.td_counter == 0:
                # the name may arrive in several chunks; accumulate them
                if 'name' not in self.current_item:
                    self.current_item['name'] = ''
                self.current_item['name'] += data
            elif self.td_counter == 4:
                if 'size' not in self.current_item:
                    self.current_item['size'] = data.strip()
                    if self.current_item['size'] == 'Pending':
                        self.current_item['size'] = ''
            elif self.td_counter == 5:
                if 'seeds' not in self.current_item:
                    self.current_item['seeds'] = data.strip().replace(',', '')
                    if not self.current_item['seeds'].isdigit():
                        self.current_item['seeds'] = 0
            elif self.td_counter == 6:
                if 'leech' not in self.current_item:
                    self.current_item['leech'] = data.strip().replace(',', '')
                    if not self.current_item['leech'].isdigit():
                        self.current_item['leech'] = 0

                # display item
                self.td_counter = None
                self.current_item['engine_url'] = self.url
                # BUGFIX: str.find() returns -1 (truthy) when the marker
                # is absent, so the original `if name.find(' \xc2'):`
                # test fired on almost every name; compare against -1
                # explicitly so the suffix is only split off when the
                # marker is actually present.
                if self.current_item['name'].find(' \xc2') != -1:
                    self.current_item['name'] = self.current_item['name'].split(' \xc2')[0]
                self.current_item['link'] += '&' + urlencode({'dn' : self.current_item['name']})
                self.current_item['name'] = self.current_item['name'].decode('utf8')
                prettyPrinter(self.current_item)
                self.results.append('a')

    def download_torrent(self, info):
        """Print the downloaded .torrent file path (nova protocol)."""
        print(download_file(info))

    def search(self, what, cat='all'):
        """Fetch up to 6 result pages for `what` and print each hit.

        `what` is already urlencoded by the caller; `cat` is unused
        because only the 'all' category is supported.
        """
        # initialize trackers for magnet links
        trackers = '&' + '&'.join(urlencode({'tr' : tracker}) for tracker in self.trackers_list)

        results_list = []
        parser = self.MyHtmlParser(results_list, self.url, trackers)
        i = 0
        while i < 6:
            # "what" is already urlencoded
            html = retrieve_url(self.url + '/any?f=%s&p=%d' % (what, i))
            parser.feed(html)
            if len(results_list) < 1:
                # stop paging once a page yields no results
                break
            del results_list[:]
            i += 1
        parser.close()
| gpl-2.0 |
marco-mariotti/selenoprofiles | libraries/pygraph/algorithms/cycles.py | 11 | 3427 | # Copyright (c) 2008-2009 Pedro Matiello <pmatiello@gmail.com>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""
Cycle detection algorithms.
@sort: find_cycle
"""
# Imports
from pygraph.classes.exceptions import InvalidGraphType
from pygraph.classes.digraph import digraph as digraph_class
from pygraph.classes.graph import graph as graph_class
from sys import getrecursionlimit, setrecursionlimit
def find_cycle(graph):
    """
    Find a cycle in the given graph.

    This function will return a list of nodes which form a cycle in the
    graph or an empty list if no cycle exists.

    @type  graph: graph, digraph
    @param graph: Graph.

    @rtype:  list
    @return: List of nodes.
    """
    if isinstance(graph, graph_class):
        directed = False
    elif isinstance(graph, digraph_class):
        directed = True
    else:
        raise InvalidGraphType

    def find_cycle_to_ancestor(node, ancestor):
        """
        Walk the spanning tree upwards from node and collect the path
        down to ancestor (empty list if ancestor is not reachable).
        """
        path = []
        while node != ancestor:
            if node is None:
                return []
            path.append(node)
            node = spanning_tree[node]
        path.append(node)
        path.reverse()
        return path

    def dfs(node):
        """
        Depth-first search subfunction.
        """
        visited[node] = 1
        # Explore the connected component recursively
        for neighbor in graph[node]:
            if cycle:
                return
            if neighbor not in visited:
                spanning_tree[neighbor] = node
                dfs(neighbor)
            else:
                # A back edge closes a cycle (for undirected graphs the
                # tree edge back to the parent does not count)
                if directed or spanning_tree[node] != neighbor:
                    cycle.extend(find_cycle_to_ancestor(node, neighbor))

    # Raise the recursion limit so the recursive DFS can cover the
    # whole graph; it is restored before returning.
    recursionlimit = getrecursionlimit()
    setrecursionlimit(max(len(graph.nodes()) * 2, recursionlimit))

    visited = {}        # marks visited nodes
    spanning_tree = {}  # maps each node to its DFS parent
    cycle = []

    # Algorithm outer-loop: start a DFS from every unvisited node
    for start in graph:
        if start not in visited:
            spanning_tree[start] = None
            dfs(start)
            if cycle:
                setrecursionlimit(recursionlimit)
                return cycle

    setrecursionlimit(recursionlimit)
    return []
| gpl-2.0 |
mitchmahan/pyeapi | pyeapi/api/interfaces.py | 3 | 31397 | #
# Copyright (c) 2014, Arista Networks, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of Arista Networks nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""Module for working with interfaces in EOS
This module provides an API for pragmatically working with EOS interface
configurations. Interfaces include any data or management plane interface
available in EOS.
Parameters:
name (string): The name of the interface the configuration should be
applied to. The interface name is the full interface identifier.
shutdown (boolean): True if the interface is administratively disabled,
and False if the interface is administratively enable. This value
does not validate the interfaces operational state.
description (string): The interface description string. This value is
an arbitrary operator defined value.
sflow (boolean): True if sFlow is enabled on the interface otherwise
False
flowcontrol_send (string): The flowcontrol send configuration value for
the interface. Valid values are on or off
flowcontrol_receive (string): The flowcontrol receive configuration value
for the interface. Valid values are on or off
"""
import re
from pyeapi.api import EntityCollection
from pyeapi.utils import ProxyCall
MIN_LINKS_RE = re.compile(r'(?<=\s{3}min-links\s)(?P<value>.+)$', re.M)

DEFAULT_LACP_MODE = 'on'

# Interface name prefixes this module knows how to manage.
VALID_INTERFACES = frozenset([
    'Ethernet',
    'Management',
    'Loopback',
    'Port-Channel',
    'Vlan',
    'Vxlan',
])


def isvalidinterface(value):
    """Return a truthy result when value names a supported interface.

    Returns None when the name does not match the interface-name
    pattern at all, otherwise True/False depending on whether the
    alphabetic prefix is one of VALID_INTERFACES.
    """
    prefix = re.match(r'([EPVLM][a-z-C]+)', value)
    if prefix is None:
        return None
    return prefix.group() in VALID_INTERFACES
class Interfaces(EntityCollection):
    """Front-end resource for all interface types.

    Dispatches each call to a type-specific handler chosen from the
    interface name prefix (via INTERFACE_CLASS_MAP), caching one
    handler instance per class.
    """

    def __init__(self, node, *args, **kwargs):
        super(Interfaces, self).__init__(node, *args, **kwargs)
        # handler cache, keyed by the type-specific class
        self._instances = dict()

    def get(self, name):
        """Return the resource dict for the named interface."""
        return self.get_instance(name)[name]

    def getall(self):
        """Returns all interfaces in a dict object.

        Example:
            {
                "Ethernet1": {...},
                "Ethernet2": {...}
            }

        Returns:
            A Python dictionary object containing all interface
            configuration indexed by interface name
        """
        interfaces_re = re.compile(r'(?<=^interface\s)(.+)$', re.M)

        response = dict()
        for name in interfaces_re.findall(self.config):
            interface = self.get(name)
            if interface:
                response[name] = interface
        return response

    def __getattr__(self, name):
        # unknown attributes become deferred calls routed via marshall()
        return ProxyCall(self.marshall, name)

    def get_instance(self, interface):
        """Return (and cache) the handler instance for this interface."""
        cls = INTERFACE_CLASS_MAP.get(interface[0:2]) or BaseInterface
        try:
            instance = self._instances[cls]
        except KeyError:
            instance = cls(self.node)
            self._instances[cls] = instance
        return instance

    def marshall(self, name, *args, **kwargs):
        """Forward a proxied method call to the type-specific handler.

        Raises:
            ValueError: if the first argument is not a valid interface
            AttributeError: if the handler lacks the requested method
        """
        interface = args[0]
        if not isvalidinterface(interface):
            raise ValueError('invalid interface {}'.format(interface))

        instance = self.get_instance(interface)
        if not hasattr(instance, name):
            raise AttributeError("'%s' object has no attribute '%s'" %
                                 (instance, name))
        method = getattr(instance, name)
        return method(*args, **kwargs)
class BaseInterface(EntityCollection):
    """Configuration resource for attributes common to every interface."""

    def __str__(self):
        return 'Interface'

    def get(self, name):
        """Return the generic attributes of an interface as a dict.

        The resource contains:
          * name (str): the interface name
          * type (str): always 'generic'
          * shutdown (bool): True if administratively disabled
          * description (str): the configured description or None

        Args:
            name (str): the interface identifier to retrieve from the
                running-configuration

        Returns:
            dict: the parsed configuration, or None when the interface
                does not exist
        """
        config = self.get_block('^interface %s' % name)
        if not config:
            return None

        resource = dict(name=name, type='generic')
        resource.update(self._parse_shutdown(config))
        resource.update(self._parse_description(config))
        return resource

    def _parse_shutdown(self, config):
        """Return {'shutdown': bool} parsed from the config block."""
        # an enabled interface carries an explicit 'no shutdown' line
        return dict(shutdown='no shutdown' not in config)

    def _parse_description(self, config):
        """Return {'description': str or None} parsed from the config block."""
        match = re.search(r'description (.+)$', config, re.M)
        return dict(description=match.group(1) if match else None)

    def create(self, name):
        """Create the interface on the node.

        Returns True even when the interface already exists.
        """
        return self.configure('interface %s' % name)

    def delete(self, name):
        """Delete the interface from the node.

        Returns True even when the interface does not exist.
        """
        return self.configure('no interface %s' % name)

    def default(self, name):
        """Default the interface in the running configuration."""
        return self.configure('default interface %s' % name)

    def set_description(self, name, value=None, default=False):
        """Configure, default, or negate the interface description.

        Args:
            name (str): the full interface identifier
            value (str): the description text to configure
            default (bool): when True, default the description instead

        Returns:
            True if the operation succeeds otherwise False
        """
        commands = self.command_builder('description', value=value,
                                        default=default)
        return self.configure_interface(name, commands)

    def set_shutdown(self, name, value=None, default=False):
        """Configure the interface shutdown state.

        Args:
            name (str): the full interface identifier
            value (bool): True to shut the interface down, False to
                enable it, None to negate the setting
            default (bool): when True, default the shutdown state

        Raises:
            ValueError: if value is not True, False, or None

        Returns:
            True if the operation succeeds otherwise False
        """
        if value not in [True, False, None]:
            raise ValueError('invalid value for shutdown')

        commands = ['interface %s' % name]
        if default:
            commands.append('default shutdown')
        elif value:
            commands.append('shutdown')
        else:
            commands.append('no shutdown')
        return self.configure(commands)
class EthernetInterface(BaseInterface):
    """Configuration resource for physical Ethernet interfaces."""

    def __str__(self):
        return 'EthernetInterface'

    def get(self, name):
        """Return an Ethernet interface resource as a dict.

        Example:
            {
                "name": <string>,
                "type": "ethernet",
                "sflow": [true, false],
                "flowcontrol_send": [on, off],
                "flowcontrol_receive": [on, off]
            }

        Args:
            name (str): the interface identifier to retrieve from the
                running-configuration

        Returns:
            dict: the parsed configuration, or None when the interface
                does not exist
        """
        config = self.get_block('^interface %s' % name)
        if not config:
            return None

        resource = super(EthernetInterface, self).get(name)
        resource.update(dict(name=name, type='ethernet'))
        resource.update(self._parse_sflow(config))
        resource.update(self._parse_flowcontrol_send(config))
        resource.update(self._parse_flowcontrol_receive(config))
        return resource

    def _parse_sflow(self, config):
        """Return {'sflow': bool} parsed from the config block."""
        return dict(sflow='no sflow' not in config)

    def _parse_flowcontrol_send(self, config):
        """Return {'flowcontrol_send': 'on'|'off'} from the config block."""
        match = re.search(r'flowcontrol send (\w+)$', config, re.M)
        return dict(flowcontrol_send=match.group(1) if match else 'off')

    def _parse_flowcontrol_receive(self, config):
        """Return {'flowcontrol_receive': 'on'|'off'} from the config block."""
        match = re.search(r'flowcontrol receive (\w+)$', config, re.M)
        return dict(flowcontrol_receive=match.group(1) if match else 'off')

    def create(self, name):
        """Physical Ethernet interfaces cannot be created.

        Raises:
            NotImplementedError: always
        """
        raise NotImplementedError('creating Ethernet interfaces is '
                                  'not supported')

    def delete(self, name):
        """Physical Ethernet interfaces cannot be deleted.

        Raises:
            NotImplementedError: always
        """
        raise NotImplementedError('deleting Ethernet interfaces is '
                                  'not supported')

    def set_flowcontrol_send(self, name, value=None, default=False):
        """Configure the flowcontrol send value ('on' or 'off')."""
        return self.set_flowcontrol(name, 'send', value, default)

    def set_flowcontrol_receive(self, name, value=None, default=False):
        """Configure the flowcontrol receive value ('on' or 'off')."""
        return self.set_flowcontrol(name, 'receive', value, default)

    def set_flowcontrol(self, name, direction, value=None, default=False):
        """Configure, default, or negate flowcontrol in one direction.

        Args:
            name (str): the full interface identifier
            direction (str): either 'send' or 'receive'
            value (str): 'on' or 'off', or None to negate the setting
            default (bool): when True, default the flowcontrol value

        Raises:
            ValueError: if value is not 'on'/'off' or direction is not
                'send'/'receive'

        Returns:
            True if the operation succeeds otherwise False
        """
        if value is not None:
            if value not in ['on', 'off']:
                raise ValueError('invalid flowcontrol value')
        if direction not in ['send', 'receive']:
            raise ValueError('invalid direction specified')

        commands = ['interface %s' % name]
        if default:
            commands.append('default flowcontrol %s' % direction)
        elif value:
            commands.append('flowcontrol %s %s' % (direction, value))
        else:
            commands.append('no flowcontrol %s' % direction)
        return self.configure(commands)

    def set_sflow(self, name, value=None, default=False):
        """Configure, default, or negate sFlow on the interface.

        Args:
            name (str): the full interface identifier
            value (bool): True to enable sFlow, False to disable it,
                None to negate the setting
            default (bool): when True, default the sFlow setting

        Raises:
            ValueError: if value is not True, False, or None

        Returns:
            True if the operation succeeds otherwise False
        """
        if value not in [True, False, None]:
            raise ValueError
        commands = ['interface %s' % name]
        if default:
            commands.append('default sflow')
        elif value:
            commands.append('sflow enable')
        else:
            commands.append('no sflow enable')
        return self.configure(commands)
class PortchannelInterface(BaseInterface):
    """Configuration resource for Port-Channel (LAG) interfaces."""

    def __str__(self):
        return 'PortchannelInterface'

    def get(self, name):
        """Return a Port-Channel interface resource as a dict.

        Example:
            {
                "name": <string>,
                "type": "portchannel",
                "members": <array of interface names>,
                "minimum_links": <integer>,
                "lacp_mode": [on, active, passive]
            }

        Args:
            name (str): the interface identifier to retrieve from the
                running-configuration

        Returns:
            dict: the parsed configuration, or None when the interface
                does not exist
        """
        config = self.get_block('^interface %s' % name)
        if not config:
            return None

        response = super(PortchannelInterface, self).get(name)
        response.update(dict(name=name, type='portchannel'))

        response['members'] = self.get_members(name)
        response['lacp_mode'] = self.get_lacp_mode(name)
        response.update(self._parse_minimum_links(config))
        return response

    def _parse_minimum_links(self, config):
        """Return {'minimum_links': int} parsed from the config block."""
        match = re.search(r'port-channel min-links (\d+)', config)
        return dict(minimum_links=int(match.group(1)) if match else 0)

    def get_lacp_mode(self, name):
        """Return the LACP mode of the Port-Channel.

        Valid return values are 'on', 'passive', and 'active'; the
        default mode is reported when no member interfaces exist.
        """
        members = self.get_members(name)
        if not members:
            return DEFAULT_LACP_MODE

        # the mode is read from the first member's channel-group line
        for member in self.get_members(name):
            match = re.search(r'channel-group\s\d+\smode\s(?P<value>.+)',
                              self.get_block('^interface %s' % member))
            return match.group('value')

    def get_members(self, name):
        """Return the physical interfaces that belong to the Port-Channel.

        Args:
            name (str): the Port-Channel interface name

        Returns:
            list: the member Ethernet interface names
        """
        grpid = re.search(r'(\d+)', name).group()
        command = 'show port-channel %s all-ports' % grpid
        config = self.node.enable(command, 'text')
        return re.findall(r'Ethernet[\d/]*', config[0]['result']['output'])

    def set_members(self, name, members):
        """Configure the member interfaces of the Port-Channel.

        Args:
            name (str): the Port-Channel interface name
            members (list): the Ethernet interfaces that should be
                members

        Returns:
            True if the operation succeeds otherwise False
        """
        current_members = self.get_members(name)
        lacp_mode = self.get_lacp_mode(name)
        grpid = re.search(r'(\d+)', name).group()

        commands = list()

        # remove members that are no longer wanted
        for member in set(current_members).difference(members):
            commands.append('interface %s' % member)
            commands.append('no channel-group %s' % grpid)

        # add newly requested members, preserving the current LACP mode
        for member in set(members).difference(current_members):
            commands.append('interface %s' % member)
            commands.append('channel-group %s mode %s' % (grpid, lacp_mode))

        return self.configure(commands) if commands else True

    def set_lacp_mode(self, name, mode):
        """Configure the LACP mode of every member interface.

        Args:
            name (str): the Port-Channel interface name
            mode (str): one of 'on', 'passive', 'active'

        Returns:
            True if the operation succeeds; False (without configuring
            anything) when mode is invalid
        """
        if mode not in ['on', 'passive', 'active']:
            return False

        grpid = re.search(r'(\d+)', name).group()

        remove_commands = list()
        add_commands = list()
        for member in self.get_members(name):
            remove_commands.append('interface %s' % member)
            remove_commands.append('no channel-group %s' % grpid)
            add_commands.append('interface %s' % member)
            add_commands.append('channel-group %s mode %s' % (grpid, mode))

        return self.configure(remove_commands + add_commands)

    def set_minimum_links(self, name, value=None, default=False):
        """Configure, default, or negate the Port-Channel min-links value.

        Args:
            name (str): the Port-Channel interface name
            value (str): the min-links value to configure
            default (bool): when True, default the min-links setting

        Returns:
            True if the operation succeeds otherwise False
        """
        commands = ['interface %s' % name]
        if default:
            commands.append('default port-channel min-links')
        elif value is not None:
            commands.append('port-channel min-links %s' % value)
        else:
            commands.append('no port-channel min-links')
        return self.configure(commands)
class VxlanInterface(BaseInterface):

    # Values reported when the corresponding setting is absent from the
    # running configuration.
    DEFAULT_SRC_INTF = ''
    DEFAULT_MCAST_GRP = ''

    def __str__(self):
        return 'VxlanInterface'

    def get(self, name):
        """Returns a Vxlan interface as a set of key/value pairs

        The Vxlan interface resource returns the following:

            * name (str): The name of the interface
            * type (str): Always returns 'vxlan'
            * source_interface (str): The vxlan source-interface value
            * multicast_group (str): The vxlan multicast-group value
            * udp_port (int): The vxlan udp-port value (None if not
                present in the configuration)
            * vlans (dict): The vlan to vni mappings
            * flood_list (list): The list of global VTEP flood list

        Args:
            name (str): The interface identifier to retrieve from the
                running-configuration

        Returns:
            A Python dictionary object of key/value pairs that represents
            the interface configuration.  If the specified interface
            does not exist, then None is returned
        """
        config = self.get_block('^interface %s' % name)
        if not config:
            return None

        response = super(VxlanInterface, self).get(name)
        response.update(dict(name=name, type='vxlan'))

        response.update(self._parse_source_interface(config))
        response.update(self._parse_multicast_group(config))
        response.update(self._parse_udp_port(config))
        response.update(self._parse_vlans(config))
        response.update(self._parse_flood_list(config))

        return response

    def _parse_source_interface(self, config):
        """ Parses the conf block and returns the vxlan source-interface value

        Parses the provided configuration block and returns the value of
        vxlan source-interface. If the value is not configured, this method
        will return DEFAULT_SRC_INTF instead.

        Args:
            config (str): The Vxlan config block to scan

        Return:
            dict: A dict object intended to be merged into the resource dict
        """
        match = re.search(r'vxlan source-interface ([^\s]+)', config)
        value = match.group(1) if match else self.DEFAULT_SRC_INTF
        return dict(source_interface=value)

    def _parse_multicast_group(self, config):
        """ Parses the conf block and returns the vxlan multicast-group value

        Falls back to DEFAULT_MCAST_GRP when the value is not configured.

        Args:
            config (str): The Vxlan config block to scan

        Return:
            dict: A dict object intended to be merged into the resource dict
        """
        match = re.search(r'vxlan multicast-group ([^\s]+)', config)
        value = match.group(1) if match else self.DEFAULT_MCAST_GRP
        return dict(multicast_group=value)

    def _parse_udp_port(self, config):
        """ Parses the conf block and returns the vxlan udp-port value

        Args:
            config (str): The Vxlan config block to scan

        Return:
            dict: A dict object intended to be merged into the resource
                dict.  The value is None when udp-port is absent.
        """
        match = re.search(r'vxlan udp-port (\d+)', config)
        # Bug fix: the previous implementation called match.group(1)
        # unconditionally, raising AttributeError whenever the udp-port
        # line was missing from the config block.
        value = int(match.group(1)) if match else None
        return dict(udp_port=value)

    def _parse_vlans(self, config):
        """ Parses the conf block and returns the vlan to vni mappings

        Each vlan entry carries its configured vni (or None) and its
        per-vlan VTEP flood list (possibly empty).

        Args:
            config (str): The Vxlan config block to scan

        Return:
            dict: A dict object intended to be merged into the resource dict
        """
        vlans = frozenset(re.findall(r'vxlan vlan (\d+)', config))
        values = dict()

        for vid in vlans:
            values[vid] = dict()

            regexp = r'vxlan vlan {} vni (\d+)'.format(vid)
            match = re.search(regexp, config)
            values[vid]['vni'] = match.group(1) if match else None

            regexp = r'vxlan vlan {} flood vtep (.*)$'.format(vid)
            matches = re.search(regexp, config, re.M)
            flood_list = matches.group(1).split(' ') if matches else []
            values[vid]['flood_list'] = flood_list

        return dict(vlans=values)

    def _parse_flood_list(self, config):
        """ Parses the conf block and returns the global VTEP flood list

        Args:
            config (str): The Vxlan config block to scan

        Return:
            dict: A dict object intended to be merged into the resource dict
        """
        match = re.search(r'vxlan flood vtep (.+)$', config, re.M)
        values = list()
        if match:
            values = match.group(1).split(' ')
        return dict(flood_list=values)

    def set_source_interface(self, name, value=None, default=False):
        """Configures the Vxlan source-interface value

        EosVersion:
            4.13.7M

        Args:
            name(str): The interface identifier to configure, defaults to
                Vxlan1
            value(str): The value to configure the source-interface to
            default(bool): Configures the source-interface value to default

        Returns:
            True if the operation succeeds otherwise False
        """
        string = 'vxlan source-interface'
        cmds = self.command_builder(string, value=value, default=default)
        return self.configure_interface(name, cmds)

    def set_multicast_group(self, name, value=None, default=False):
        """Configures the Vxlan multicast-group value

        EosVersion:
            4.13.7M

        Args:
            name(str): The interface identifier to configure, defaults to
                Vxlan1
            value(str): The value to configure the multicast-group to
            default(bool): Configures the multicast-group value to default

        Returns:
            True if the operation succeeds otherwise False
        """
        string = 'vxlan multicast-group'
        cmds = self.command_builder(string, value=value, default=default)
        return self.configure_interface(name, cmds)

    def set_udp_port(self, name, value=None, default=False):
        """Configures vxlan udp-port value

        EosVersion:
            4.13.7M

        Args:
            name(str): The name of the interface to configure
            value(str): The value to set udp-port to
            default(bool): Configure using the default keyword

        Returns:
            True if the operation succeeds otherwise False
        """
        string = 'vxlan udp-port'
        cmds = self.command_builder(string, value=value, default=default)
        return self.configure_interface(name, cmds)

    def add_vtep(self, name, vtep, vlan=None):
        """Adds a new VTEP endpoint to the global or local flood list

        EosVersion:
            4.13.7M

        Args:
            name (str): The name of the interface to configure
            vtep (str): The IP address of the remote VTEP endpoint to add
            vlan (str): The VLAN ID associated with this VTEP. If the VLAN
                keyword is used, then the VTEP is configured as a local flood
                endpoint

        Returns:
            True if the command completes successfully
        """
        if not vlan:
            cmd = 'vxlan flood vtep add {}'.format(vtep)
        else:
            cmd = 'vxlan vlan {} flood vtep add {}'.format(vlan, vtep)
        return self.configure_interface(name, cmd)

    def remove_vtep(self, name, vtep, vlan=None):
        """Removes a VTEP endpoint from the global or local flood list

        EosVersion:
            4.13.7M

        Args:
            name (str): The name of the interface to configure
            vtep (str): The IP address of the remote VTEP endpoint to remove
            vlan (str): The VLAN ID associated with this VTEP. If the VLAN
                keyword is used, then the VTEP is removed from the local
                flood list

        Returns:
            True if the command completes successfully
        """
        if not vlan:
            cmd = 'vxlan flood vtep remove {}'.format(vtep)
        else:
            cmd = 'vxlan vlan {} flood vtep remove {}'.format(vlan, vtep)
        return self.configure_interface(name, cmd)

    def update_vlan(self, name, vid, vni):
        """Adds a new vlan to vni mapping for the interface

        EosVersion:
            4.13.7M

        Args:
            name (str): The name of the interface to configure
            vid (str, int): The vlan id to map to the vni
            vni (str, int): The vni value to use

        Returns:
            True if the command completes successfully
        """
        cmd = 'vxlan vlan %s vni %s' % (vid, vni)
        return self.configure_interface(name, cmd)

    def remove_vlan(self, name, vid):
        """Removes a vlan to vni mapping for the interface

        EosVersion:
            4.13.7M

        Args:
            name (str): The name of the interface to configure
            vid (str, int): The vlan id whose mapping should be removed

        Returns:
            True if the command completes successfully
        """
        return self.configure_interface(name, 'no vxlan vlan %s vni' % vid)
# Lookup table keyed by the two-letter interface name prefix
# ('Et'hernet, 'Po'rt-Channel, 'Vx'lan) giving the implementation class
# for that interface type.
INTERFACE_CLASS_MAP = {
    'Et': EthernetInterface,
    'Po': PortchannelInterface,
    'Vx': VxlanInterface
}
def instance(api):
    """Returns an Interfaces object bound to the given api connection.

    NOTE(review): presumably the module-level factory hook the framework
    uses to instantiate this resource -- confirm against the caller.
    """
    return Interfaces(api)
| bsd-3-clause |
mpapierski/protobuf | python/google/protobuf/internal/containers.py | 224 | 10004 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Contains container classes to represent different protocol buffer types.
This file defines container classes which represent categories of protocol
buffer field types which need extra maintenance. Currently these categories
are:
- Repeated scalar fields - These are all repeated fields which aren't
composite (e.g. they are of simple types like int32, string, etc).
- Repeated composite fields - Repeated fields which are composite. This
includes groups and nested messages.
"""
__author__ = 'petar@google.com (Petar Petrov)'
class BaseContainer(object):

  """Behaviour shared by the repeated-field container classes."""

  # __slots__ keeps the per-instance footprint small and blocks
  # assignment to any attribute other than the two declared here.
  __slots__ = ['_message_listener', '_values']

  def __init__(self, message_listener):
    """
    Args:
      message_listener: A MessageListener implementation whose Modified()
        method is invoked whenever this container changes.
    """
    self._message_listener = message_listener
    self._values = []

  def __getitem__(self, key):
    """Returns the element (or slice) stored under *key*."""
    return self._values[key]

  def __len__(self):
    """Returns how many elements the container currently holds."""
    return len(self._values)

  def __ne__(self, other):
    """Defines inequality in terms of the subclass-provided __eq__."""
    return not self == other

  def __hash__(self):
    raise TypeError('unhashable object')

  def __repr__(self):
    return repr(self._values)

  def sort(self, *args, **kwargs):
    # Honour the legacy 'sort_function' keyword by translating it to the
    # 'cmp' argument of list.sort(); checked up front (LBYL) since the
    # keyword is expected to be rare.
    if 'sort_function' in kwargs:
      kwargs['cmp'] = kwargs.pop('sort_function')
    self._values.sort(*args, **kwargs)
class RepeatedScalarFieldContainer(BaseContainer):

  """Simple, type-checked, list-like container for holding repeated scalars."""

  # Only the checker slot is added on top of BaseContainer's slots.
  __slots__ = ['_type_checker']

  def __init__(self, message_listener, type_checker):
    """
    Args:
      message_listener: A MessageListener implementation whose Modified()
        method is invoked whenever this container changes.
      type_checker: A type_checkers.ValueChecker instance run on every
        element inserted into this container.
    """
    super(RepeatedScalarFieldContainer, self).__init__(message_listener)
    self._type_checker = type_checker

  def _notify(self):
    # Tell the listener the message changed, unless it already knows.
    if not self._message_listener.dirty:
      self._message_listener.Modified()

  def append(self, value):
    """Appends an item to the list. Similar to list.append()."""
    self._type_checker.CheckValue(value)
    self._values.append(value)
    self._notify()

  def insert(self, key, value):
    """Inserts the item at the specified position. Similar to list.insert()."""
    self._type_checker.CheckValue(value)
    self._values.insert(key, value)
    self._notify()

  def extend(self, elem_seq):
    """Extends by appending the given sequence. Similar to list.extend()."""
    if not elem_seq:
      return
    # Validate the whole batch before touching self._values so a failed
    # check leaves the container unchanged.
    checked = []
    for item in elem_seq:
      self._type_checker.CheckValue(item)
      checked.append(item)
    self._values.extend(checked)
    self._message_listener.Modified()

  def MergeFrom(self, other):
    """Appends the contents of another repeated field of the same type to this
    one. We do not check the types of the individual fields.
    """
    self._values.extend(other._values)
    self._message_listener.Modified()

  def remove(self, elem):
    """Removes an item from the list. Similar to list.remove()."""
    self._values.remove(elem)
    self._message_listener.Modified()

  def __setitem__(self, key, value):
    """Sets the item on the specified position."""
    self._type_checker.CheckValue(value)
    self._values[key] = value
    self._message_listener.Modified()

  def __getslice__(self, start, stop):
    """Retrieves the subset of items from between the specified indices."""
    return self._values[start:stop]

  def __setslice__(self, start, stop, values):
    """Sets the subset of items from between the specified indices."""
    checked = []
    for item in values:
      self._type_checker.CheckValue(item)
      checked.append(item)
    self._values[start:stop] = checked
    self._message_listener.Modified()

  def __delitem__(self, key):
    """Deletes the item at the specified position."""
    del self._values[key]
    self._message_listener.Modified()

  def __delslice__(self, start, stop):
    """Deletes the subset of items from between the specified indices."""
    del self._values[start:stop]
    self._message_listener.Modified()

  def __eq__(self, other):
    """Compares the current instance with another one."""
    if self is other:
      return True
    if isinstance(other, self.__class__):
      # Fast path: same container type, compare the backing lists.
      return other._values == self._values
    # Presumably comparing against some other sequence type; delegate.
    return other == self._values
class RepeatedCompositeFieldContainer(BaseContainer):

  """Simple, list-like container for holding repeated composite fields."""

  # Only the descriptor slot is added on top of BaseContainer's slots.
  __slots__ = ['_message_descriptor']

  def __init__(self, message_listener, message_descriptor):
    """
    Note that we pass in a descriptor instead of the generated class
    directly, since at the time we construct this container we haven't
    yet necessarily initialized the type that will be contained in it.

    Args:
      message_listener: A MessageListener implementation whose Modified()
        method is invoked whenever this container changes.
      message_descriptor: A Descriptor instance describing the protocol
        type that should be present in this container.  We'll use the
        _concrete_class field of this descriptor when the client calls
        add().
    """
    super(RepeatedCompositeFieldContainer, self).__init__(message_listener)
    self._message_descriptor = message_descriptor

  def add(self, **kwargs):
    """Adds a new element at the end of the list and returns it. Keyword
    arguments may be used to initialize the element.
    """
    element = self._message_descriptor._concrete_class(**kwargs)
    element._SetListener(self._message_listener)
    self._values.append(element)
    if not self._message_listener.dirty:
      self._message_listener.Modified()
    return element

  def extend(self, elem_seq):
    """Extends by appending the given sequence of elements of the same type
    as this one, copying each individual message.
    """
    factory = self._message_descriptor._concrete_class
    listener = self._message_listener
    for source in elem_seq:
      copy = factory()
      copy._SetListener(listener)
      copy.MergeFrom(source)
      self._values.append(copy)
    listener.Modified()

  def MergeFrom(self, other):
    """Appends the contents of another repeated field of the same type to this
    one, copying each individual message.
    """
    self.extend(other._values)

  def remove(self, elem):
    """Removes an item from the list. Similar to list.remove()."""
    self._values.remove(elem)
    self._message_listener.Modified()

  def __getslice__(self, start, stop):
    """Retrieves the subset of items from between the specified indices."""
    return self._values[start:stop]

  def __delitem__(self, key):
    """Deletes the item at the specified position."""
    del self._values[key]
    self._message_listener.Modified()

  def __delslice__(self, start, stop):
    """Deletes the subset of items from between the specified indices."""
    del self._values[start:stop]
    self._message_listener.Modified()

  def __eq__(self, other):
    """Compares the current instance with another one."""
    if self is other:
      return True
    if not isinstance(other, self.__class__):
      raise TypeError('Can only compare repeated composite fields against '
                      'other repeated composite fields.')
    return self._values == other._values
| bsd-3-clause |
irwinlove/django | django/contrib/gis/gdal/raster/source.py | 297 | 13274 | import json
import os
from ctypes import addressof, byref, c_double, c_void_p
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.driver import Driver
from django.contrib.gis.gdal.error import GDALException
from django.contrib.gis.gdal.prototypes import raster as capi
from django.contrib.gis.gdal.raster.band import BandList
from django.contrib.gis.gdal.raster.const import GDAL_RESAMPLE_ALGORITHMS
from django.contrib.gis.gdal.srs import SpatialReference, SRSException
from django.contrib.gis.geometry.regex import json_regex
from django.utils import six
from django.utils.encoding import (
force_bytes, force_text, python_2_unicode_compatible,
)
from django.utils.functional import cached_property
class TransformPoint(list):
    """Two-element (x, y) view over a raster's geotransform property."""

    # Which geotransform slots hold the (x, y) pair for each property.
    indices = {
        'origin': (0, 3),
        'scale': (1, 5),
        'skew': (2, 4),
    }

    def __init__(self, raster, prop):
        ix, iy = self.indices[prop]
        list.__init__(self, [raster.geotransform[ix], raster.geotransform[iy]])
        self._raster = raster
        self._prop = prop

    @property
    def x(self):
        return self[0]

    @x.setter
    def x(self, value):
        # Read-modify-write the whole geotransform so the raster's setter
        # (and any flushing it performs) is triggered.
        transform = self._raster.geotransform
        transform[self.indices[self._prop][0]] = value
        self._raster.geotransform = transform

    @property
    def y(self):
        return self[1]

    @y.setter
    def y(self, value):
        transform = self._raster.geotransform
        transform[self.indices[self._prop][1]] = value
        self._raster.geotransform = transform
@python_2_unicode_compatible
class GDALRaster(GDALBase):
    """
    Wraps a raster GDAL Data Source object.
    """
    def __init__(self, ds_input, write=False):
        """
        Builds the raster from one of: a file path string, a JSON string,
        a dict description of a new raster, or an existing GDAL dataset
        pointer (c_void_p).

        Args:
            ds_input: path / JSON string / dict / c_void_p (see above).
            write (bool): open (or create) the dataset writable.

        Raises:
            GDALException: for unreadable paths, incomplete dict input or
                an unsupported input type.
        """
        self._write = 1 if write else 0
        Driver.ensure_registered()

        # Preprocess json inputs. This converts json strings to dictionaries,
        # which are parsed below the same way as direct dictionary inputs.
        if isinstance(ds_input, six.string_types) and json_regex.match(ds_input):
            ds_input = json.loads(ds_input)

        # If input is a valid file path, try setting file as source.
        if isinstance(ds_input, six.string_types):
            if not os.path.exists(ds_input):
                raise GDALException('Unable to read raster source input "{}"'.format(ds_input))
            try:
                # GDALOpen will auto-detect the data source type.
                self._ptr = capi.open_ds(force_bytes(ds_input), self._write)
            except GDALException as err:
                raise GDALException('Could not open the datasource at "{}" ({}).'.format(ds_input, err))
        elif isinstance(ds_input, dict):
            # A new raster needs to be created in write mode
            self._write = 1

            # Create driver (in memory by default)
            driver = Driver(ds_input.get('driver', 'MEM'))

            # For out of memory drivers, check filename argument
            if driver.name != 'MEM' and 'name' not in ds_input:
                raise GDALException('Specify name for creation of raster with driver "{}".'.format(driver.name))

            # Check if width and height where specified
            if 'width' not in ds_input or 'height' not in ds_input:
                raise GDALException('Specify width and height attributes for JSON or dict input.')

            # Check if srid was specified
            if 'srid' not in ds_input:
                raise GDALException('Specify srid for JSON or dict input.')

            # Create GDAL Raster
            self._ptr = capi.create_ds(
                driver._ptr,
                force_bytes(ds_input.get('name', '')),
                ds_input['width'],
                ds_input['height'],
                ds_input.get('nr_of_bands', len(ds_input.get('bands', []))),
                ds_input.get('datatype', 6),
                None
            )

            # Set band data if provided
            for i, band_input in enumerate(ds_input.get('bands', [])):
                band = self.bands[i]
                band.data(band_input['data'])
                if 'nodata_value' in band_input:
                    band.nodata_value = band_input['nodata_value']

            # Set SRID
            self.srs = ds_input.get('srid')

            # Set additional properties if provided
            if 'origin' in ds_input:
                self.origin.x, self.origin.y = ds_input['origin']

            if 'scale' in ds_input:
                self.scale.x, self.scale.y = ds_input['scale']

            if 'skew' in ds_input:
                self.skew.x, self.skew.y = ds_input['skew']
        elif isinstance(ds_input, c_void_p):
            # Instantiate the object using an existing pointer to a gdal raster.
            self._ptr = ds_input
        else:
            raise GDALException('Invalid data source input type: "{}".'.format(type(ds_input)))

    def __del__(self):
        # Close the underlying GDAL dataset when the wrapper is collected.
        # The 'capi' check guards against interpreter-shutdown ordering,
        # where the module global may already have been torn down.
        if self._ptr and capi:
            capi.close_ds(self._ptr)

    def __str__(self):
        return self.name

    def __repr__(self):
        """
        Short-hand representation because WKB may be very large.
        """
        return '<Raster object at %s>' % hex(addressof(self._ptr))

    def _flush(self):
        """
        Flush all data from memory into the source file if it exists.
        The data that needs flushing are geotransforms, coordinate systems,
        nodata_values and pixel values. This function will be called
        automatically wherever it is needed.
        """
        # Raise an Exception if the value is being changed in read mode.
        if not self._write:
            raise GDALException('Raster needs to be opened in write mode to change values.')
        capi.flush_ds(self._ptr)

    @property
    def name(self):
        """
        Returns the name of this raster. Corresponds to filename
        for file-based rasters.
        """
        return force_text(capi.get_ds_description(self._ptr))

    @cached_property
    def driver(self):
        """
        Returns the GDAL Driver used for this raster.
        """
        ds_driver = capi.get_ds_driver(self._ptr)
        return Driver(ds_driver)

    @property
    def width(self):
        """
        Width (X axis) in pixels.
        """
        return capi.get_ds_xsize(self._ptr)

    @property
    def height(self):
        """
        Height (Y axis) in pixels.
        """
        return capi.get_ds_ysize(self._ptr)

    @property
    def srs(self):
        """
        Returns the SpatialReference used in this GDALRaster.
        """
        try:
            wkt = capi.get_ds_projection_ref(self._ptr)
            if not wkt:
                return None
            return SpatialReference(wkt, srs_type='wkt')
        except SRSException:
            return None

    @srs.setter
    def srs(self, value):
        """
        Sets the spatial reference used in this GDALRaster. The input can be
        a SpatialReference or any parameter accepted by the SpatialReference
        constructor.
        """
        if isinstance(value, SpatialReference):
            srs = value
        elif isinstance(value, six.integer_types + six.string_types):
            srs = SpatialReference(value)
        else:
            raise ValueError('Could not create a SpatialReference from input.')
        capi.set_ds_projection_ref(self._ptr, srs.wkt.encode())
        self._flush()

    @property
    def geotransform(self):
        """
        Returns the geotransform of the data source.
        Returns the default geotransform if it does not exist or has not been
        set previously. The default is [0.0, 1.0, 0.0, 0.0, 0.0, -1.0].
        """
        # Create empty ctypes double array for data
        gtf = (c_double * 6)()
        capi.get_ds_geotransform(self._ptr, byref(gtf))
        return list(gtf)

    @geotransform.setter
    def geotransform(self, values):
        "Sets the geotransform for the data source."
        # All six entries must be numeric (this also implicitly rejects
        # too-short inputs, since fewer than six values can't sum to 6).
        if sum([isinstance(x, (int, float)) for x in values]) != 6:
            raise ValueError('Geotransform must consist of 6 numeric values.')
        # Create ctypes double array with input and write data
        values = (c_double * 6)(*values)
        capi.set_ds_geotransform(self._ptr, byref(values))
        self._flush()

    @property
    def origin(self):
        """
        Coordinates of the raster origin.
        """
        return TransformPoint(self, 'origin')

    @property
    def scale(self):
        """
        Pixel scale in units of the raster projection.
        """
        return TransformPoint(self, 'scale')

    @property
    def skew(self):
        """
        Skew of pixels (rotation parameters).
        """
        return TransformPoint(self, 'skew')

    @property
    def extent(self):
        """
        Returns the extent as a 4-tuple (xmin, ymin, xmax, ymax).
        """
        # Calculate boundary values based on scale and size
        xval = self.origin.x + self.scale.x * self.width
        yval = self.origin.y + self.scale.y * self.height
        # Calculate min and max values
        xmin = min(xval, self.origin.x)
        xmax = max(xval, self.origin.x)
        ymin = min(yval, self.origin.y)
        ymax = max(yval, self.origin.y)
        return xmin, ymin, xmax, ymax

    @property
    def bands(self):
        """Returns the list of bands of this raster as a BandList."""
        return BandList(self)

    def warp(self, ds_input, resampling='NearestNeighbour', max_error=0.0):
        """
        Returns a warped GDALRaster with the given input characteristics.

        The input is expected to be a dictionary containing the parameters
        of the target raster. Allowed values are width, height, SRID, origin,
        scale, skew, datatype, driver, and name (filename).

        By default, the warp functions keeps all parameters equal to the values
        of the original source raster. For the name of the target raster, the
        name of the source raster will be used and appended with
        _copy. + source_driver_name.

        In addition, the resampling algorithm can be specified with the "resampling"
        input parameter. The default is NearestNeighbor. For a list of all options
        consult the GDAL_RESAMPLE_ALGORITHMS constant.
        """
        # Get the parameters defining the geotransform, srid, and size of the raster
        if 'width' not in ds_input:
            ds_input['width'] = self.width

        if 'height' not in ds_input:
            ds_input['height'] = self.height

        if 'srid' not in ds_input:
            ds_input['srid'] = self.srs.srid

        if 'origin' not in ds_input:
            ds_input['origin'] = self.origin

        if 'scale' not in ds_input:
            ds_input['scale'] = self.scale

        if 'skew' not in ds_input:
            ds_input['skew'] = self.skew

        # Get the driver, name, and datatype of the target raster
        if 'driver' not in ds_input:
            ds_input['driver'] = self.driver.name

        if 'name' not in ds_input:
            ds_input['name'] = self.name + '_copy.' + self.driver.name

        if 'datatype' not in ds_input:
            ds_input['datatype'] = self.bands[0].datatype()

        # Set the number of bands
        ds_input['nr_of_bands'] = len(self.bands)

        # Create target raster
        target = GDALRaster(ds_input, write=True)

        # Copy nodata values to warped raster
        for index, band in enumerate(self.bands):
            target.bands[index].nodata_value = band.nodata_value

        # Select resampling algorithm
        algorithm = GDAL_RESAMPLE_ALGORITHMS[resampling]

        # Reproject image
        capi.reproject_image(
            self._ptr, self.srs.wkt.encode(),
            target._ptr, target.srs.wkt.encode(),
            algorithm, 0.0, max_error,
            c_void_p(), c_void_p(), c_void_p()
        )

        # Make sure all data is written to file
        target._flush()

        return target

    def transform(self, srid, driver=None, name=None, resampling='NearestNeighbour',
                  max_error=0.0):
        """
        Returns a copy of this raster reprojected into the given SRID.
        """
        # Convert the resampling algorithm name into an algorithm id
        algorithm = GDAL_RESAMPLE_ALGORITHMS[resampling]

        # Instantiate target spatial reference system
        target_srs = SpatialReference(srid)

        # Create warped virtual dataset in the target reference system
        target = capi.auto_create_warped_vrt(
            self._ptr, self.srs.wkt.encode(), target_srs.wkt.encode(),
            algorithm, max_error, c_void_p()
        )
        target = GDALRaster(target)

        # Construct the target warp dictionary from the virtual raster
        data = {
            'srid': srid,
            'width': target.width,
            'height': target.height,
            'origin': [target.origin.x, target.origin.y],
            'scale': [target.scale.x, target.scale.y],
            'skew': [target.skew.x, target.skew.y],
        }

        # Set the driver and filepath if provided
        if driver:
            data['driver'] = driver

        if name:
            data['name'] = name

        # Warp the raster into new srid
        return self.warp(data, resampling=resampling, max_error=max_error)
| bsd-3-clause |
ksurct/MercuryRoboticsEmbedded2016 | ksurobot/hardware/_wiringpi.py | 1 | 1766 | from ctypes import cdll, c_int, CFUNCTYPE, POINTER, pointer, c_ubyte
from enum import Enum, IntEnum
from ..util import get_config
from .utils import Wrapper
# Handle to the wiringPi shared library; the wrapped stubs below resolve
# their C counterparts through this object.
libwiringpi = Wrapper('/usr/local/lib/libwiringPi.so.2.32')

# C callback type for GPIO interrupt service routines (no args, no return).
wiringPiISR_cb = CFUNCTYPE(None)
# Pointer-to-unsigned-byte type used as the SPI read/write buffer argument.
wiringPiPiSPIDataRW_data = POINTER(c_ubyte)
class PinModes(IntEnum):
    # Pin function selectors passed to pinMode()/pinModeAlt() below.
    # NOTE(review): values presumably mirror the wiringPi C header
    # constants -- confirm against wiringPi.h.
    INPUT = 0
    OUTPUT = 1
    PWM_OUTPUT = 2
    GPIO_CLOCK = 3
    SOFT_PWM_OUTPUT = 4
    SOFT_TONE_OUTPUT = 5
    PWM_TONE_OUTPUT = 6
class PullModes(IntEnum):
    # Internal pull-resistor settings passed as the 'pud' argument of
    # pullUpDnControl() below.
    PUD_OFF = 0
    PUD_DOWN = 1
    PUD_UP = 2
class InterruptModes(IntEnum):
    # Edge-trigger selectors passed as the 'mode' argument of
    # wiringPiISR() below.
    INT_EDGE_SETUP = 0
    INT_EDGE_FALLING = 1
    INT_EDGE_RISING = 2
    INT_EDGE_BOTH = 3
# ---------------------------------------------------------------------------
# Thin wrappers around the wiringPi C API.  The @libwiringpi.wrap decorator
# declares the ctypes argument/return signature and binds each stub to the
# C function of the same name; the Python bodies ('pass') never execute.
# ---------------------------------------------------------------------------
@libwiringpi.wrap([], None)
def wiringPiSetup():
    pass
@libwiringpi.wrap([], None)
def wiringPiSetupSys():
    pass
@libwiringpi.wrap([], None)
def wiringPiSetupGpio():
    pass
@libwiringpi.wrap([], None)
def wiringPiSetupPhys():
    pass
@libwiringpi.wrap([c_int, c_int], None)
def pinModeAlt(pin, mode):
    pass
@libwiringpi.wrap([c_int], None)
def pwmSetClock(speed):
    pass
@libwiringpi.wrap([c_int, c_int], None)
def pinMode(pin, mode):
    pass
@libwiringpi.wrap([c_int, c_int], None)
def pullUpDnControl(pin, pud):
    pass
@libwiringpi.wrap([c_int], c_int)
def digitalRead(pin):
    pass
@libwiringpi.wrap([c_int, c_int], None)
def digitalWrite(pin, value):
    pass
@libwiringpi.wrap([c_int, c_int], None)
def pwmWrite(pin, value):
    pass
# ISR registration: 'callback' must be a wiringPiISR_cb instance and the
# caller must keep a reference to it, or ctypes will garbage-collect it.
@libwiringpi.wrap([c_int, c_int, wiringPiISR_cb], None)
def wiringPiISR(pin, mode, callback):
    pass
@libwiringpi.wrap([c_int, c_int], c_int)
def wiringPiSPISetup (channel, speed):
    pass
# Full-duplex SPI transfer: 'data' is read and overwritten in place.
@libwiringpi.wrap([c_int, wiringPiPiSPIDataRW_data, c_int], c_int)
def wiringPiSPIDataRW (channel, data, len):
    pass
| apache-2.0 |
RachaelT/UTDchess-RospyXbee | src/UTDchess_RospyXbee/src/scripts/xbee_coord.py | 2 | 4686 | #!/usr/bin/env python
import sys
import rospy
import serial
import struct
import binascii
import time
from std_msgs.msg import String
from chessbot.msg import BeeCommand
from xbee import ZigBee
# Module-level ZigBee radio handle; (re)created in find_bots()/listener().
xbee = None

# Destination used for broadcast frames.  NOTE(review): 0x...FFFF long /
# 0xFFFE short look like the XBee API broadcast / unknown-address values
# -- confirm against the XBee API frame spec.
XBEE_ADDR_LONG = '\x00\x00\x00\x00\x00\x00\xFF\xFF'
XBEE_ADDR_SHORT = '\xFF\xFE'
# Serial device the coordinator radio is attached to.
DEVICE = '/dev/ttyUSB0'

#Each bot will have an addr long, addr short, and id stored. Coordinator is always id 0.
bot_array = []
final_bot_array = []
def exit_handler():
    """Broadcasts a stop command to all robots when the program ends.

    Opens the coordinator radio and transmits an all-zero movement
    command to the broadcast address.  The frame is sent three times
    (as the original code did with three copy-pasted tx calls) --
    presumably to improve the odds of delivery, since broadcasts are
    best-effort.
    """
    # All-zero command: no direction, speed, turn or acceleration.
    stop = BeeCommand()
    stop.command.direction = 0
    stop.command.magnitude = 0
    stop.command.turn = 0
    stop.command.accel = 0

    ser = serial.Serial(DEVICE, 57600)
    xbee = ZigBee(ser)
    payload = prepare_move_cmd(stop.command)

    # Send the identical stop frame three times (deduplicated from three
    # literal copies of the same tx call).
    for _ in range(3):
        xbee.tx(
            dest_addr_long = XBEE_ADDR_LONG,
            dest_addr = XBEE_ADDR_SHORT,
            data = payload,
        )
def find_bots():
    """Discovers robots on the network via an ATND (Node Discovery) command.

    Broadcasts 'ND' and collects responses for up to 30 seconds,
    appending one parsed address record per responding XBee to the
    global bot_array.  Ctrl-C aborts the program.
    """
    global xbee
    ser = serial.Serial(DEVICE, 57600)
    xbee = ZigBee(ser)
    try:
        print("Searching for bots. This may take a moment.")
        xbee.at(
            dest_addr_long = XBEE_ADDR_LONG,
            dest_addr = XBEE_ADDR_SHORT,
            command = 'ND'
        )
        deadline = time.time() + 30
        while time.time() < deadline:
            frame = xbee.wait_read_frame()
            # Identity comparison instead of '== None' (idiom fix); the
            # unused robot counter from the original was dropped.
            if frame is None:
                break
            bot_array.append(parse_ND(frame))
    except KeyboardInterrupt:
        sys.exit(0)
def assemble_msg(info):
    """Returns the XBee address info as one string: long + short + id."""
    return ''.join([info['addr_long'], info['addr_short'], info['id']])
def parse(frame):
    """Slices a transmit-status frame (hex string) into its named fields."""
    return {
        'length': str(len(frame)/2),
        'frame_id': frame[0:2],
        'addr': frame[2:8],
        'retry': frame[8:10],
        'status': frame[10:12],
    }
def parse_ND(frame):
    """Slices a node-discovery response (hex string) into its named fields."""
    return {
        'length': str(len(frame)/2),
        'frame_id': frame[2:4],
        'command': frame[4:8],
        'status': frame[8:10],
        'addr_short': frame[10:14],
        'addr_long': frame[14:30],
        'id': frame[30:32],
    }
def hex_to_addr(adhex):
    """Converts a hex-string address into the raw byte string the XBee expects.

    Works for both the 64-bit (long) and 16-bit (short) address forms.
    """
    address = binascii.unhexlify(adhex)
    # Parenthesized print keeps this line valid under both Python 2 and 3
    # (the original used the Python-2-only print statement).
    print("Address found: %s " % binascii.hexlify(address))
    return address
def prepare_move_cmd(msg):
    """Packs a movement command into the 5-byte binary payload for the radio.

    Layout (one byte each): opcode 4, direction, magnitude, turn, accel.
    NOTE(review): chr() + struct 'c' packing is Python-2 specific; under
    Python 3, struct.pack('ccccc', ...) requires bytes arguments.
    """
    code = chr(4)
    #Packages the command message as binary for the API Frame
    move = chr(msg.direction)
    speed = chr(msg.magnitude)
    turn = chr(msg.turn)
    accel = chr(msg.accel)
    data = struct.pack('ccccc', code, move, speed, turn, accel)
    return data
def callback(data):
    """Relays a received BeeCommand ROS message to the addressed robot.

    The message carries the target XBee's long/short address (hex
    strings) and the movement command to pack and transmit.
    """
    xbee.tx(
        dest_addr_long = hex_to_addr(data.addr_long),
        dest_addr = hex_to_addr(data.addr_short),
        data = prepare_move_cmd(data.command),
    )
    #Prints the command being sent to the robot for debugging purposes
    print("#######################################################")
    # BUG FIX: rospy's logging helper is loginfo(); rospy.log_info does
    # not exist and raised AttributeError on every received command.
    rospy.loginfo(data.command)
def listener():
    """Opens the coordinator radio and forwards /cmd_hex messages until shutdown."""
    #initializes the subscriber that receives the movement commands
    global xbee
    ser = serial.Serial(DEVICE, 57600)
    xbee = ZigBee(ser)
    print "Coordinator ready to receive commands."
    #Every robot's Communicator publishes addresses and movement commands to this topic
    rospy.Subscriber("/cmd_hex", BeeCommand, callback)
    rospy.spin()
    # Reached only after ROS shutdown: release the radio and serial port.
    xbee.halt()
    ser.close()
if __name__ == '__main__':
    #Sets up the exit command
    rospy.on_shutdown(exit_handler)
    #Finds the robot addresses, and publishes them for the communicator templates to use
    find_bots()
    rospy.init_node('addr_publisher')
    pub = rospy.Publisher('/bot_addrs', String, queue_size=1)
    #Waits to publish messages until the topic is subscribed to,
    #so that no addresses are lost
    while not pub.get_num_connections() > 0:
        time.sleep(.5)
    for bot in bot_array:
        addr_msg = String()
        # NOTE(review): the String() instance above is immediately
        # overwritten by the plain str returned from assemble_msg().
        addr_msg = assemble_msg(bot)
        pub.publish(addr_msg)
        time.sleep(1)
    # Sentinel telling subscribers that the address list is complete.
    pub.publish('end')
    #Begins sending movement commands to Robots
    listener()
| mit |
dmgawel/helios-server | helios/view_utils.py | 3 | 2349 | """
Utilities for all views
Ben Adida (12-30-2008)
"""
from django.template import Context, Template, loader
from django.http import HttpResponse, Http404
from django.shortcuts import render_to_response
import utils
from helios import datatypes
# nicely update the wrapper function
from functools import update_wrapper
from helios_auth.security import get_user
import helios
from django.conf import settings
##
## BASICS
##
SUCCESS = HttpResponse("SUCCESS")
# FIXME: error code
FAILURE = HttpResponse("FAILURE")
##
## template abstraction
##
def prepare_vars(request, vars):
    """Return a copy of *vars* extended with the template variables every
    helios page expects (user, csrf token, settings, static paths)."""
    vars_with_user = vars.copy()
    vars_with_user['user'] = get_user(request)

    # csrf protection -- 'in' replaces the deprecated/removed has_key()
    if 'csrf_token' in request.session:
        vars_with_user['csrf_token'] = request.session['csrf_token']

    vars_with_user['utils'] = utils
    vars_with_user['settings'] = settings
    vars_with_user['HELIOS_STATIC'] = '/static/helios/helios'
    vars_with_user['TEMPLATE_BASE'] = helios.TEMPLATE_BASE
    vars_with_user['CURRENT_URL'] = request.path
    vars_with_user['SECURE_URL_HOST'] = settings.SECURE_URL_HOST

    return vars_with_user
def render_template(request, template_name, vars=None, include_user=True):
    """Render 'helios/templates/<template_name>.html' with the standard
    helios context (see prepare_vars).

    If include_user is False, the 'user' entry is stripped from the
    context before rendering.
    """
    # None instead of a mutable {} default argument
    if vars is None:
        vars = {}
    vars_with_user = prepare_vars(request, vars)

    if not include_user:
        del vars_with_user['user']

    # The original loader.get_template() call was dead code:
    # render_to_response resolves the template itself.
    return render_to_response('helios/templates/%s.html' % template_name,
                              vars_with_user)
def render_template_raw(request, template_name, vars=None):
    """Render *template_name* (a full template filename) and return the
    resulting string rather than an HttpResponse."""
    # None instead of a mutable {} default argument
    if vars is None:
        vars = {}
    t = loader.get_template(template_name)

    # if there's a request, prep the vars, otherwise can't do it.
    if request:
        full_vars = prepare_vars(request, vars)
    else:
        full_vars = vars

    c = Context(full_vars)
    return t.render(c)
def render_json(json_txt):
    """Wrap an already-serialized JSON string in an HTTP response with
    the application/json content type."""
    response = HttpResponse(json_txt, "application/json")
    return response
# decorator
def return_json(func):
    """
    A decorator that serializes the output to JSON before returning to the
    web client.
    """
    def convert_to_json(self, *args, **kwargs):
        return_val = func(self, *args, **kwargs)
        try:
            return render_json(utils.to_json(return_val))
        # 'as' syntax works on Python 2.6+ and Python 3, unlike 'except X, e'
        except Exception as e:
            import logging
            logging.error("problem with serialization: " +
                          str(return_val) + " / " + str(e))
            # bare raise preserves the original traceback ('raise e' did not)
            raise
    return update_wrapper(convert_to_json, func)
| apache-2.0 |
mancoast/CPythonPyc_test | fail/300_buffer_tests.py | 3 | 10506 | # Tests that work for both bytes and buffer objects.
# See PEP 3137.
import struct
import sys
class MixinBytesBufferCommonTests(object):
    """Tests that work for both bytes and buffer objects.
    See PEP 3137.

    Subclasses must provide marshal().  Assertions use assertTrue rather
    than the deprecated assert_ alias (removed from modern unittest).
    """

    def marshal(self, x):
        """Convert x into the appropriate type for these tests."""
        raise RuntimeError('test class must provide a marshal method')

    def test_islower(self):
        self.assertFalse(self.marshal(b'').islower())
        self.assertTrue(self.marshal(b'a').islower())
        self.assertFalse(self.marshal(b'A').islower())
        self.assertFalse(self.marshal(b'\n').islower())
        self.assertTrue(self.marshal(b'abc').islower())
        self.assertFalse(self.marshal(b'aBc').islower())
        self.assertTrue(self.marshal(b'abc\n').islower())
        self.assertRaises(TypeError, self.marshal(b'abc').islower, 42)

    def test_isupper(self):
        self.assertFalse(self.marshal(b'').isupper())
        self.assertFalse(self.marshal(b'a').isupper())
        self.assertTrue(self.marshal(b'A').isupper())
        self.assertFalse(self.marshal(b'\n').isupper())
        self.assertTrue(self.marshal(b'ABC').isupper())
        self.assertFalse(self.marshal(b'AbC').isupper())
        self.assertTrue(self.marshal(b'ABC\n').isupper())
        self.assertRaises(TypeError, self.marshal(b'abc').isupper, 42)

    def test_istitle(self):
        self.assertFalse(self.marshal(b'').istitle())
        self.assertFalse(self.marshal(b'a').istitle())
        self.assertTrue(self.marshal(b'A').istitle())
        self.assertFalse(self.marshal(b'\n').istitle())
        self.assertTrue(self.marshal(b'A Titlecased Line').istitle())
        self.assertTrue(self.marshal(b'A\nTitlecased Line').istitle())
        self.assertTrue(self.marshal(b'A Titlecased, Line').istitle())
        self.assertFalse(self.marshal(b'Not a capitalized String').istitle())
        self.assertFalse(self.marshal(b'Not\ta Titlecase String').istitle())
        self.assertFalse(self.marshal(b'Not--a Titlecase String').istitle())
        self.assertFalse(self.marshal(b'NOT').istitle())
        self.assertRaises(TypeError, self.marshal(b'abc').istitle, 42)

    def test_isspace(self):
        self.assertFalse(self.marshal(b'').isspace())
        self.assertFalse(self.marshal(b'a').isspace())
        self.assertTrue(self.marshal(b' ').isspace())
        self.assertTrue(self.marshal(b'\t').isspace())
        self.assertTrue(self.marshal(b'\r').isspace())
        self.assertTrue(self.marshal(b'\n').isspace())
        self.assertTrue(self.marshal(b' \t\r\n').isspace())
        self.assertFalse(self.marshal(b' \t\r\na').isspace())
        self.assertRaises(TypeError, self.marshal(b'abc').isspace, 42)

    def test_isalpha(self):
        self.assertFalse(self.marshal(b'').isalpha())
        self.assertTrue(self.marshal(b'a').isalpha())
        self.assertTrue(self.marshal(b'A').isalpha())
        self.assertFalse(self.marshal(b'\n').isalpha())
        self.assertTrue(self.marshal(b'abc').isalpha())
        self.assertFalse(self.marshal(b'aBc123').isalpha())
        self.assertFalse(self.marshal(b'abc\n').isalpha())
        self.assertRaises(TypeError, self.marshal(b'abc').isalpha, 42)

    def test_isalnum(self):
        self.assertFalse(self.marshal(b'').isalnum())
        self.assertTrue(self.marshal(b'a').isalnum())
        self.assertTrue(self.marshal(b'A').isalnum())
        self.assertFalse(self.marshal(b'\n').isalnum())
        self.assertTrue(self.marshal(b'123abc456').isalnum())
        self.assertTrue(self.marshal(b'a1b3c').isalnum())
        self.assertFalse(self.marshal(b'aBc000 ').isalnum())
        self.assertFalse(self.marshal(b'abc\n').isalnum())
        self.assertRaises(TypeError, self.marshal(b'abc').isalnum, 42)

    def test_isdigit(self):
        self.assertFalse(self.marshal(b'').isdigit())
        self.assertFalse(self.marshal(b'a').isdigit())
        self.assertTrue(self.marshal(b'0').isdigit())
        self.assertTrue(self.marshal(b'0123456789').isdigit())
        self.assertFalse(self.marshal(b'0123456789a').isdigit())
        self.assertRaises(TypeError, self.marshal(b'abc').isdigit, 42)

    def test_lower(self):
        self.assertEqual(b'hello', self.marshal(b'HeLLo').lower())
        self.assertEqual(b'hello', self.marshal(b'hello').lower())
        self.assertRaises(TypeError, self.marshal(b'hello').lower, 42)

    def test_upper(self):
        self.assertEqual(b'HELLO', self.marshal(b'HeLLo').upper())
        self.assertEqual(b'HELLO', self.marshal(b'HELLO').upper())
        self.assertRaises(TypeError, self.marshal(b'hello').upper, 42)

    def test_capitalize(self):
        self.assertEqual(b' hello ', self.marshal(b' hello ').capitalize())
        self.assertEqual(b'Hello ', self.marshal(b'Hello ').capitalize())
        self.assertEqual(b'Hello ', self.marshal(b'hello ').capitalize())
        self.assertEqual(b'Aaaa', self.marshal(b'aaaa').capitalize())
        self.assertEqual(b'Aaaa', self.marshal(b'AaAa').capitalize())
        self.assertRaises(TypeError, self.marshal(b'hello').capitalize, 42)

    def test_ljust(self):
        self.assertEqual(b'abc       ', self.marshal(b'abc').ljust(10))
        self.assertEqual(b'abc   ', self.marshal(b'abc').ljust(6))
        self.assertEqual(b'abc', self.marshal(b'abc').ljust(3))
        self.assertEqual(b'abc', self.marshal(b'abc').ljust(2))
        # NOTE(review): a str fill character -- 3.0-era semantics
        self.assertEqual(b'abc*******', self.marshal(b'abc').ljust(10, '*'))
        self.assertRaises(TypeError, self.marshal(b'abc').ljust)

    def test_rjust(self):
        self.assertEqual(b'       abc', self.marshal(b'abc').rjust(10))
        self.assertEqual(b'   abc', self.marshal(b'abc').rjust(6))
        self.assertEqual(b'abc', self.marshal(b'abc').rjust(3))
        self.assertEqual(b'abc', self.marshal(b'abc').rjust(2))
        self.assertEqual(b'*******abc', self.marshal(b'abc').rjust(10, '*'))
        self.assertRaises(TypeError, self.marshal(b'abc').rjust)

    def test_center(self):
        self.assertEqual(b'   abc    ', self.marshal(b'abc').center(10))
        self.assertEqual(b' abc  ', self.marshal(b'abc').center(6))
        self.assertEqual(b'abc', self.marshal(b'abc').center(3))
        self.assertEqual(b'abc', self.marshal(b'abc').center(2))
        self.assertEqual(b'***abc****', self.marshal(b'abc').center(10, '*'))
        self.assertRaises(TypeError, self.marshal(b'abc').center)

    def test_swapcase(self):
        self.assertEqual(b'hEllO CoMPuTErS',
                         self.marshal(b'HeLLo cOmpUteRs').swapcase())
        self.assertRaises(TypeError, self.marshal(b'hello').swapcase, 42)

    def test_zfill(self):
        self.assertEqual(b'123', self.marshal(b'123').zfill(2))
        self.assertEqual(b'123', self.marshal(b'123').zfill(3))
        self.assertEqual(b'0123', self.marshal(b'123').zfill(4))
        self.assertEqual(b'+123', self.marshal(b'+123').zfill(3))
        self.assertEqual(b'+123', self.marshal(b'+123').zfill(4))
        self.assertEqual(b'+0123', self.marshal(b'+123').zfill(5))
        self.assertEqual(b'-123', self.marshal(b'-123').zfill(3))
        self.assertEqual(b'-123', self.marshal(b'-123').zfill(4))
        self.assertEqual(b'-0123', self.marshal(b'-123').zfill(5))
        self.assertEqual(b'000', self.marshal(b'').zfill(3))
        self.assertEqual(b'34', self.marshal(b'34').zfill(1))
        self.assertEqual(b'0034', self.marshal(b'34').zfill(4))
        self.assertRaises(TypeError, self.marshal(b'123').zfill)

    def test_expandtabs(self):
        self.assertEqual(b'abc\rab      def\ng       hi',
                         self.marshal(b'abc\rab\tdef\ng\thi').expandtabs())
        self.assertEqual(b'abc\rab      def\ng       hi',
                         self.marshal(b'abc\rab\tdef\ng\thi').expandtabs(8))
        self.assertEqual(b'abc\rab  def\ng   hi',
                         self.marshal(b'abc\rab\tdef\ng\thi').expandtabs(4))
        self.assertEqual(b'abc\r\nab  def\ng   hi',
                         self.marshal(b'abc\r\nab\tdef\ng\thi').expandtabs(4))
        self.assertEqual(b'abc\rab      def\ng       hi',
                         self.marshal(b'abc\rab\tdef\ng\thi').expandtabs())
        self.assertEqual(b'abc\rab      def\ng       hi',
                         self.marshal(b'abc\rab\tdef\ng\thi').expandtabs(8))
        self.assertEqual(b'abc\r\nab\r\ndef\ng\r\nhi',
                         self.marshal(b'abc\r\nab\r\ndef\ng\r\nhi').expandtabs(4))
        self.assertEqual(b' a\n b', self.marshal(b' \ta\n\tb').expandtabs(1))

        self.assertRaises(TypeError, self.marshal(b'hello').expandtabs, 42, 42)
        # This test is only valid when sizeof(int) == sizeof(void*) == 4.
        if sys.maxsize < (1 << 32) and struct.calcsize('P') == 4:
            self.assertRaises(OverflowError,
                              self.marshal(b'\ta\n\tb').expandtabs, sys.maxsize)

    def test_title(self):
        self.assertEqual(b' Hello ', self.marshal(b' hello ').title())
        self.assertEqual(b'Hello ', self.marshal(b'hello ').title())
        self.assertEqual(b'Hello ', self.marshal(b'Hello ').title())
        self.assertEqual(b'Format This As Title String',
                         self.marshal(b'fOrMaT thIs aS titLe String').title())
        self.assertEqual(b'Format,This-As*Title;String',
                         self.marshal(b'fOrMaT,thIs-aS*titLe;String').title())
        self.assertEqual(b'Getint', self.marshal(b'getInt').title())
        self.assertRaises(TypeError, self.marshal(b'hello').title, 42)

    def test_splitlines(self):
        self.assertEqual([b'abc', b'def', b'', b'ghi'],
                         self.marshal(b'abc\ndef\n\rghi').splitlines())
        self.assertEqual([b'abc', b'def', b'', b'ghi'],
                         self.marshal(b'abc\ndef\n\r\nghi').splitlines())
        self.assertEqual([b'abc', b'def', b'ghi'],
                         self.marshal(b'abc\ndef\r\nghi').splitlines())
        self.assertEqual([b'abc', b'def', b'ghi'],
                         self.marshal(b'abc\ndef\r\nghi\n').splitlines())
        self.assertEqual([b'abc', b'def', b'ghi', b''],
                         self.marshal(b'abc\ndef\r\nghi\n\r').splitlines())
        self.assertEqual([b'', b'abc', b'def', b'ghi', b''],
                         self.marshal(b'\nabc\ndef\r\nghi\n\r').splitlines())
        self.assertEqual([b'\n', b'abc\n', b'def\r\n', b'ghi\n', b'\r'],
                         self.marshal(b'\nabc\ndef\r\nghi\n\r').splitlines(1))
        self.assertRaises(TypeError, self.marshal(b'abc').splitlines, 42, 42)
| gpl-3.0 |
AZtheAsian/zulip | docs/conf.py | 8 | 9917 | # -*- coding: utf-8 -*-
#
# zulip-contributor-docs documentation build configuration file, created by
# sphinx-quickstart on Mon Aug 17 16:24:04 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
if False:
from typing import Any, Dict, List, Optional
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [] # type: List[str]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Zulip'
copyright = u'2015-2016, The Zulip Team'
author = u'The Zulip Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.4'
# The full version, including alpha/beta/rc tags.
release = '1.4.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None # type: Optional[str]
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# Read The Docs can't import sphinx_rtd_theme, so don't import it there.
# RTD sets this env var in its build environment; it injects its own
# theme, so we only import/select sphinx_rtd_theme for local builds.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'

if not on_rtd:
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'zulip-contributor-docsdoc'
def setup(app):
# type: (Any) -> None
# overrides for wide tables in RTD theme
app.add_stylesheet('theme_overrides.css') # path relative to _static
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
} # type: Dict[str, str]
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'zulip-contributor-docs.tex', u'Zulip Documentation',
u'The Zulip Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'zulip-contributor-docs', u'Zulip Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'zulip-contributor-docs', u'Zulip Documentation',
author, 'zulip-contributor-docs', 'Documentation for contributing to Zulip.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
from recommonmark.parser import CommonMarkParser
source_parsers = {
'.md': CommonMarkParser,
}
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = ['.rst', '.md']
| apache-2.0 |
manics/openmicroscopy | components/tools/OmeroWeb/omeroweb/webadmin/views.py | 3 | 43081 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#
#
# Copyright (c) 2008-2018 University of Dundee.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Aleksandra Tarkowska <A(dot)Tarkowska(at)dundee(dot)ac(dot)uk>,
# 2008-2013.
#
# Version: 1.0
#
''' A view functions is simply a Python function that takes a Web request and
returns a Web response. This response can be the HTML contents of a Web page,
or a redirect, or the 404 and 500 error, or an XML document, or an image...
or anything.'''
import traceback
import logging
import datetime
import omeroweb.webclient.views
from omero_version import build_year
from omero_version import omero_version
from django.template import loader as template_loader
from django.core.urlresolvers import reverse
from django.views.decorators.debug import sensitive_post_parameters
from django.http import HttpResponse, HttpResponseRedirect
from django.template import RequestContext as Context
from django.utils.translation import ugettext as _
from django.utils.encoding import smart_str
from forms import ForgottonPasswordForm, ExperimenterForm, GroupForm
from forms import GroupOwnerForm, MyAccountForm, ChangePassword
from forms import UploadPhotoForm, EmailForm
from omeroweb.http import HttpJPEGResponse
from omeroweb.webclient.decorators import login_required, render_response
from omeroweb.connector import Connector
logger = logging.getLogger(__name__)
##############################################################################
# decorators
class render_response_admin(omeroweb.webclient.decorators.render_response):
    """
    Subclass for adding additional data to the 'context' dict passed to
    templates
    """

    def prepare_context(self, request, context, *args, **kwargs):
        """
        We extend the webclient render_response to check if any groups are
        created.
        If not, add an appropriate message to the template context
        """
        super(render_response_admin, self).prepare_context(request, context,
                                                           *args, **kwargs)

        if 'conn' not in kwargs:
            return
        conn = kwargs['conn']

        noGroupsCreated = conn.isAnythingCreated()
        if noGroupsCreated:
            msg = _('User must be in a group - You have not created any'
                    ' groups yet. Click <a href="%s">here</a> to create a'
                    ' group') % (reverse(viewname="wamanagegroupid",
                                         args=["new"]))
            context['ome']['message'] = msg
        # Bug fix: default to {} (not False) so the chained .get() cannot
        # raise AttributeError when 'server_settings' is missing.
        context['ome']['email'] = request.session \
            .get('server_settings', {}) \
            .get('email', False)
##############################################################################
# utils
import omero
from omero.model import PermissionsI
def prepare_experimenter(conn, eid=None):
    """Load an Experimenter plus the details the admin forms need.

    Defaults to the current session user when *eid* is None.  Returns a
    tuple: (experimenter, default group, other groups, is LDAP user,
    has avatar photo).
    """
    user_id = eid if eid is not None else conn.getEventContext().userId
    exp = conn.getObject("Experimenter", user_id)
    default_group = exp.getDefaultGroup()
    member_groups = list(exp.getOtherGroups())
    has_avatar = conn.hasExperimenterPhoto()
    is_ldap = exp.isLdapUser()
    return exp, default_group, member_groups, is_ldap, has_avatar
def otherGroupsInitialList(groups, excluded_names=("user", "guest"),
                           excluded_ids=list()):
    """Return *groups* minus the excluded names/ids, sorted
    case-insensitively by group name."""
    keep = [gr for gr in groups
            if gr.name not in excluded_names and gr.id not in excluded_ids]
    return sorted(keep, key=lambda g: g.getName().lower())
def ownedGroupsInitial(conn, excluded_names=("user", "guest", "system"),
                       excluded_ids=list()):
    """Return the groups owned by the current user, minus the excluded
    names/ids, sorted case-insensitively by group name."""
    owned = [gr for gr in conn.listOwnedGroups()
             if gr.name not in excluded_names and gr.id not in excluded_ids]
    return sorted(owned, key=lambda g: g.getName().lower())
# myphoto helpers
def attach_photo(conn, newFile):
    """Upload *newFile* as the current user's avatar photo.

    For image content types ("image/<subtype>") the stored format is the
    upper-cased subtype; any other content type is stored verbatim.
    """
    content_type = newFile.content_type
    if content_type.startswith("image"):
        photo_format = content_type.split("/")[1].upper()
    else:
        photo_format = content_type
    conn.uploadMyUserPhoto(smart_str(newFile.name), photo_format,
                           newFile.read())
# permission helpers
def setActualPermissions(permissions):
    """Map the form's numeric permission level to a PermissionsI object.

    0 = private (rw----), 1 = read-only (rwr---), 2 = read-annotate
    (rwra--), 3 = read-write (rwrw--); anything else yields the default
    PermissionsI().
    """
    perms_by_level = {
        0: "rw----",
        1: "rwr---",
        2: "rwra--",
        3: "rwrw--",
    }
    level = int(permissions)
    if level in perms_by_level:
        return PermissionsI(perms_by_level[level])
    return PermissionsI()
def getActualPermissions(group):
    """Return the numeric permission level of *group*: 3 read-write,
    2 read-annotate, 1 read-only, 0 private, or None when no known flag
    is set.

    Raises AttributeError when the group carries no permissions object.
    """
    if group.details.getPermissions() is None:
        raise AttributeError('Object has no permissions')
    p = group.details.getPermissions()
    # Check from most to least permissive; first match wins.
    for is_set, level in ((p.isGroupWrite, 3),
                          (p.isGroupAnnotate, 2),
                          (p.isGroupRead, 1),
                          (p.isUserRead, 0)):
        if is_set():
            return level
    return None
# getters
def getSelectedGroups(conn, ids):
    """Fetch the ExperimenterGroup objects for *ids*; empty list when
    *ids* is None or empty."""
    if not ids:
        return list()
    return list(conn.getObjects("ExperimenterGroup", ids))
def getSelectedExperimenters(conn, ids):
    """Fetch the Experimenter objects for *ids*; empty list when *ids*
    is None or empty."""
    return list(conn.getObjects("Experimenter", ids)) if ids else list()
def mergeLists(list1, list2):
    """Combine two (possibly None/empty) lists.

    NOTE(review): when both inputs are non-empty the result is a *set*
    (deduplicated, unordered), otherwise a list.  Callers appear to rely
    only on iteration/membership -- confirm before normalizing.
    """
    if not list1 and not list2:
        return list()
    if not list1:
        return list(list2)
    if not list2:
        return list(list1)
    combined = list(list1) + list(list2)
    return set(combined)
@login_required()
@render_response()
def drivespace_json(request, query=None, groupId=None, userId=None, conn=None,
                    **kwargs):
    """
    Returns a json list of {"label":<Name>, "data": <Value>, "groupId /
    userId": <id>} for plotting disk usage by users or groups.
    If 'query' is "groups" or "users", this is for an Admin to show all data
    on server divided into groups or users.
    Else, if groupId is not None, we return data for that group, split by user.
    Else, if userId is not None, we return data for that user, split by group.
    """
    diskUsage = []

    # diskUsage.append({"label": "Free space", "data":conn.getFreeSpace()})

    queryService = conn.getQueryService()
    ctx = conn.SERVICE_OPTS.copy()
    params = omero.sys.ParametersI()
    params.theFilter = omero.sys.Filter()

    # Sum pixel-data bytes (for images without a fileset) plus
    # OriginalFile sizes, optionally restricted to one experimenter.
    # Shares 'params' with the enclosing scope, so the eid filter is
    # added at most once per request.
    def getBytes(ctx, eid=None):
        bytesInGroup = 0

        pixelsQuery = "select sum(cast( p.sizeX as double ) * p.sizeY * p.sizeZ * p.sizeT * p.sizeC * pt.bitSize / 8) " \
            "from Pixels p join p.pixelsType as pt join p.image i left outer join i.fileset f " \
            "join p.details.owner as owner " \
            "where f is null"

        filesQuery = "select sum(origFile.size) from OriginalFile as origFile " \
            "join origFile.details.owner as owner"

        if eid is not None:
            params.add('eid', omero.rtypes.rlong(eid))
            pixelsQuery = pixelsQuery + " and owner.id = (:eid)"
            filesQuery = filesQuery + " where owner.id = (:eid)"
        # Calculate disk usage via Pixels
        result = queryService.projection(pixelsQuery, params, ctx)
        if len(result) > 0 and len(result[0]) > 0:
            bytesInGroup += result[0][0].val
        # Now get Original File usage
        result = queryService.projection(filesQuery, params, ctx)
        if len(result) > 0 and len(result[0]) > 0:
            # NOTE(review): private _val here vs .val above -- presumably
            # the same rtype value; confirm and make consistent.
            bytesInGroup += result[0][0]._val
        return bytesInGroup

    sr = conn.getAdminService().getSecurityRoles()

    if query == 'groups':
        for g in conn.listGroups():
            # ignore 'user' and 'guest' groups
            if g.getId() in (sr.guestGroupId, sr.userGroupId):
                continue
            ctx.setOmeroGroup(g.getId())
            b = getBytes(ctx)
            if b > 0:
                diskUsage.append({"label": g.getName(), "data": b,
                                  "groupId": g.getId()})

    elif query == 'users':
        ctx.setOmeroGroup('-1')
        for e in conn.getObjects("Experimenter"):
            b = getBytes(ctx, e.getId())
            if b > 0:
                diskUsage.append({"label": e.getNameWithInitial(), "data": b,
                                  "userId": e.getId()})

    elif userId is not None:
        # NOTE: Python 2 'long' -- would need int() under Python 3
        eid = long(userId)
        for g in conn.getOtherGroups(eid):
            # ignore 'user' and 'guest' groups
            if g.getId() in (sr.guestGroupId, sr.userGroupId):
                continue
            ctx.setOmeroGroup(g.getId())
            b = getBytes(ctx, eid)
            if b > 0:
                diskUsage.append({"label": g.getName(), "data": b,
                                  "groupId": g.getId()})

    # users within a single group
    elif groupId is not None:
        ctx.setOmeroGroup(groupId)
        for e in conn.getObjects("Experimenter"):
            b = getBytes(ctx, e.getId())
            if b > 0:
                diskUsage.append({"label": e.getNameWithInitial(),
                                  "data": b, "userId": e.getId()})

    # largest consumers first, for plotting
    diskUsage.sort(key=lambda x: x['data'], reverse=True)

    return diskUsage
##############################################################################
# views control
def forgotten_password(request, **kwargs):
    """Render the 'forgotten password' page; on a valid POST, submit an
    OMERO ResetPasswordRequest (as guest) for the given username/email."""
    request.session.modified = True

    template = "webadmin/forgotten_password.html"

    conn = None
    error = None

    def getGuestConnection(server_id):
        return Connector(server_id, True).create_guest_connection('OMERO.web')

    if request.method == 'POST':
        form = ForgottonPasswordForm(data=request.POST.copy())
        if form.is_valid():
            server_id = form.cleaned_data['server']
            try:
                conn = getGuestConnection(server_id)
            except Exception:
                logger.error(traceback.format_exc())
                error = "Internal server error, please contact administrator."

            if conn is not None:
                try:
                    req = omero.cmd.ResetPasswordRequest(
                        smart_str(form.cleaned_data['username']),
                        smart_str(form.cleaned_data['email']))
                    handle = conn.c.sf.submit(req)
                    try:
                        conn._waitOnCmd(handle)
                    finally:
                        handle.close()
                    error = "Password was reset. Check your mailbox."
                    form = None
                # 'as' syntax works on Python 2.6+ and Python 3
                except omero.CmdError as exp:
                    logger.error(exp.err)
                    try:
                        error = exp.err.parameters[
                            exp.err.parameters.keys()[0]]
                    # narrowed from a bare 'except:' which also swallowed
                    # KeyboardInterrupt/SystemExit
                    except Exception:
                        error = exp
    else:
        form = ForgottonPasswordForm()

    context = {'error': error, 'form': form, 'build_year': build_year,
               'omero_version': omero_version}
    t = template_loader.get_template(template)
    c = Context(request, context)
    rsp = t.render(c)
    return HttpResponse(rsp)
@login_required()
def index(request, **kwargs):
    """Landing page: admins are redirected to the experimenters list,
    everyone else to their own account page."""
    conn = None
    try:
        conn = kwargs["conn"]
    # narrowed from a bare 'except:' -- only a missing kwarg is expected
    except KeyError:
        logger.error(traceback.format_exc())

    if conn.isAdmin():
        return HttpResponseRedirect(reverse("waexperimenters"))
    else:
        return HttpResponseRedirect(reverse("wamyaccount"))
@login_required()
def logout(request, **kwargs):
    """Delegate to the webclient logout, then return to the webadmin
    index page."""
    omeroweb.webclient.views.logout(request, **kwargs)
    index_url = reverse("waindex")
    return HttpResponseRedirect(index_url)
@login_required(isAdmin=True)
@render_response_admin()
def experimenters(request, conn=None, **kwargs):
    """List every experimenter for the admin 'users' page, noting whether
    the current admin may modify users."""
    template = "webadmin/experimenters.html"
    context = {
        'experimenterList': list(conn.getObjects("Experimenter")),
        'can_modify_user': 'ModifyUser' in conn.getCurrentAdminPrivileges(),
        'template': template,
    }
    return context
@login_required(isAdmin=True)
@render_response_admin()
def manage_experimenter(request, action, eid=None, conn=None, **kwargs):
    """Create, edit or save an Experimenter (user account).

    *action* selects the branch: 'new' shows an empty form, 'create'
    handles that form's POST, 'edit' shows the form pre-filled for
    *eid*, 'save' handles its POST; anything else redirects back to the
    users list. Returns a context dict for render_response_admin, or an
    HttpResponseRedirect.
    """
    template = "webadmin/experimenter_form.html"

    # All groups, sorted case-insensitively by name, used for the
    # group-membership widgets on every branch.
    groups = list(conn.getObjects("ExperimenterGroup"))
    groups.sort(key=lambda x: x.getName().lower())

    user_privileges = conn.get_privileges_for_form(
        conn.getCurrentAdminPrivileges())
    can_modify_user = 'ModifyUser' in user_privileges

    if action == 'new':
        form = ExperimenterForm(
            can_modify_user=can_modify_user,
            user_privileges=user_privileges,
            initial={'with_password': True,
                     'active': True,
                     'groups': otherGroupsInitialList(groups)})
        admin_groups = [
            conn.getAdminService().getSecurityRoles().systemGroupId]
        context = {'form': form,
                   'admin_groups': admin_groups,
                   'can_modify_user': can_modify_user}
    elif action == 'create':
        if request.method != 'POST':
            # 'create' only makes sense as a POST; bounce back to 'new'.
            return HttpResponseRedirect(
                reverse(viewname="wamanageexperimenterid", args=["new"]))
        else:
            name_check = conn.checkOmeName(request.POST.get('omename'))
            email_check = conn.checkEmail(request.POST.get('email'))
            my_groups = getSelectedGroups(
                conn,
                request.POST.getlist('other_groups'))
            initial = {'with_password': True,
                       'my_groups': my_groups,
                       'groups': otherGroupsInitialList(groups)}
            # This form may be returned to user if invalid
            # Needs user_privileges & can_modify_user for this
            form = ExperimenterForm(
                can_modify_user=can_modify_user,
                user_privileges=user_privileges,
                initial=initial, data=request.POST.copy(),
                name_check=name_check, email_check=email_check)
            if form.is_valid():
                logger.debug("Create experimenter form:" +
                             str(form.cleaned_data))
                omename = form.cleaned_data['omename']
                firstName = form.cleaned_data['first_name']
                middleName = form.cleaned_data['middle_name']
                lastName = form.cleaned_data['last_name']
                email = form.cleaned_data['email']
                institution = form.cleaned_data['institution']
                role = form.cleaned_data['role']
                admin = role in ('administrator', 'restricted_administrator')
                active = form.cleaned_data['active']
                defaultGroup = form.cleaned_data['default_group']
                otherGroups = form.cleaned_data['other_groups']
                password = form.cleaned_data['password']

                # default group
                # if default group was not selected take first from the list.
                if defaultGroup is None:
                    defaultGroup = otherGroups[0]

                privileges = conn.get_privileges_from_form(form)
                if privileges is not None:
                    # Only process privileges that we have permission to set
                    privileges = [p for p in privileges
                                  if p in conn.getCurrentAdminPrivileges()]
                # Create a User, Restricted-Admin or Admin, based on privileges
                conn.createExperimenter(
                    omename, firstName, lastName, email, admin, active,
                    defaultGroup, otherGroups, password,
                    privileges, middleName, institution)
                return HttpResponseRedirect(reverse("waexperimenters"))
            # Handle invalid form
            context = {'form': form, 'can_modify_user': can_modify_user}
    elif action == 'edit':
        experimenter, defaultGroup, otherGroups, isLdapUser, hasAvatar = \
            prepare_experimenter(conn, eid)
        try:
            defaultGroupId = defaultGroup.id
        except:
            # NOTE(review): bare except — presumably guards against
            # defaultGroup being None; confirm nothing else can raise here.
            defaultGroupId = None

        initial = {
            'omename': experimenter.omeName,
            'first_name': experimenter.firstName,
            'middle_name': experimenter.middleName,
            'last_name': experimenter.lastName,
            'email': experimenter.email,
            'institution': experimenter.institution,
            'active': experimenter.isActive(),
            'default_group': defaultGroupId,
            'my_groups': otherGroups,
            'other_groups': [g.id for g in otherGroups],
            'groups': otherGroupsInitialList(groups)}

        # Load 'AdminPrivilege' roles for 'initial'
        privileges = conn.getAdminPrivileges(experimenter.id)
        for p in conn.get_privileges_for_form(privileges):
            initial[p] = True

        # Full admins hold 'ReadSession'; admins without it are shown
        # as restricted administrators.
        role = 'user'
        if experimenter.isAdmin():
            if 'ReadSession' in privileges:
                role = 'administrator'
            else:
                role = 'restricted_administrator'
        initial['role'] = role

        root_id = conn.getAdminService().getSecurityRoles().rootId
        user_id = conn.getUserId()
        experimenter_root = long(eid) == root_id
        experimenter_me = long(eid) == user_id
        form = ExperimenterForm(
            can_modify_user=can_modify_user,
            user_privileges=user_privileges,
            experimenter_me=experimenter_me,
            experimenter_root=experimenter_root,
            initial=initial)
        password_form = ChangePassword()

        admin_groups = [
            conn.getAdminService().getSecurityRoles().systemGroupId]
        context = {'form': form, 'eid': eid, 'ldapAuth': isLdapUser,
                   'can_modify_user': can_modify_user,
                   'password_form': password_form,
                   'admin_groups': admin_groups}
    elif action == 'save':
        experimenter, defaultGroup, otherGroups, isLdapUser, hasAvatar = \
            prepare_experimenter(conn, eid)
        if request.method != 'POST':
            return HttpResponseRedirect(
                reverse(viewname="wamanageexperimenterid",
                        args=["edit", experimenter.id]))
        else:
            name_check = conn.checkOmeName(request.POST.get('omename'),
                                           experimenter.omeName)
            email_check = conn.checkEmail(request.POST.get('email'),
                                          experimenter.email)
            my_groups = getSelectedGroups(
                conn,
                request.POST.getlist('other_groups'))
            initial = {'my_groups': my_groups,
                       'groups': otherGroupsInitialList(groups)}
            form = ExperimenterForm(can_modify_user=can_modify_user,
                                    user_privileges=user_privileges,
                                    initial=initial, data=request.POST.copy(),
                                    name_check=name_check,
                                    email_check=email_check)
            if form.is_valid():
                logger.debug("Update experimenter form:" +
                             str(form.cleaned_data))
                omename = form.cleaned_data['omename']
                firstName = form.cleaned_data['first_name']
                middleName = form.cleaned_data['middle_name']
                lastName = form.cleaned_data['last_name']
                email = form.cleaned_data['email']
                institution = form.cleaned_data['institution']
                role = form.cleaned_data['role']
                admin = role in ('administrator', 'restricted_administrator')
                active = form.cleaned_data['active']
                rootId = conn.getAdminService().getSecurityRoles().rootId
                # User can't disable themselves or 'root'
                if experimenter.getId() in [conn.getUserId(), rootId]:
                    # disabled checkbox not in POST: do it manually
                    active = True
                defaultGroup = form.cleaned_data['default_group']
                otherGroups = form.cleaned_data['other_groups']

                # default group
                # if default group was not selected take first from the list.
                if defaultGroup is None:
                    defaultGroup = otherGroups[0]
                # Resolve the selected default-group id to its wrapper.
                for g in groups:
                    if long(defaultGroup) == g.id:
                        dGroup = g
                        break

                listOfOtherGroups = set()
                # rest of groups
                for g in groups:
                    for og in otherGroups:
                        # remove defaultGroup from otherGroups if contains
                        if long(og) == long(dGroup.id):
                            pass
                        elif long(og) == g.id:
                            listOfOtherGroups.add(g)

                # Update 'AdminPrivilege' config roles for user
                privileges = conn.get_privileges_from_form(form)
                to_add = []
                to_remove = []
                # privileges may be None if disabled in form
                if privileges is not None:
                    # Only update privileges that we have permission to set
                    # (prevents privilege escalation)
                    for p in conn.getCurrentAdminPrivileges():
                        if p in privileges:
                            to_add.append(p)
                        else:
                            to_remove.append(p)
                    conn.updateAdminPrivileges(experimenter.id,
                                               to_add, to_remove)

                conn.updateExperimenter(
                    experimenter, omename, firstName, lastName, email, admin,
                    active, dGroup, listOfOtherGroups, middleName,
                    institution)
                return HttpResponseRedirect(reverse("waexperimenters"))
            # Invalid form: fall through and re-display it with errors.
            context = {'form': form, 'eid': eid, 'ldapAuth': isLdapUser,
                       'can_modify_user': can_modify_user}
    else:
        return HttpResponseRedirect(reverse("waexperimenters"))

    context['template'] = template
    return context
@sensitive_post_parameters('old_password', 'password',
                           'confirmation', 'csrfmiddlewaretoken')
@login_required()
@render_response_admin()
def manage_password(request, eid, conn=None, **kwargs):
    """Change the password of experimenter *eid*.

    Users may change their own password (old password required);
    admins may change other users' passwords. Returns a context dict
    with any validation/service error for render_response_admin.
    """
    template = "webadmin/password.html"
    error = None
    if request.method == 'POST':
        password_form = ChangePassword(data=request.POST.copy())
        if not password_form.is_valid():
            error = password_form.errors
        else:
            old_password = password_form.cleaned_data['old_password']
            password = password_form.cleaned_data['password']
            # if we're trying to change our own password...
            if conn.getEventContext().userId == int(eid):
                try:
                    conn.changeMyPassword(password, old_password)
                except Exception, x:
                    error = x.message  # E.g. old_password not valid
            elif conn.isAdmin():
                exp = conn.getObject("Experimenter", eid)
                try:
                    conn.changeUserPassword(exp.omeName, password,
                                            old_password)
                except Exception, x:
                    error = x.message
            else:
                raise AttributeError("Can't change another user's password"
                                     " unless you are an Admin")

    # NOTE(review): password_form is only bound on POST requests; a GET
    # would raise NameError here — presumably this view is only ever
    # reached via POST. Confirm against the URL/template usage.
    context = {'error': error, 'password_form': password_form, 'eid': eid}
    context['template'] = template
    return context
@login_required(isAdmin=True)
@render_response_admin()
def groups(request, conn=None, **kwargs):
    """List all ExperimenterGroups for the admin 'groups' page.

    Returns a context dict consumed by render_response_admin.
    """
    privileges = conn.getCurrentAdminPrivileges()
    context = {
        'template': "webadmin/groups.html",
        'groups': conn.getObjects("ExperimenterGroup"),
        'can_modify_group': 'ModifyGroup' in privileges,
        'can_add_member': 'ModifyGroupMembership' in privileges,
    }
    return context
@login_required(isAdmin=True)
@render_response_admin()
def manage_group(request, action, gid=None, conn=None, **kwargs):
    """Create, edit or save an ExperimenterGroup.

    *action* is one of 'new', 'edit' (display form), 'create', 'save'
    (handle POST); anything else redirects to the groups list. Returns
    a context dict for render_response_admin, or a redirect.
    """
    template = "webadmin/group_form.html"
    msgs = []

    user_privileges = conn.getCurrentAdminPrivileges()
    can_modify_group = 'ModifyGroup' in user_privileges
    can_add_member = 'ModifyGroupMembership' in user_privileges

    experimenters = list(conn.getObjects("Experimenter"))
    experimenters.sort(key=lambda x: x.getLastName().lower())

    # ids of the built-in system/user/guest groups — these get special
    # treatment in the form (group_is_system).
    system_groups = [
        conn.getAdminService().getSecurityRoles().systemGroupId,
        conn.getAdminService().getSecurityRoles().userGroupId,
        conn.getAdminService().getSecurityRoles().guestGroupId]

    initial = {'experimenters': experimenters,
               'permissions': 0}
    group_is_system = False
    name_check = False
    data = None
    if gid is not None:
        # Editing/saving an existing group: pre-fill from the server.
        group = conn.getObject("ExperimenterGroup", gid)
        initial['name'] = group.name
        initial['description'] = group.description
        initial['owners'] = [e.id for e in group.getOwners()]
        initial['members'] = [m.id for m in group.getMembers()]
        initial['permissions'] = getActualPermissions(group)
        group_is_system = long(gid) in system_groups
    if request.method == 'POST':
        data = request.POST.copy()
        # name needs to be unique
        old_name = group.name if gid is not None else None
        name_check = conn.checkGroupName(request.POST.get('name'), old_name)
    form = GroupForm(initial=initial,
                     data=data,
                     name_check=name_check,
                     can_modify_group=can_modify_group,
                     can_add_member=can_add_member,
                     group_is_system=group_is_system)
    context = {'form': form}

    if action == 'new' or action == 'edit':
        # form prepared above - nothing else needed
        pass
    elif action == 'create':
        if request.method != 'POST':
            return HttpResponseRedirect(reverse(viewname="wamanagegroupid",
                                                args=["new"]))
        else:
            if form.is_valid():
                logger.debug("Create group form:" + str(form.cleaned_data))
                if can_modify_group:
                    name = form.cleaned_data['name']
                    description = form.cleaned_data['description']
                    permissions = form.cleaned_data['permissions']
                    perm = setActualPermissions(permissions)
                    gid = conn.createGroup(name, perm, description)
                if can_add_member:
                    owners = form.cleaned_data['owners']
                    members = form.cleaned_data['members']
                    group = conn.getObject("ExperimenterGroup", gid)
                    listOfOwners = getSelectedExperimenters(conn, owners)
                    conn.setOwnersOfGroup(group, listOfOwners)
                    # owners are implicitly members too
                    new_members = getSelectedExperimenters(
                        conn, mergeLists(members, owners))
                    conn.setMembersOfGroup(group, new_members)
                return HttpResponseRedirect(reverse("wagroups"))
    elif action == 'save':
        if request.method != 'POST':
            return HttpResponseRedirect(reverse(viewname="wamanagegroupid",
                                                args=["edit", group.id]))
        else:
            if form.is_valid():
                logger.debug("Update group form:" + str(form.cleaned_data))
                if can_modify_group:
                    name = form.cleaned_data['name']
                    description = form.cleaned_data['description']
                    permissions = form.cleaned_data['permissions']
                    # NOTE(review): this compares the submitted value with
                    # its int() conversion; presumably intended to detect a
                    # changed permissions level — confirm the cleaned type.
                    if permissions != int(permissions):
                        perm = setActualPermissions(permissions)
                    else:
                        perm = None
                    try:
                        msgs = conn.updateGroup(group, name, perm, description)
                    except omero.SecurityViolation, ex:
                        if ex.message.startswith('Cannot change permissions'):
                            msgs.append("Downgrade to private group not"
                                        " currently possible")
                        else:
                            msgs.append(ex.message)
                removalFails = []
                if can_add_member:
                    owners = form.cleaned_data['owners']
                    members = form.cleaned_data['members']
                    listOfOwners = getSelectedExperimenters(conn, owners)
                    conn.setOwnersOfGroup(group, listOfOwners)
                    new_members = getSelectedExperimenters(
                        conn, mergeLists(members, owners))
                    removalFails = conn.setMembersOfGroup(group, new_members)
                if len(removalFails) == 0 and len(msgs) == 0:
                    return HttpResponseRedirect(reverse("wagroups"))
                # If we've failed to remove user...
                # prepare error messages
                for e in removalFails:
                    url = reverse("wamanageexperimenterid",
                                  args=["edit", e.id])
                    msgs.append("Can't remove user <a href='%s'>%s</a> from"
                                " their only group" % (url, e.getFullName()))
                # Refresh form (ignore POST data)
                context['form'] = GroupForm(initial=initial,
                                            name_check=name_check,
                                            can_modify_group=can_modify_group,
                                            can_add_member=can_add_member,
                                            group_is_system=group_is_system)
    else:
        return HttpResponseRedirect(reverse("wagroups"))

    context['userId'] = conn.getEventContext().userId
    context['template'] = template
    context['can_modify_group'] = can_modify_group
    context['can_add_member'] = can_add_member
    context['gid'] = gid

    # prevent removing 'root' or yourself from group if it's a system group
    context['admins'] = [conn.getAdminService().getSecurityRoles().rootId]
    if group_is_system:
        context['admins'].append(conn.getUserId())

    if len(msgs) > 0:
        context['ome'] = {}
        context['ome']['message'] = "<br>".join(msgs)
    return context
@login_required(isGroupOwner=True)
@render_response_admin()
def manage_group_owner(request, action, gid, conn=None, **kwargs):
    """Group-owner view: edit/save membership, owners and permissions
    of group *gid* without requiring full admin rights.

    Returns a context dict for render_response_admin, or a redirect.
    """
    template = "webadmin/group_form_owner.html"
    group = conn.getObject("ExperimenterGroup", gid)
    experimenters = list(conn.getObjects("Experimenter"))
    userId = conn.getEventContext().userId

    def getEditFormContext():
        # Re-read the group so the form reflects any changes just saved.
        group = conn.getObject("ExperimenterGroup", gid)
        memberIds = [m.id for m in group.getMembers()]
        ownerIds = [e.id for e in group.getOwners()]
        permissions = getActualPermissions(group)
        form = GroupOwnerForm(initial={'permissions': permissions,
                                       'members': memberIds,
                                       'owners': ownerIds,
                                       'experimenters': experimenters})
        context = {'form': form, 'gid': gid, 'permissions': permissions,
                   "group": group}
        # NOTE(review): 'e != userId' compares an Experimenter wrapper to an
        # int, so it is presumably always True — confirm whether the intent
        # was 'e.id != userId'.
        experimenterDefaultIds = list()
        for e in experimenters:
            if (e != userId and e.getDefaultGroup() is not None and
                    e.getDefaultGroup().id == group.id):
                experimenterDefaultIds.append(str(e.id))
        context['experimenterDefaultGroups'] = ",".join(experimenterDefaultIds)
        context['ownerIds'] = (",".join(str(x) for x in ownerIds
                                        if x != userId))
        return context

    msgs = []
    if action == 'edit':
        context = getEditFormContext()
    elif action == "save":
        if request.method != 'POST':
            return HttpResponseRedirect(
                reverse(viewname="wamanagegroupownerid",
                        args=["edit", group.id]))
        else:
            form = GroupOwnerForm(data=request.POST.copy(),
                                  initial={'experimenters': experimenters})
            if form.is_valid():
                members = form.cleaned_data['members']
                owners = form.cleaned_data['owners']
                permissions = form.cleaned_data['permissions']

                listOfOwners = getSelectedExperimenters(conn, owners)
                conn.setOwnersOfGroup(group, listOfOwners)

                new_members = getSelectedExperimenters(conn, members)
                removalFails = conn.setMembersOfGroup(group, new_members)

                # Only touch permissions when they actually changed.
                permissions = int(permissions)
                if getActualPermissions(group) != permissions:
                    perm = setActualPermissions(permissions)
                    try:
                        msg = conn.updatePermissions(group, perm)
                        if msg is not None:
                            msgs.append(msg)
                    except omero.SecurityViolation, ex:
                        if ex.message.startswith('Cannot change permissions'):
                            msgs.append("Downgrade to private group not"
                                        " currently possible")
                        else:
                            msgs.append(ex.message)

                if len(removalFails) == 0 and len(msgs) == 0:
                    return HttpResponseRedirect(reverse("wamyaccount"))
                # If we've failed to remove user...
                # prepare error messages
                for e in removalFails:
                    url = reverse("wamanageexperimenterid",
                                  args=["edit", e.id])
                    msgs.append("Can't remove user <a href='%s'>%s</a> from"
                                " their only group" % (url, e.getFullName()))
                # refresh the form and add messages
                context = getEditFormContext()
            else:
                context = {'gid': gid, 'form': form}
    else:
        return HttpResponseRedirect(reverse("wamyaccount"))

    context['userId'] = userId
    context['template'] = template

    if len(msgs) > 0:
        context['ome'] = {}
        context['ome']['message'] = "<br>".join(msgs)
    return context
@login_required()
@render_response_admin()
def my_account(request, action=None, conn=None, **kwargs):
    """Display or save the current user's own account details.

    With ``action == 'save'`` and a POST, updates the account and
    redirects; otherwise shows a form pre-filled from the server.
    """
    template = "webadmin/myaccount.html"

    experimenter, defaultGroup, otherGroups, isLdapUser, hasAvatar = \
        prepare_experimenter(conn)
    try:
        defaultGroupId = defaultGroup.id
    except:
        # NOTE(review): bare except — presumably guards against
        # defaultGroup being None; confirm nothing else can raise here.
        defaultGroupId = None

    ownedGroups = ownedGroupsInitial(conn)

    password_form = ChangePassword()

    form = None
    if action == "save":
        if request.method != 'POST':
            return HttpResponseRedirect(reverse(viewname="wamyaccount",
                                                args=["edit"]))
        else:
            email_check = conn.checkEmail(request.POST.get('email'),
                                          experimenter.email)
            form = MyAccountForm(data=request.POST.copy(),
                                 initial={'groups': otherGroups},
                                 email_check=email_check)
            if form.is_valid():
                firstName = form.cleaned_data['first_name']
                middleName = form.cleaned_data['middle_name']
                lastName = form.cleaned_data['last_name']
                email = form.cleaned_data['email']
                institution = form.cleaned_data['institution']
                defaultGroupId = form.cleaned_data['default_group']
                conn.updateMyAccount(
                    experimenter, firstName, lastName, email, defaultGroupId,
                    middleName, institution)
                return HttpResponseRedirect(reverse("wamyaccount"))
            # Invalid form falls through and is re-displayed with errors.
    else:
        form = MyAccountForm(initial={
            'omename': experimenter.omeName,
            'first_name': experimenter.firstName,
            'middle_name': experimenter.middleName,
            'last_name': experimenter.lastName,
            'email': experimenter.email,
            'institution': experimenter.institution,
            'default_group': defaultGroupId,
            'groups': otherGroups})

    context = {'form': form, 'ldapAuth': isLdapUser,
               'experimenter': experimenter, 'ownedGroups': ownedGroups,
               'password_form': password_form}
    context['freeSpace'] = conn.getFreeSpace()
    context['template'] = template
    return context
@login_required()
def myphoto(request, conn=None, **kwargs):
    """Serve the current user's avatar photo as a JPEG response."""
    photo = conn.getExperimenterPhoto()
    return HttpJPEGResponse(photo)
@login_required()
@render_response_admin()
def manage_avatar(request, action=None, conn=None, **kwargs):
    """Upload, crop, edit or delete the current user's avatar photo.

    *action* in ('upload', 'crop', 'editphoto', 'deletephoto'); with no
    action the avatar page is simply displayed. Returns a context dict
    for render_response_admin, or a redirect after a mutating action.
    """
    template = "webadmin/avatar.html"

    edit_mode = False
    photo_size = None
    form_file = UploadPhotoForm()

    if action == "upload":
        if request.method == 'POST':
            form_file = UploadPhotoForm(request.POST, request.FILES)
            if form_file.is_valid():
                attach_photo(conn, request.FILES['photo'])
                return HttpResponseRedirect(
                    reverse(viewname="wamanageavatar",
                            args=[conn.getEventContext().userId]))
    elif action == "crop":
        # Crop box corners come straight from the POSTed crop widget.
        x1 = long(request.POST.get('x1'))
        x2 = long(request.POST.get('x2'))
        y1 = long(request.POST.get('y1'))
        y2 = long(request.POST.get('y2'))
        box = (x1, y1, x2, y2)
        conn.cropExperimenterPhoto(box)
        return HttpResponseRedirect(reverse("wamyaccount"))
    elif action == "editphoto":
        photo_size = conn.getExperimenterPhotoSize()
        if photo_size is not None:
            edit_mode = True
    elif action == "deletephoto":
        conn.deleteExperimenterPhoto()
        return HttpResponseRedirect(reverse("wamyaccount"))

    # Always refresh the current photo size for the template.
    photo_size = conn.getExperimenterPhotoSize()
    context = {'form_file': form_file, 'edit_mode': edit_mode,
               'photo_size': photo_size}
    context['template'] = template
    return context
@login_required()
@render_response_admin()
def stats(request, conn=None, **kwargs):
    """Show server statistics (currently just the free disk space)."""
    return {
        'template': "webadmin/statistics.html",
        'freeSpace': conn.getFreeSpace(),
    }
# @login_required()
# def load_drivespace(request, conn=None, **kwargs):
# offset = request.POST.get('offset', 0)
# rv = usersData(conn, offset)
# return HttpJsonResponse(rv)
@login_required(isAdmin=True)
@render_response_admin()
def email(request, conn=None, **kwargs):
    """
    View to gather recipients, subject and message for sending email
    announcements.

    Submits an omero.cmd.SendEmailRequest and registers the returned
    handle in the session 'callback' dict so the activities panel can
    track its progress. Returns a context dict for render_response_admin.
    """

    # Check that the appropriate web settings are available
    # NOTE(review): if 'server_settings' is missing, session.get returns
    # False and .get() raises AttributeError — presumably settings are
    # always present for a logged-in admin; confirm.
    if (not request.session.get('server_settings', False)
            .get('email', False)):
        return {'template': 'webadmin/noemail.html'}
    context = {'template': 'webadmin/email.html'}

    # Get experimenters and groups.
    experimenter_list = list(conn.getObjects("Experimenter"))
    group_list = list(conn.getObjects("ExperimenterGroup"))

    # Sort experimenters and groups
    experimenter_list.sort(key=lambda x: x.getFirstName().lower())
    group_list.sort(key=lambda x: x.getName().lower())

    if request.method == 'POST':  # If the form has been submitted...
        # Bind the form to the POSTed data for validation.
        form = EmailForm(experimenter_list, group_list, conn, request,
                         data=request.POST.copy())
        if form.is_valid():  # All validation rules pass
            subject = form.cleaned_data['subject']
            message = form.cleaned_data['message']
            experimenters = form.cleaned_data['experimenters']
            groups = form.cleaned_data['groups']
            everyone = form.cleaned_data['everyone']
            inactive = form.cleaned_data['inactive']

            req = omero.cmd.SendEmailRequest(subject=subject, body=message,
                                             groupIds=groups,
                                             userIds=experimenters,
                                             everyone=everyone,
                                             inactive=inactive)
            handle = conn.c.sf.submit(req)
            if handle is not None:
                # Register the async job so activities can poll it.
                request.session.modified = True
                request.session['callback'][str(handle)] = {
                    'job_type': 'send_email',
                    'status': 'in progress', 'error': 0,
                    'start_time': datetime.datetime.now()}
            form = EmailForm(experimenter_list, group_list, conn, request)
            context['non_field_errors'] = ("Email sent."
                                           " Check status in activities.")
        else:
            context['non_field_errors'] = "Email wasn't sent."
    else:
        form = EmailForm(experimenter_list, group_list, conn, request)

    context['form'] = form
    return context
# Problem where render_response_admin was not populating required
# admin details:
# Explanation is that the CBV FormView returns an http response so the
# decorator render_response_admin simply bails out and returns this
# I think maybe the render_response decorator should not be adding context
# because it fails in situations like this, better to insert that context
# using a template tag when required
| gpl-2.0 |
MER-GROUP/intellij-community | python/helpers/py2only/docutils/parsers/rst/languages/sv.py | 57 | 3988 | # $Id: sv.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: Adam Chodorowski <chodorowski@users.sourceforge.net>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Swedish language mappings for language-dependent features of reStructuredText.
"""
__docformat__ = 'reStructuredText'
# Maps Swedish (or still-untranslated English placeholder) directive names
# to the canonical names registered in docutils.parsers.rst.directives.
directives = {
      u'observera': 'attention',
      u'caution (translation required)': 'caution',
      u'fara': 'danger',
      u'fel': 'error',
      u'v\u00e4gledning': 'hint',
      u'viktigt': 'important',
      u'notera': 'note',
      u'tips': 'tip',
      u'varning': 'warning',
      u'admonition (translation required)': 'admonition',
      u'sidebar (translation required)': 'sidebar',
      u'\u00e4mne': 'topic',
      u'line-block (translation required)': 'line-block',
      u'parsed-literal (translation required)': 'parsed-literal',
      u'mellanrubrik': 'rubric',
      u'epigraph (translation required)': 'epigraph',
      u'highlights (translation required)': 'highlights',
      u'pull-quote (translation required)': 'pull-quote',
      u'compound (translation required)': 'compound',
      u'container (translation required)': 'container',
      # u'fr\u00e5gor': 'questions',
      # NOTE: A bit long, but recommended by http://www.nada.kth.se/dataterm/:
      # u'fr\u00e5gor-och-svar': 'questions',
      # u'vanliga-fr\u00e5gor': 'questions',
      u'table (translation required)': 'table',
      u'csv-table (translation required)': 'csv-table',
      u'list-table (translation required)': 'list-table',
      u'meta': 'meta',
      # u'bildkarta': 'imagemap',   # FIXME: Translation might be too literal.
      u'bild': 'image',
      u'figur': 'figure',
      u'inkludera': 'include',
      u'r\u00e5': 'raw',            # FIXME: Translation might be too literal.
      u'ers\u00e4tt': 'replace',
      u'unicode': 'unicode',
      u'datum': 'date',
      u'class (translation required)': 'class',
      u'role (translation required)': 'role',
      u'default-role (translation required)': 'default-role',
      u'title (translation required)': 'title',
      u'inneh\u00e5ll': 'contents',
      u'sektionsnumrering': 'sectnum',
      u'target-notes (translation required)': 'target-notes',
      u'header (translation required)': 'header',
      u'footer (translation required)': 'footer',
      # u'fotnoter': 'footnotes',
      # u'citeringar': 'citations',
      }
"""Swedish name to registered (in directives/__init__.py) directive name
mapping."""
# Maps Swedish (or still-untranslated English placeholder) role names to
# the canonical interpreted-text role names.
roles = {
      u'abbreviation (translation required)': 'abbreviation',
      u'acronym (translation required)': 'acronym',
      u'index (translation required)': 'index',
      u'subscript (translation required)': 'subscript',
      u'superscript (translation required)': 'superscript',
      u'title-reference (translation required)': 'title-reference',
      u'pep-reference (translation required)': 'pep-reference',
      u'rfc-reference (translation required)': 'rfc-reference',
      u'emphasis (translation required)': 'emphasis',
      u'strong (translation required)': 'strong',
      u'literal (translation required)': 'literal',
      u'named-reference (translation required)': 'named-reference',
      u'anonymous-reference (translation required)': 'anonymous-reference',
      u'footnote-reference (translation required)': 'footnote-reference',
      u'citation-reference (translation required)': 'citation-reference',
      u'substitution-reference (translation required)': 'substitution-reference',
      u'target (translation required)': 'target',
      u'uri-reference (translation required)': 'uri-reference',
      u'r\u00e5': 'raw',}
"""Mapping of Swedish role names to canonical role names for interpreted text.
"""
| apache-2.0 |
vanzhiganov/jftpgw | support/cachepurgy.py | 1 | 2239 | #!/usr/bin/env python
#
# cachepurgy - script to reduce the size of a jftpgw cache
#
# (C) 2001 Julian Einwag <julian@brightstar.swin.de>
#
from sys import *
from os import *
from string import *
from stat import *
from time import *
# Maximum total cache size in bytes (40 MiB); cleancache() deletes the
# oldest entries until the cache fits under this limit.
maxsize = 40*1024*1024
# Root directory of the jftpgw cache to be pruned.
cachedir = "/tmp/cache"
# This class stores the information of an object in cache (size, age, etc...)
class fileinfo:
    """Snapshot of one cache entry: its age, size and directory status."""

    def __init__(self, name):
        statobj = stat(name)
        # Age in seconds since the entry's ctime.
        self.age = time()-statobj[ST_CTIME]
        self.size = statobj[ST_SIZE]
        self.name = name
        self.isempty = 0
        if path.isdir(name):
            self.isdir = 1
            # Only meaningful for directories: 1 when it has no entries.
            self.isempty = isempty(name)
        else:
            self.isdir = 0

    def __cmp__(self, other):
        # We want to have older items first
        # (Python 2 comparison hook: sorting fileinfo objects therefore
        # orders them oldest-first.)
        return cmp(other.age, self.age)
# Checks if a dir is empty
def isempty(dir):
    """Return 1 if directory *dir* contains no entries, else 0.

    Keeps returning ints (not bools) to preserve the original interface.
    """
    # Idiomatic emptiness test: an empty listing is falsy, so there is
    # no need to compare len() against zero.
    return 0 if listdir(dir) else 1
# Caclulates the size of the cache
def cachesize(stats):
    """Return the total size in bytes of all cache entries in *stats*."""
    # sum() over a generator replaces the manual accumulator loop.
    return sum(entry.size for entry in stats)
# This removes empty dirs from the cache
def removedirs(stats):
for file in stats:
if file.isdir and file.isempty:
print "Removing directory: ", file.name
rmdir(file.name)
# Cleans the cache
def cleancache(stats):
if cachesize(stats) > maxsize:
if (not stats[0].isdir):
print "Delete: %s" % stats[0].name
try:
unlink(stats[0].name)
except OSError:
stdout.write("File %s does not exist!" % stats[0].name)
# Yeah, I love LISP and recursion
cleancache(stats[1:])
else:
return
def main():
    """Build the cache index via find(1), stat every entry and prune."""
    # Collect all paths below cachedir; stderr from find is discarded.
    input = popen("find %s -print 2> /dev/null" % cachedir, 'r')
    cacheindex = input.readlines()
    input.close()

    try:
        chdir(cachedir)
    except OSError:
        stderr.write("Cachedir %s does not exist!\n" % cachedir)
        exit(1)

    # Strip trailing newlines (string.rstrip via 'from string import *'),
    # then stat each path. Python 2 map() returns lists here.
    cacheindex = map(rstrip, cacheindex)
    stats = map(fileinfo, cacheindex)
    # fileinfo.__cmp__ sorts oldest entries first.
    stats.sort()

    cleancache(stats)
    removedirs(stats)
# Script entry point: run the cache pruner.
if __name__ == '__main__':
    main()
| gpl-2.0 |
teeheee/RobocupSoccerSimulator | robotsimul.py | 1 | 5679 | #!/usr/bin/env python3
from game import *
from debugger import Debugger
from logSaver import Logger
from gameconfig import gc
from popup_menu import *
import sys
import time
#TODO more comments
#TODO clean up this mess
class App:
    """Top-level simulator application.

    Owns the pygame window, the Game instance and the optional
    Debugger/Logger components, and runs the event/update/render main
    loop. Keyboard controls: arrows/m/j drive the focused robot while
    SPACE is held, 1-4 select the focused robot, p pauses, v toggles
    the debugger's pixy view.
    """

    def __init__(self):
        # flag for shutdown of the simulation
        self._running = True
        # flags for the keyboard control Interface
        self.robotcontrol = False  # True for manual control
        self.pause = False  # True for game paused
        # Id of the robot whose sensor values are shown in the debugger
        self.focusedrobot = 0
        self._display_surf = None  # double buffered display to minimize lag
        self.size = self.width, self.height = 243*3, 182*3  # Window size is fixed TODO: variable window size
        self.menu_data = (
            'Robot Simulator',
            'Restart',
            'Quit',
        )
        self.menu = NonBlockingPopupMenu(self.menu_data)

    def on_init(self):
        """Initialise pygame, the display surfaces, the game and the
        optional debugger/logger components (per gc.GUI settings)."""
        pygame.init()
        if gc.GUI["Debugger"]:
            # Extra horizontal space for the debugger panel.
            width = self.width + self.height
        else:
            width = self.width
        self._display_surf = pygame.display.set_mode((width, self.height), pygame.DOUBLEBUF)
        self._game_display = pygame.Surface(self.size)
        self.ppcm = 3  # TODO: variable window size
        self.center = [self._game_display.get_height() / (2 * self.ppcm),
                       self._game_display.get_width() / (2 * self.ppcm)]
        self._display_surf.set_alpha(None)
        self._running = True
        game_display_data = {"display": self._game_display,
                             "ppcm": self.ppcm,
                             "center": self.center}
        self.game = Game(game_display_data)
        if gc.GUI["Debugger"]:
            self.debugger = Debugger(self._display_surf, self.game.robotProgramHandlers)
            self.debugger.setFocusedRobot(self.focusedrobot)
        if gc.GUI["Logger"]:
            self.logger = Logger(self.game)
            self.logger.startLogging()
        # Audio is unused; shut the mixer down to free the device.
        pygame.mixer.quit()

    def _set_focused_robot(self, robot_id):
        """Switch manual control (and debugger focus, if enabled) to
        robot *robot_id*. Extracted from the four duplicated key
        handlers in on_loop()."""
        self.focusedrobot = robot_id
        if gc.GUI["Debugger"]:
            self.debugger.setFocusedRobot(robot_id)

    def on_event(self, event):
        """Feed pygame events through the popup menu and handle its
        USEREVENT actions plus window close."""
        for e in self.menu.handle_events(event):
            if e.type == USEREVENT:
                if e.code == 'MENU':
                    if e.name is None:
                        self.menu.hide()
                    elif e.text == "Quit":
                        self._running = False
                    elif e.text == "Restart":
                        self.game.restart()
                    else:
                        print('TODO: handle this Menu event: %s' % (e.text))  # TODO menu handling
            elif e.type == MOUSEBUTTONUP:
                self.menu.show()
            elif e.type == pygame.QUIT:
                self._running = False

    def on_loop(self):
        """Advance the simulation one step and process keyboard input."""
        if not self.pause:
            # BUG FIX: self.logger only exists when gc.GUI["Logger"] is
            # enabled; calling tick() unconditionally raised
            # AttributeError whenever logging was disabled.
            if gc.GUI["Logger"]:
                self.logger.tick()
            self.game.tick(30)  # calculate in ms steps
        speed = 0.5
        motor = np.array([0.0, 0.0, 0.0, 0.0])
        key = pygame.key.get_pressed()
        # Translation / rotation contributions per pressed key.
        if key[pygame.K_UP]:
            motor += np.array([-speed, -speed, speed, speed])
        if key[pygame.K_DOWN]:
            motor += np.array([speed, speed, -speed, -speed])
        if key[pygame.K_RIGHT]:
            motor += np.array([speed/2, 0, speed/2, 0])
        if key[pygame.K_LEFT]:
            motor += np.array([-speed/2, 0, -speed/2, 0])
        if key[pygame.K_m]:
            motor += np.array([-speed, speed, speed, -speed])
        if key[pygame.K_j]:
            motor += np.array([speed, -speed, -speed, speed])
        if key[pygame.K_1]:
            self._set_focused_robot(0)
        if key[pygame.K_2]:
            self._set_focused_robot(1)
        if key[pygame.K_3]:
            self._set_focused_robot(2)
        if key[pygame.K_4]:
            self._set_focused_robot(3)
        if key[pygame.K_v]:
            if gc.GUI["Debugger"]:
                self.debugger.togglePixyMode()
        # Pause and manual control are only active while the key is held.
        if key[pygame.K_p]:
            self.pause = True
        else:
            self.pause = False
        if key[pygame.K_SPACE]:
            self.robotcontrol = True
        else:
            self.robotcontrol = False
        if self.robotcontrol:
            motor *= 100
            self.game.robotInterfaceHandlers[self.focusedrobot].setMotorSpeed(motor[0], motor[1], motor[2], motor[3])
            # Block the robot's own program while driving manually.
            self.game.robotProgramHandlers[self.focusedrobot].block()
        else:
            self.game.robotProgramHandlers[self.focusedrobot].unBlock()

    def on_render(self):
        """Redraw the game surface, debugger panel and menu."""
        self._display_surf.fill(GREEN)
        self.game.draw()
        self._display_surf.blit(self._game_display, (0, 0))
        if gc.GUI["Debugger"]:
            self.debugger.draw()
        self.menu.draw()
        pygame.display.update()

    def on_cleanup(self):
        """Shut the game down and quit pygame."""
        self.game.shutdown()
        pygame.quit()

    def on_execute(self):
        """Run the main loop until quit, then clean up."""
        if self.on_init() is False:
            self._running = False
        while self._running:
            self.on_event(pygame.event.get())
            self.on_loop()
            if not gc.GUI["Fast"]:
                # Throttle to roughly real time unless fast mode is set.
                time.sleep(0.03)
            self.on_render()
        self.on_cleanup()
if __name__ == "__main__":
#load config file if it is given as argument
if len(sys.argv) == 2:
gc.load(str(sys.argv[1]))
else:
gc.load(None)
theApp = App()
theApp.on_execute()
| gpl-3.0 |
theheros/kbengine | kbe/src/lib/python/Lib/test/test_pkg.py | 55 | 9399 | # Test packages (dotted-name import)
import sys
import os
import tempfile
import textwrap
import unittest
from test import support
# Helpers to create and destroy hierarchies.
def cleanout(root):
    """Recursively delete directory *root* and everything below it.

    Symlinked directories are removed as links rather than descended
    into, so nothing outside *root* is touched.
    """
    for entry in os.listdir(root):
        full_path = os.path.join(root, entry)
        if os.path.isdir(full_path) and not os.path.islink(full_path):
            cleanout(full_path)
        else:
            os.remove(full_path)
    os.rmdir(root)
def fixdir(lst):
    """Strip ``__builtins__`` from *lst* in place (if present) and return it."""
    try:
        lst.remove("__builtins__")
    except ValueError:
        pass
    return lst
# XXX Things to test
#
# import package without __init__
# import package with __init__
# __init__ importing submodule
# __init__ importing global module
# __init__ defining variables
# submodule importing other submodule
# submodule importing global module
# submodule import submodule via global name
# from package import submodule
# from package import subpackage
# from package import variable (defined in __init__)
# from package import * (defined in __init__)
class TestPkg(unittest.TestCase):
    """Exercise dotted-name package imports against temporary on-disk
    package hierarchies built by mkhier()."""
    def setUp(self):
        # Remember interpreter state so tearDown can restore it exactly.
        self.root = None
        self.pkgname = None
        self.syspath = list(sys.path)
        self.modules_before = support.modules_setup()
    def tearDown(self):
        sys.path[:] = self.syspath
        support.modules_cleanup(*self.modules_before)
        if self.root: # Only clean if the test was actually run
            cleanout(self.root)
        # delete all modules concerning the tested hierarchy
        if self.pkgname:
            modules = [name for name in sys.modules
                       if self.pkgname in name.split('.')]
            for name in modules:
                del sys.modules[name]
    def run_code(self, code):
        """Execute *code* with ``self`` bound in its locals; needed because
        'import *' is forbidden inside a function body in Py3k."""
        exec(textwrap.dedent(code), globals(), {"self": self})
    def mkhier(self, descr):
        """Create a package hierarchy under a fresh temp dir and prepend it
        to sys.path.

        *descr* is a list of (space-separated path, contents) pairs; a
        contents of None creates a directory instead of a file.
        """
        root = tempfile.mkdtemp()
        sys.path.insert(0, root)
        if not os.path.isdir(root):
            os.mkdir(root)
        for name, contents in descr:
            comps = name.split()
            fullname = root
            for c in comps:
                fullname = os.path.join(fullname, c)
            if contents is None:
                os.mkdir(fullname)
            else:
                f = open(fullname, "w")
                f.write(contents)
                if contents and contents[-1] != '\n':
                    f.write('\n')
                f.close()
        self.root = root
        # package name is the name of the first item
        self.pkgname = descr[0][0]
    def test_1(self):
        hier = [("t1", None), ("t1 __init__.py", "")]
        self.mkhier(hier)
        import t1
    def test_2(self):
        hier = [
            ("t2", None),
            ("t2 __init__.py", "'doc for t2'"),
            ("t2 sub", None),
            ("t2 sub __init__.py", ""),
            ("t2 sub subsub", None),
            ("t2 sub subsub __init__.py", "spam = 1"),
        ]
        self.mkhier(hier)
        import t2.sub
        import t2.sub.subsub
        self.assertEqual(t2.__name__, "t2")
        self.assertEqual(t2.sub.__name__, "t2.sub")
        self.assertEqual(t2.sub.subsub.__name__, "t2.sub.subsub")
        # This exec crap is needed because Py3k forbids 'import *' outside
        # of module-scope and __import__() is insufficient for what we need.
        s = """
            import t2
            from t2 import *
            self.assertEqual(dir(), ['self', 'sub', 't2'])
            """
        self.run_code(s)
        from t2 import sub
        from t2.sub import subsub
        from t2.sub.subsub import spam
        self.assertEqual(sub.__name__, "t2.sub")
        self.assertEqual(subsub.__name__, "t2.sub.subsub")
        self.assertEqual(sub.subsub.__name__, "t2.sub.subsub")
        for name in ['spam', 'sub', 'subsub', 't2']:
            # BUGFIX: was locals()["name"], which looked up the loop
            # variable itself (always truthy) and tested nothing.
            self.assertTrue(locals()[name], "Failed to import %s" % name)
        import t2.sub
        import t2.sub.subsub
        self.assertEqual(t2.__name__, "t2")
        self.assertEqual(t2.sub.__name__, "t2.sub")
        self.assertEqual(t2.sub.subsub.__name__, "t2.sub.subsub")
        s = """
            from t2 import *
            self.assertEqual(dir(), ['self', 'sub'])
            """
        self.run_code(s)
    def test_3(self):
        hier = [
            ("t3", None),
            ("t3 __init__.py", ""),
            ("t3 sub", None),
            ("t3 sub __init__.py", ""),
            ("t3 sub subsub", None),
            ("t3 sub subsub __init__.py", "spam = 1"),
        ]
        self.mkhier(hier)
        import t3.sub.subsub
        self.assertEqual(t3.__name__, "t3")
        self.assertEqual(t3.sub.__name__, "t3.sub")
        self.assertEqual(t3.sub.subsub.__name__, "t3.sub.subsub")
    def test_4(self):
        # Packages must shadow same-named modules at every level.
        hier = [
            ("t4.py", "raise RuntimeError('Shouldnt load t4.py')"),
            ("t4", None),
            ("t4 __init__.py", ""),
            ("t4 sub.py", "raise RuntimeError('Shouldnt load sub.py')"),
            ("t4 sub", None),
            ("t4 sub __init__.py", ""),
            ("t4 sub subsub.py",
             "raise RuntimeError('Shouldnt load subsub.py')"),
            ("t4 sub subsub", None),
            ("t4 sub subsub __init__.py", "spam = 1"),
        ]
        self.mkhier(hier)
        s = """
            from t4.sub.subsub import *
            self.assertEqual(spam, 1)
            """
        self.run_code(s)
    def test_5(self):
        hier = [
            ("t5", None),
            ("t5 __init__.py", "import t5.foo"),
            ("t5 string.py", "spam = 1"),
            ("t5 foo.py",
             "from . import string; assert string.spam == 1"),
        ]
        self.mkhier(hier)
        import t5
        s = """
            from t5 import *
            self.assertEqual(dir(), ['foo', 'self', 'string', 't5'])
            """
        self.run_code(s)
        import t5
        self.assertEqual(fixdir(dir(t5)),
                         ['__cached__', '__doc__', '__file__', '__name__',
                          '__package__', '__path__', 'foo', 'string', 't5'])
        self.assertEqual(fixdir(dir(t5.foo)),
                         ['__cached__', '__doc__', '__file__', '__name__',
                          '__package__', 'string'])
        self.assertEqual(fixdir(dir(t5.string)),
                         ['__cached__', '__doc__', '__file__', '__name__',
                          '__package__', 'spam'])
    def test_6(self):
        hier = [
            ("t6", None),
            ("t6 __init__.py",
             "__all__ = ['spam', 'ham', 'eggs']"),
            ("t6 spam.py", ""),
            ("t6 ham.py", ""),
            ("t6 eggs.py", ""),
        ]
        self.mkhier(hier)
        import t6
        self.assertEqual(fixdir(dir(t6)),
                         ['__all__', '__cached__', '__doc__', '__file__',
                          '__name__', '__package__', '__path__'])
        s = """
            import t6
            from t6 import *
            self.assertEqual(fixdir(dir(t6)),
                             ['__all__', '__cached__', '__doc__', '__file__',
                              '__name__', '__package__', '__path__',
                              'eggs', 'ham', 'spam'])
            self.assertEqual(dir(), ['eggs', 'ham', 'self', 'spam', 't6'])
            """
        self.run_code(s)
    def test_7(self):
        hier = [
            ("t7.py", ""),
            ("t7", None),
            ("t7 __init__.py", ""),
            ("t7 sub.py",
             "raise RuntimeError('Shouldnt load sub.py')"),
            ("t7 sub", None),
            ("t7 sub __init__.py", ""),
            # BUGFIX: was "t7 sub .py", which created a hidden ".py" file
            # instead of the decoy module this error message refers to
            # (mirrors the layout used in test_4).
            ("t7 sub subsub.py",
             "raise RuntimeError('Shouldnt load subsub.py')"),
            ("t7 sub subsub", None),
            ("t7 sub subsub __init__.py",
             "spam = 1"),
        ]
        self.mkhier(hier)
        t7, sub, subsub = None, None, None
        import t7 as tas
        self.assertEqual(fixdir(dir(tas)),
                         ['__cached__', '__doc__', '__file__', '__name__',
                          '__package__', '__path__'])
        self.assertFalse(t7)
        from t7 import sub as subpar
        self.assertEqual(fixdir(dir(subpar)),
                         ['__cached__', '__doc__', '__file__', '__name__',
                          '__package__', '__path__'])
        self.assertFalse(t7)
        self.assertFalse(sub)
        from t7.sub import subsub as subsubsub
        self.assertEqual(fixdir(dir(subsubsub)),
                         ['__cached__', '__doc__', '__file__', '__name__',
                          '__package__', '__path__', 'spam'])
        self.assertFalse(t7)
        self.assertFalse(sub)
        self.assertFalse(subsub)
        from t7.sub.subsub import spam as ham
        self.assertEqual(ham, 1)
        self.assertFalse(t7)
        self.assertFalse(sub)
        self.assertFalse(subsub)
    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def test_8(self):
        hier = [
            ("t8", None),
            ("t8 __init__"+os.extsep+"py", "'doc for t8'"),
        ]
        self.mkhier(hier)
        import t8
        self.assertEqual(t8.__doc__, "doc for t8")
def test_main():
    """Entry point used by regrtest: run every test in this module."""
    support.run_unittest(__name__)
if __name__ == "__main__":
    test_main()
| lgpl-3.0 |
maxming2333/v2ex | html5lib/treebuilders/__init__.py | 102 | 4478 | """A collection of modules for building different kinds of tree from
HTML documents.
To create a treebuilder for a new type of tree, you need to do
implement several things:
1) A set of classes for various types of elements: Document, Doctype,
Comment, Element. These must implement the interface of
_base.treebuilders.Node (although comment nodes have a different
signature for their constructor, see treebuilders.simpletree.Comment)
Textual content may also be implemented as another node type, or not, as
your tree implementation requires.
2) A treebuilder object (called TreeBuilder by convention) that
inherits from treebuilders._base.TreeBuilder. This has 4 required attributes:
documentClass - the class to use for the bottommost node of a document
elementClass - the class to use for HTML Elements
commentClass - the class to use for comments
doctypeClass - the class to use for doctypes
It also has one required method:
getDocument - Returns the root node of the complete document tree
3) If you wish to run the unit tests, you must also create a
testSerializer method on your treebuilder which accepts a node and
returns a string containing Node and its children serialized according
to the format used in the unittests
The supplied simpletree module provides a python-only implementation
of a full treebuilder and is a useful reference for the semantics of
the various methods.
"""
treeBuilderCache = {}
import sys
def getTreeBuilder(treeType, implementation=None, **kwargs):
    """Get a TreeBuilder class for various types of tree with built-in support

    treeType - the name of the tree type required (case-insensitive). Supported
               values are "simpletree", "dom", "etree" and "beautifulsoup"

               "simpletree" - a built-in DOM-ish tree type with support for some
                              more pythonic idioms.
               "dom" - A generic builder for DOM implementations, defaulting to
                       a xml.dom.minidom based implementation for the sake of
                       backwards compatibility (as releases up until 0.10 had a
                       builder called "dom" that was a minidom implemenation).
               "etree" - A generic builder for tree implementations exposing an
                         elementtree-like interface (known to work with
                         ElementTree, cElementTree and lxml.etree).
               "beautifulsoup" - Beautiful soup (if installed)

    implementation - (Currently applies to the "etree" and "dom" tree types). A
                     module implementing the tree type e.g.
                     xml.etree.ElementTree or lxml.etree."""
    treeType = treeType.lower()
    if treeType not in treeBuilderCache:
        if treeType == "dom":
            import dom
            # XXX: Keep backwards compatibility by using minidom if no implementation is given
            if implementation is None:  # identity test, not equality
                from xml.dom import minidom
                implementation = minidom
            # XXX: NEVER cache here, caching is done in the dom submodule
            return dom.getDomModule(implementation, **kwargs).TreeBuilder
        elif treeType == "simpletree":
            import simpletree
            treeBuilderCache[treeType] = simpletree.TreeBuilder
        elif treeType == "beautifulsoup":
            import soup
            treeBuilderCache[treeType] = soup.TreeBuilder
        elif treeType == "lxml":
            import etree_lxml
            treeBuilderCache[treeType] = etree_lxml.TreeBuilder
        elif treeType == "etree":
            # Come up with a sane default: try the fastest available
            # elementtree flavour first.
            if implementation is None:  # identity test, not equality
                try:
                    import xml.etree.cElementTree as ET
                except ImportError:
                    try:
                        import xml.etree.ElementTree as ET
                    except ImportError:
                        try:
                            import cElementTree as ET
                        except ImportError:
                            import elementtree.ElementTree as ET
                implementation = ET
            import etree
            # NEVER cache here, caching is done in the etree submodule
            return etree.getETreeModule(implementation, **kwargs).TreeBuilder
        else:
            raise ValueError("""Unrecognised treebuilder "%s" """%treeType)
    return treeBuilderCache.get(treeType)
| bsd-3-clause |
ramcn/demo3 | venv/lib/python3.4/site-packages/setuptools/msvc9_support.py | 429 | 2187 | try:
import distutils.msvc9compiler
except ImportError:
pass
unpatched = dict()
def patch_for_specialized_compiler():
    """
    Patch functions in distutils.msvc9compiler to use the standalone compiler
    build for Python (Windows only). Fall back to original behavior when the
    standalone compiler is not available.
    """
    if 'distutils' not in globals():
        # The module isn't available to be patched
        return
    if unpatched:
        # Already patched
        return
    # Preserve the originals first so the replacement functions can
    # delegate back to them via the module-level `unpatched` dict.
    unpatched.update(vars(distutils.msvc9compiler))
    distutils.msvc9compiler.find_vcvarsall = find_vcvarsall
    distutils.msvc9compiler.query_vcvarsall = query_vcvarsall
def find_vcvarsall(version):
    """Locate vcvarsall.bat for the standalone "VC for Python" package.

    Checks the per-user registry key, then the all-user (Wow6432Node)
    key; falls back to the original distutils lookup when neither
    yields a usable vcvarsall.bat.
    """
    Reg = distutils.msvc9compiler.Reg
    VC_BASE = r'Software\%sMicrosoft\DevDiv\VCForPython\%0.1f'
    key = VC_BASE % ('', version)
    try:
        # Per-user installs register the compiler path here
        productdir = Reg.get_value(key, "installdir")
    except KeyError:
        try:
            # All-user installs on a 64-bit system register here
            key = VC_BASE % ('Wow6432Node\\', version)
            productdir = Reg.get_value(key, "installdir")
        except KeyError:
            productdir = None
    if productdir:
        import os
        vcvarsall = os.path.join(productdir, "vcvarsall.bat")
        if os.path.isfile(vcvarsall):
            return vcvarsall
    # No standalone compiler found: defer to the pre-patch implementation.
    return unpatched['find_vcvarsall'](version)
def query_vcvarsall(version, *args, **kwargs):
    """Delegate to the original query_vcvarsall, improving the error
    message when vcvarsall.bat cannot be found for the given version."""
    try:
        return unpatched['query_vcvarsall'](version, *args, **kwargs)
    except distutils.errors.DistutilsPlatformError as exc:
        # Only rewrite the "vcvarsall.bat not found" failure; anything
        # else is re-raised untouched.
        if exc and "vcvarsall.bat" in exc.args[0]:
            message = 'Microsoft Visual C++ %0.1f is required (%s).' % (version, exc.args[0])
            if int(version) == 9:
                # This redirection link is maintained by Microsoft.
                # Contact vspython@microsoft.com if it needs updating.
                raise distutils.errors.DistutilsPlatformError(
                    message + ' Get it from http://aka.ms/vcpython27'
                )
            raise distutils.errors.DistutilsPlatformError(message)
        raise
| mit |
vermouthmjl/scikit-learn | examples/gaussian_process/plot_gpr_co2.py | 131 | 5705 | """
========================================================
Gaussian process regression (GPR) on Mauna Loa CO2 data.
========================================================
This example is based on Section 5.4.3 of "Gaussian Processes for Machine
Learning" [RW2006]. It illustrates an example of complex kernel engineering and
hyperparameter optimization using gradient ascent on the
log-marginal-likelihood. The data consists of the monthly average atmospheric
CO2 concentrations (in parts per million by volume (ppmv)) collected at the
Mauna Loa Observatory in Hawaii, between 1958 and 1997. The objective is to
model the CO2 concentration as a function of the time t.
The kernel is composed of several terms that are responsible for explaining
different properties of the signal:
- a long term, smooth rising trend is to be explained by an RBF kernel. The
RBF kernel with a large length-scale enforces this component to be smooth;
it is not enforced that the trend is rising which leaves this choice to the
GP. The specific length-scale and the amplitude are free hyperparameters.
- a seasonal component, which is to be explained by the periodic
ExpSineSquared kernel with a fixed periodicity of 1 year. The length-scale
of this periodic component, controlling its smoothness, is a free parameter.
In order to allow decaying away from exact periodicity, the product with an
RBF kernel is taken. The length-scale of this RBF component controls the
decay time and is a further free parameter.
- smaller, medium term irregularities are to be explained by a
RationalQuadratic kernel component, whose length-scale and alpha parameter,
which determines the diffuseness of the length-scales, are to be determined.
According to [RW2006], these irregularities can better be explained by
a RationalQuadratic than an RBF kernel component, probably because it can
accommodate several length-scales.
- a "noise" term, consisting of an RBF kernel contribution, which shall
explain the correlated noise components such as local weather phenomena,
and a WhiteKernel contribution for the white noise. The relative amplitudes
and the RBF's length scale are further free parameters.
Maximizing the log-marginal-likelihood after subtracting the target's mean
yields the following kernel with an LML of -83.214::
34.4**2 * RBF(length_scale=41.8)
+ 3.27**2 * RBF(length_scale=180) * ExpSineSquared(length_scale=1.44,
periodicity=1)
+ 0.446**2 * RationalQuadratic(alpha=17.7, length_scale=0.957)
+ 0.197**2 * RBF(length_scale=0.138) + WhiteKernel(noise_level=0.0336)
Thus, most of the target signal (34.4ppm) is explained by a long-term rising
trend (length-scale 41.8 years). The periodic component has an amplitude of
3.27ppm, a decay time of 180 years and a length-scale of 1.44. The long decay
time indicates that we have a locally very close to periodic seasonal
component. The correlated noise has an amplitude of 0.197ppm with a length
scale of 0.138 years and a white-noise contribution of 0.197ppm. Thus, the
overall noise level is very small, indicating that the data can be very well
explained by the model. The figure shows also that the model makes very
confident predictions until around 2015.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels \
    import RBF, WhiteKernel, RationalQuadratic, ExpSineSquared
from sklearn.datasets import fetch_mldata
# NOTE(review): fetch_mldata relies on the retired mldata.org service and was
# removed from modern scikit-learn; fetch_openml is the replacement — verify
# against the installed sklearn version.
data = fetch_mldata('mauna-loa-atmospheric-co2').data
X = data[:, [1]]  # time axis (plotted as "Year" below)
y = data[:, 0]    # CO2 concentration (ppmv, per the module docstring)
# Kernel with parameters given in GPML book
k1 = 66.0**2 * RBF(length_scale=67.0)  # long term smooth rising trend
k2 = 2.4**2 * RBF(length_scale=90.0) \
    * ExpSineSquared(length_scale=1.3, periodicity=1.0)  # seasonal component
# medium term irregularity
k3 = 0.66**2 \
    * RationalQuadratic(length_scale=1.2, alpha=0.78)
k4 = 0.18**2 * RBF(length_scale=0.134) \
    + WhiteKernel(noise_level=0.19**2)  # noise terms
kernel_gpml = k1 + k2 + k3 + k4
# optimizer=None: evaluate the book's hyperparameters as-is, no refitting.
gp = GaussianProcessRegressor(kernel=kernel_gpml, alpha=0,
                              optimizer=None, normalize_y=True)
gp.fit(X, y)
print("GPML kernel: %s" % gp.kernel_)
print("Log-marginal-likelihood: %.3f"
      % gp.log_marginal_likelihood(gp.kernel_.theta))
# Kernel with optimized parameters
k1 = 50.0**2 * RBF(length_scale=50.0)  # long term smooth rising trend
k2 = 2.0**2 * RBF(length_scale=100.0) \
    * ExpSineSquared(length_scale=1.0, periodicity=1.0,
                     periodicity_bounds="fixed")  # seasonal component
# medium term irregularities
k3 = 0.5**2 * RationalQuadratic(length_scale=1.0, alpha=1.0)
k4 = 0.1**2 * RBF(length_scale=0.1) \
    + WhiteKernel(noise_level=0.1**2,
                  noise_level_bounds=(1e-3, np.inf))  # noise terms
kernel = k1 + k2 + k3 + k4
# Default optimizer this time: hyperparameters are fitted by maximizing
# the log-marginal-likelihood.
gp = GaussianProcessRegressor(kernel=kernel, alpha=0,
                              normalize_y=True)
gp.fit(X, y)
print("\nLearned kernel: %s" % gp.kernel_)
print("Log-marginal-likelihood: %.3f"
      % gp.log_marginal_likelihood(gp.kernel_.theta))
# Extrapolate 30 time units beyond the data to show growing uncertainty.
X_ = np.linspace(X.min(), X.max() + 30, 1000)[:, np.newaxis]
y_pred, y_std = gp.predict(X_, return_std=True)
# Illustration
plt.scatter(X, y, c='k')
plt.plot(X_, y_pred)
plt.fill_between(X_[:, 0], y_pred - y_std, y_pred + y_std,
                 alpha=0.5, color='k')
plt.xlim(X_.min(), X_.max())
plt.xlabel("Year")
plt.ylabel(r"CO$_2$ in ppm")
plt.title(r"Atmospheric CO$_2$ concentration at Mauna Loa")
plt.tight_layout()
plt.show()
soarpenguin/ansible | lib/ansible/modules/network/lenovo/cnos_template.py | 59 | 7077 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module to send CLI templates to Lenovo Switches
# Lenovo Networking
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cnos_template
author: "Dave Kasberg (@dkasberg)"
short_description: Manage switch configuration using templates on devices running Lenovo CNOS
description:
- This module allows you to work with the running configuration of a switch. It provides a way
to execute a set of CNOS commands on a switch by evaluating the current running configuration
and executing the commands only if the specific settings have not been already configured.
The configuration source can be a set of commands or a template written in the Jinja2 templating language.
This module uses SSH to manage network device configuration.
The results of the operation will be placed in a directory named 'results'
that must be created by the user in their local directory to where the playbook is run.
For more information about this module from Lenovo and customizing it usage for your
use cases, please visit U(http://systemx.lenovofiles.com/help/index.jsp?topic=%2Fcom.lenovo.switchmgt.ansible.doc%2Fcnos_template.html)
version_added: "2.3"
extends_documentation_fragment: cnos
options:
commandfile:
description:
- This specifies the path to the CNOS command file which needs to be applied. This usually
comes from the commands folder. Generally this file is the output of the variables applied
on a template file. So this command is preceded by a template module.
Note The command file must contain the Ansible keyword {{ inventory_hostname }} in its
filename to ensure that the command file is unique for each switch and condition.
If this is omitted, the command file will be overwritten during iteration. For example,
commandfile=./commands/clos_leaf_bgp_{{ inventory_hostname }}_commands.txt
required: true
default: Null
'''
EXAMPLES = '''
Tasks : The following are examples of using the module cnos_template. These are written in the main.yml file of the tasks directory.
---
- name: Replace Config CLI command template with values
template:
src: demo_template.j2
dest: "./commands/demo_template_{{ inventory_hostname }}_commands.txt"
vlanid1: 13
slot_chassis_number1: "1/2"
portchannel_interface_number1: 100
portchannel_mode1: "active"
- name: Applying CLI commands on Switches
cnos_template:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
commandfile: "./commands/demo_template_{{ inventory_hostname }}_commands.txt"
outputfile: "./results/demo_template_command_{{ inventory_hostname }}_output.txt"
'''
RETURN = '''
msg:
description: Success or failure message
returned: always
type: string
sample: "Template Applied."
'''
import sys
import paramiko
import time
import argparse
import socket
import array
import json
import time
import re
try:
from ansible.module_utils import cnos
HAS_LIB = True
except:
HAS_LIB = False
from ansible.module_utils.basic import AnsibleModule
from collections import defaultdict
def main():
    """Ansible entry point: apply a CNOS CLI command file to a switch via SSH.

    Reads commands from ``commandfile`` (skipping ``#`` comment lines),
    sends them one by one inside config mode, stops at the first
    device-reported error, saves the running config, appends all device
    output to ``outputfile`` and reports success/failure to Ansible.
    """
    module = AnsibleModule(
        argument_spec=dict(
            commandfile=dict(required=True),
            outputfile=dict(required=True),
            host=dict(required=True),
            deviceType=dict(required=True),
            username=dict(required=True),
            password=dict(required=True, no_log=True),
            enablePassword=dict(required=False, no_log=True),),
        supports_check_mode=False)
    username = module.params['username']
    password = module.params['password']
    enablePassword = module.params['enablePassword']
    commandfile = module.params['commandfile']
    outputfile = module.params['outputfile']
    deviceType = module.params['deviceType']
    hostIP = module.params['host']
    output = ""
    # Create instance of SSHClient object
    remote_conn_pre = paramiko.SSHClient()
    # Automatically add untrusted hosts (make sure okay for security policy
    # in your environment)
    remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    # initiate SSH connection with the switch
    remote_conn_pre.connect(hostIP, username=username, password=password)
    time.sleep(2)
    # Use invoke_shell to establish an 'interactive session'
    remote_conn = remote_conn_pre.invoke_shell()
    time.sleep(2)
    # Enable and enter configure terminal then send command
    output = output + cnos.waitForDeviceResponse("\n", ">", 2, remote_conn)
    output = output + cnos.enterEnableModeForDevice(enablePassword, 3, remote_conn)
    # Make terminal length = 0
    output = output + cnos.waitForDeviceResponse("terminal length 0\n", "#", 2, remote_conn)
    # Go to config mode
    output = output + cnos.waitForDeviceResponse("configure d\n", "(config)#", 2, remote_conn)
    # Send commands one by one; the context manager guarantees the command
    # file is closed even if an error aborts the loop (the original leaked
    # the handle).
    with open(commandfile, "r") as f:
        for line in f:
            # Omit the comment lines in template file
            if not line.startswith("#"):
                command = line
                if not line.endswith("\n"):
                    command = command + "\n"
                response = cnos.waitForDeviceResponse(command, "#", 2, remote_conn)
                errorMsg = cnos.checkOutputForError(response)
                output = output + response
                if errorMsg is not None:
                    break  # To cater to Mufti case
    # Write to memory
    output = output + cnos.waitForDeviceResponse("save\n", "#", 3, remote_conn)
    # Append output to the log file (renamed from 'file' to avoid shadowing
    # the builtin; 'with' ensures the handle is closed).
    with open(outputfile, "a") as logfile:
        logfile.write(output)
    # Logic to check when changes occur or not
    errorMsg = cnos.checkOutputForError(output)
    if errorMsg is None:
        module.exit_json(changed=True, msg="Template Applied")
    else:
        module.fail_json(msg=errorMsg)
if __name__ == '__main__':
    main()
| gpl-3.0 |
mikkokeskinen/tunnistamo | oidc_apis/models.py | 1 | 6847 | from django.core.exceptions import ValidationError
from django.core.validators import RegexValidator
from django.db import models
from django.utils.crypto import get_random_string
from django.utils.translation import ugettext_lazy as _
from multiselectfield import MultiSelectField
from oidc_provider.models import Client
from parler.fields import TranslatedField
from parler.managers import TranslatableQuerySet
from parler.models import TranslatableModel, TranslatedFieldsModel
from oidc_apis.utils import combine_uniquely
from .mixins import AutoFilledIdentifier, ImmutableFields
alphanumeric_validator = RegexValidator(
'^[a-z0-9]*$',
message=_("May contain only lower case letters and digits."))
SCOPE_CHOICES = [
('email', _("E-mail")),
('profile', _("Profile")),
('address', _("Address")),
('github_username', _("GitHub username")),
('ad_groups', _("AD Groups")),
]
class ApiDomain(models.Model):
    """Base URL namespace under which APIs are registered (e.g. an auth
    service root). Each Api row points at one domain."""
    identifier = models.CharField(
        max_length=50, unique=True,
        verbose_name=_("identifier"),
        help_text=_("API domain identifier, e.g. https://api.hel.fi/auth"))
    class Meta:
        verbose_name = _("API domain")
        verbose_name_plural = _("API domains")
    def __str__(self):
        return self.identifier
class Api(models.Model):
    """An API registered under a domain, paired one-to-one with an OIDC
    client whose client_id equals the API's full identifier."""
    domain = models.ForeignKey(
        ApiDomain,
        verbose_name=("domain"),
        on_delete=models.CASCADE
    )
    name = models.CharField(
        max_length=50,
        validators=[alphanumeric_validator],
        verbose_name=_("name")
    )
    required_scopes = MultiSelectField(
        choices=SCOPE_CHOICES, max_length=1000, blank=True,
        verbose_name=_("required scopes"),
        help_text=_(
            "Select the scopes that this API needs information from. "
            "Information from the selected scopes will be included to "
            "the API Tokens.")
    )
    oidc_client = models.OneToOneField(
        Client, related_name='+',
        on_delete=models.CASCADE,
        verbose_name=_("OIDC client")
    )
    class Meta:
        unique_together = [('domain', 'name')]
        verbose_name = _("API")
        verbose_name_plural = _("APIs")
    def __str__(self):
        return self.identifier
    @property
    def identifier(self):
        """Full identifier: '<domain>/<name>' with any trailing slash on
        the domain normalized away."""
        return '{domain}/{name}'.format(
            domain=self.domain.identifier.rstrip('/'),
            name=self.name)
    def required_scopes_string(self):
        """Space-separated, sorted scope list (used by the admin)."""
        return ' '.join(sorted(self.required_scopes))
    required_scopes_string.short_description = _("required scopes")
    def clean(self):
        """Auto-create the backing OIDC client when missing; otherwise
        enforce that its client_id matches this API's identifier."""
        if getattr(self, 'oidc_client', None) is None:
            self.oidc_client = _get_or_create_oidc_client_for_api(self)
        else:
            if self.oidc_client.client_id != self.identifier:
                raise ValidationError(
                    {'oidc_client': _(
                        "OIDC Client ID must match with the identifier")})
        super(Api, self).clean()
    def save(self, *args, **kwargs):
        # Run validation (and OIDC client creation) on every save, not
        # just when full_clean() is called explicitly.
        self.clean()
        super(Api, self).save(*args, **kwargs)
def _get_or_create_oidc_client_for_api(api):
    """Return the OIDC Client whose client_id is the API's identifier,
    creating a confidential RS256 client with a random secret if absent."""
    (client, _created) = Client.objects.get_or_create(
        client_id=api.identifier,
        defaults={
            'name': api.name,
            'client_type': 'confidential',
            'client_secret': get_random_string(length=50),
            'response_type': 'code',
            'jwt_alg': 'RS256',
        })
    return client
class ApiScopeQuerySet(TranslatableQuerySet):
    """Custom queryset helpers for ApiScope lookups."""
    def by_identifiers(self, identifiers):
        """Filter to scopes whose identifier is in *identifiers*."""
        return self.filter(identifier__in=identifiers)
    def allowed_for_client(self, client):
        """Filter to scopes granted to the given OIDC client."""
        return self.filter(allowed_apps=client)
class ApiScope(AutoFilledIdentifier, ImmutableFields, TranslatableModel):
    """A grantable access scope of an Api. The identifier is derived
    automatically from the API identifier plus an optional specifier and
    is immutable once created (see ImmutableFields)."""
    immutable_fields = ['api', 'specifier', 'identifier']
    identifier = models.CharField(
        max_length=150, unique=True, editable=False,
        verbose_name=_("identifier"),
        help_text=_(
            "The scope identifier as known by the API application "
            "(i.e. the Resource Server). Generated automatically from "
            "the API identifier and the scope specifier."))
    api = models.ForeignKey(
        Api, related_name='scopes', on_delete=models.CASCADE,
        verbose_name=_("API"),
        help_text=_("The API that this scope is for."))
    specifier = models.CharField(
        max_length=30, blank=True,
        validators=[alphanumeric_validator],
        verbose_name=_("specifier"),
        help_text=_(
            "If there is a need for multiple scopes per API, "
            "this can specify what kind of scope this is about, "
            "e.g. \"readonly\". For general API scope "
            "just leave this empty."))
    name = TranslatedField()
    description = TranslatedField()
    allowed_apps = models.ManyToManyField(
        Client, related_name='granted_api_scopes',
        verbose_name=_("allowed applications"),
        help_text=_("Select client applications which are allowed "
                    "to get access to this API scope."))
    objects = ApiScopeQuerySet.as_manager()
    class Meta:
        unique_together = [('api', 'specifier')]
        verbose_name = _("API scope")
        verbose_name_plural = _("API scopes")
    @property
    def relative_identifier(self):
        """'<api name>[.<specifier>]' without the domain prefix."""
        return '{api_name}{suffix}'.format(
            api_name=self.api.name,
            suffix=('.' + self.specifier if self.specifier else '')
        )
    def _generate_identifier(self):
        """Full identifier: '<api identifier>[.<specifier>]'; used by
        AutoFilledIdentifier to populate the identifier field."""
        return '{api_identifier}{suffix}'.format(
            api_identifier=self.api.identifier,
            suffix=('.' + self.specifier if self.specifier else '')
        )
    @classmethod
    def extend_scope(cls, scopes):
        """Return *scopes* extended with the scopes required by the APIs
        referenced in it (order preserved, duplicates avoided)."""
        required_scopes = cls._get_required_scopes(scopes)
        extended_scopes = combine_uniquely(scopes, sorted(required_scopes))
        return extended_scopes
    @classmethod
    def _get_required_scopes(cls, scopes):
        """Collect the union of required_scopes of every API that owns one
        of the given scope identifiers."""
        api_scopes = ApiScope.objects.by_identifiers(scopes)
        apis = {x.api for x in api_scopes}
        return set(sum((list(api.required_scopes) for api in apis), []))
class ApiScopeTranslation(TranslatedFieldsModel):
    """Per-language name/description for an ApiScope (django-parler)."""
    master = models.ForeignKey(
        ApiScope, related_name='translations', null=True,
        on_delete=models.CASCADE,
        verbose_name=_("API scope"))
    name = models.CharField(
        max_length=200, verbose_name=_("name"))
    description = models.CharField(
        max_length=1000, verbose_name=_("description"))
    class Meta:
        unique_together = [('language_code', 'master')]
        verbose_name = _("API scope translation")
        verbose_name_plural = _("API scope translations")
    def __str__(self):
        return "{obj}[{lang}]".format(obj=self.master, lang=self.language_code)
| mit |
pombredanne/django-rest-framework-saasy | rest_framework_saasy/viewsets.py | 2 | 2408 | # -*- coding: utf-8 -*-
"""DRF SaaS ViewSetMixin"""
from functools import update_wrapper
from django.utils.decorators import classonlymethod
from django.views.decorators.csrf import csrf_exempt
from rest_framework import viewsets
from rest_framework_saasy.utils import get_cls
__all__ = ['ViewSetMixin', 'ViewSet', 'GenericViewSet']
class ViewSetMixin(viewsets.ViewSetMixin):
    """SaaS extension of rest_framework ViewSetMixin"""
    # Hook used by get_cls() to find a client-specific viewset subclass;
    # None means no SaaS module override is configured.
    SAAS_MODULE = None
    @classonlymethod
    def as_view(cls, actions=None, **initkwargs):
        """While this call returns the view function, it needs to be
        reworked due to the nature of this plugin: dynamic class
        initialization.
        """
        # The super call's return value is discarded; presumably it is
        # invoked for its argument validation — TODO confirm against the
        # installed DRF version.
        super(ViewSetMixin, cls).as_view(actions, **initkwargs)
        def view(request, *args, **kwargs):
            """Slightly modified rest_framework wrapped view.
            @see rest_framework.viewsets.ViewSetMixin
            """
            # Resolve the concrete viewset class per request instead of
            # binding it at URLconf load time — this is the SaaS hook.
            self = get_cls(cls, kwargs, initkwargs)
            # We also store the mapping of request methods to actions,
            # so that we can later set the action attribute.
            # eg. `self.action = 'list'` on an incoming GET request.
            self.action_map = actions
            # Bind methods to actions
            # This is the bit that's different to a standard view
            for method, action in actions.items():
                handler = getattr(self, action)
                setattr(self, method, handler)
            # Patch this in as it's otherwise only present from 1.5 onwards
            if hasattr(self, 'get') and not hasattr(self, 'head'):
                self.head = self.get
            # And continue as usual
            return self.dispatch(request, *args, **kwargs)
        # take name and docstring from class
        update_wrapper(view, cls, updated=())
        # and possible attributes set by decorators
        # like csrf_exempt from dispatch
        update_wrapper(view, cls.dispatch, assigned=())
        # We need to set these on the view function, so that breadcrumb
        # generation can pick out these bits of information from a
        # resolved URL.
        view.cls = cls
        view.suffix = initkwargs.get('suffix', None)
        return csrf_exempt(view)
class ViewSet(ViewSetMixin, viewsets.ViewSet):
    """DRF ViewSet with per-request SaaS class resolution."""
    pass
class GenericViewSet(ViewSetMixin, viewsets.GenericViewSet):
    """DRF GenericViewSet with per-request SaaS class resolution."""
    pass
| mit |
UltrosBot/Ultros-repos | control/system/manager.py | 1 | 2416 | __author__ = 'Gareth'
import os
from twisted.internet import reactor
import yaml
# import control.system.servers as servers
import control.system.ssl as ssl
from control.utils.log import getLogger
from control.system.singleton import Singleton
CONF_DIR = "control/config/"
DATA_DIR = "control/data/"
LOGS_DIR = "control/logs/"
class Manager(object):
    """Bootstraps the control system: ensures directories, loads the YAML
    config, generates a self-signed SSL cert if missing, then starts the
    Twisted reactor. Instantiated at most once (Singleton metaclass)."""

    # Python 2 style metaclass declaration; has no effect under Python 3,
    # where it would need `class Manager(metaclass=Singleton)`.
    __metaclass__ = Singleton

    # Parsed contents of control/config/config.yml (class-level default).
    config = {}

    def __init__(self):
        self.logger = getLogger("Manager")
        try:
            self.logger.info("Ensuring directories exist..")
            self.create_dirs()
        except Exception:
            # Abort startup entirely if the filesystem layout can't be made.
            self.logger.exception("Error while creating directories")
            return
        self.logger.info("Loading configuration..")
        if not self.load_config():
            self.logger.error("Unable to find control/config/config.yml")
            return
        try:
            self.logger.info("Ensuring SSL cert exists..")
            self.create_ssl()
        except Exception:
            self.logger.exception("Error while creating SSL cert")
            return
        # Blocks here; the reactor runs until the process is stopped.
        reactor.run()

    def create_dirs(self):
        """Create the config/data/ssl directories if they are missing.

        NOTE(review): LOGS_DIR is defined at module level but is not in
        this list — confirm whether logs are created elsewhere.
        """
        paths = [CONF_DIR, DATA_DIR,
                 DATA_DIR + "ssl"]
        for path in paths:
            if not os.path.exists(path):
                self.logger.trace("Creating directory: %s" % path)
                os.mkdir(path)

    def load_config(self):
        """Load config.yml into self.config; return False if it is absent.

        NOTE(review): yaml.load() without an explicit Loader can construct
        arbitrary Python objects from the file; yaml.safe_load() is the
        recommended call for configuration files. The file handle is also
        never closed explicitly.
        """
        if not os.path.exists(CONF_DIR + "config.yml"):
            return False
        self.config = yaml.load(open(CONF_DIR + "config.yml", "r"))
        return True

    def create_ssl(self):
        """Generate a self-signed cert/key pair if either file is missing."""
        if not os.path.exists(DATA_DIR + "ssl/ssl.crt"):
            self.logger.trace("No SSL cert found; generating..")
            self.logger.info("Generating SSL cert. This may take a while.")
            ssl.create_self_signed_cert(
                DATA_DIR + "ssl",
                self.config.get("ssl", {})
            )
            self.logger.info("Done!")
        elif not os.path.exists(DATA_DIR + "ssl/ssl.key"):
            # Cert exists but the private key is gone; regenerate both.
            self.logger.trace("No private key found; generating..")
            self.logger.info("Generating SSL cert. This may take a while.")
            ssl.create_self_signed_cert(
                DATA_DIR + "ssl",
                self.config.get("ssl", {})
            )
            self.logger.info("Done!")
        else:
            self.logger.info("SSL cert and key found.")
| artistic-2.0 |
PeteE/roro | python/driver.py | 1 | 2957 | import time
import signal
import sys
import smbus
import robot_data_pb2
from oled_display import OledDisplay
class RobotDriver:
    """Exchange protobuf-encoded RobotData messages with the robot's
    microcontroller over I2C; optionally mirror state on an OLED display."""

    # Neutral position (degrees) at which a continuous-rotation servo stops.
    SERVO_STOP = 90

    def __init__(self, i2c_address=0x04, i2c_bus=1, oled_display=None):
        self.i2c_address = i2c_address
        self.i2c_bus = smbus.SMBus(i2c_bus)
        self.oled_display = oled_display
        # Park the servos and blank the LEDs on Ctrl-C or kill.
        signal.signal(signal.SIGINT, self.exit_gracefully)
        signal.signal(signal.SIGTERM, self.exit_gracefully)
        self.current_state = robot_data_pb2.RobotData()
        self.set_state(s0_pos=90, s1_pos=90, led_pattern=robot_data_pb2.RobotData.OFF)

    def exit_gracefully(self, signum, frame):
        """Signal handler: stop servos, clear the display, exit cleanly."""
        print('Exiting.')
        self.set_state(s0_pos=90, s1_pos=90, led_pattern=robot_data_pb2.RobotData.OFF)
        if self.oled_display:
            self.oled_display.clear()
        sys.exit(0)

    def get_state(self):
        """Read and print the current RobotData message from the robot.

        Wire format: first byte is the payload length, followed by that
        many payload bytes read one at a time.
        """
        try:
            data_length = self.i2c_bus.read_byte(self.i2c_address)
            #print('Length: {}'.format(data_length))
            i = 0;
            data = []
            while i < data_length:
                data.append(self.i2c_bus.read_byte(self.i2c_address))
                i+=1
            rd = robot_data_pb2.RobotData()
            # NOTE(review): Python 2 style — the payload is rebuilt as a str
            # via chr/join; under Python 3 ParseFromString needs bytes.
            rd.ParseFromString("".join(map(chr, data)))
            print(rd)
            if self.oled_display:
                oled_text = ['RobotState:',
                             's0: {}, s1: {}'.format(rd.s0_pos, rd.s1_pos),
                             'sF: {}, sB: {}'.format(rd.sonarf, rd.sonarb),
                             ]
                self.oled_display.display_text('\n'.join(oled_text))
        except Exception as e:
            # Best-effort poll: I2C glitches are reported but not fatal.
            print('Error getting state from robot.')

    def set_state(self, s0_pos, s1_pos, led_pattern):
        """Serialize the desired state and write it over I2C.

        Wire format: two-byte big-endian length header, then the payload,
        each written as a single byte. The sonar fields are zeroed since
        they are only meaningful in the robot-to-host direction.
        """
        try:
            self.current_state.s0_pos=s0_pos
            self.current_state.s1_pos=s1_pos
            self.current_state.led_pattern=led_pattern
            self.current_state.sonarf=0
            self.current_state.sonarb=0
            data = self.current_state.SerializeToString()
            data_size = len(data)
            # write header
            self.i2c_bus.write_byte(self.i2c_address, (data_size >> 8) & 0xFF)
            self.i2c_bus.write_byte(self.i2c_address, data_size & 0xFF)
            # write data
            for c in data:
                self.i2c_bus.write_byte(self.i2c_address, ord(c))
        except Exception as e:
            print(e)
if __name__ == '__main__':
    # Demo loop: sweep both servos back and forth between 90 and 40 degrees
    # in 5-degree steps with a rainbow LED pattern, polling the robot state
    # after each step. Runs until interrupted (SIGINT/SIGTERM are handled
    # by RobotDriver.exit_gracefully).
    oled = OledDisplay()
    driver = RobotDriver(oled_display=oled)
    while True:
        for i in range(90, 40, -5):
            driver.set_state(s0_pos=i, s1_pos=i, led_pattern=robot_data_pb2.RobotData.RAINBOW)
            time.sleep(.5)
            driver.get_state()
        for i in range(40, 90, 5):
            driver.set_state(s0_pos=i, s1_pos=i, led_pattern=robot_data_pb2.RobotData.RAINBOW)
            time.sleep(.5)
            driver.get_state()
| bsd-3-clause |
wong2/sentry | src/sentry/web/frontend/team_settings.py | 13 | 1958 | from __future__ import absolute_import
from django import forms
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.http import HttpResponseRedirect
from django.utils.translation import ugettext_lazy as _
from sentry.models import (
AuditLogEntry, AuditLogEntryEvent, Team, OrganizationMember,
OrganizationMemberType
)
from sentry.web.frontend.base import TeamView
class EditTeamForm(forms.ModelForm):
    """Model form exposing the editable Team fields (name and slug)."""
    class Meta:
        fields = ('name', 'slug',)
        model = Team
class TeamSettingsView(TeamView):
    """Team settings page: edit name/slug, record an audit-log entry on
    save, and decide whether the remove-team control is shown."""

    required_access = OrganizationMemberType.ADMIN

    def get_form(self, request, team):
        # Unbound form on GET (request.POST is falsy), bound on POST.
        return EditTeamForm(request.POST or None, instance=team)

    def handle(self, request, organization, team):
        form = self.get_form(request, team)
        if form.is_valid():
            team = form.save()
            # Persist an audit trail of who changed the team and from where.
            AuditLogEntry.objects.create(
                organization=organization,
                actor=request.user,
                ip_address=request.META['REMOTE_ADDR'],
                target_object=team.id,
                event=AuditLogEntryEvent.TEAM_EDIT,
                data=team.get_audit_log_data(),
            )
            messages.add_message(request, messages.SUCCESS,
                _('Changes to your team were saved.'))
            # Redirect uses the (possibly new) slug from the saved team.
            return HttpResponseRedirect(reverse('sentry-manage-team', args=[organization.slug, team.slug]))
        # Superusers may always remove; otherwise the user must be at least
        # an OWNER-level member with global access or membership in this team.
        if request.user.is_superuser:
            can_remove_team = True
        else:
            can_remove_team = OrganizationMember.objects.filter(
                Q(has_global_access=True) | Q(teams=team),
                user=request.user,
                type__lte=OrganizationMemberType.OWNER,
            ).exists()
        context = {
            'form': form,
            'can_remove_team': can_remove_team,
        }
        return self.respond('sentry/teams/manage.html', context)
| bsd-3-clause |
expyriment/expyriment | expyriment/io/_mouse.py | 1 | 25968 | """
Mouse input.
This module contains a class implementing pygame mouse input.
"""
__author__ = 'Florian Krause <florian@expyriment.org>, \
Oliver Lindemann <oliver@expyriment.org>'
__version__ = ''
__revision__ = ''
__date__ = ''
from types import FunctionType
import pygame
from . import defaults
from ..misc._timer import get_time
from ..misc import is_android_running
from ._input_output import Input
from .. import _internals, misc
class Mouse(Input):
    """A class implementing a mouse input.
    Calling ``expyriment.control.initialize(exp)`` will automatically create a
    mouse instance and will reference it in exp.mouse for easy access.
    """

    # Static class properties shared by all Mouse instances; they configure
    # the "click three times quickly in a corner to quit" emergency exit.
    _quit_corner_location = None
    _corner_rect_size = (30, 30)
    _quit_action_events = []

    def __init__(self, show_cursor=True, track_button_events=None,
                 track_motion_events=None):
        """Initialize a mouse input.
        Parameters
        ----------
        show_cursor : bool, optional
            shows mouse cursor (default = True)
        track_button_events : bool, optional
            track button events via Pygame queue (default = True)
        track_motion_events : bool, optional
            track motion events via Pygame queue (default = False)
        Notes
        -----
        (a) It is strongly suggest to avoid tracking of motions events,
        (track_motion_events=True), because it quickly causes an overflow in
        the Pygame event queue and you might consequently loose important
        events.
        (b) Turning the mouse wheel causes button_down_events. Thus,
        turning the mouse wheel rather extensively causes an overflow of
        the Pygame event queue. You might consider turn off the default
        tracking of mouse button event by calling
        `experiment.mouse.track_button_events = False`.
        (c) See ``process_quit_event`` for the forced quitting of experiments
        via mouse events.
        """
        Input.__init__(self)
        if is_android_running():
            # Android has no hardware keyboard, so the mouse quit gesture
            # is enabled by default (corner 1 = upper right).
            Mouse._quit_corner_location = 1
        if show_cursor is None:
            show_cursor = defaults.mouse_show_cursor
        if track_button_events is None:
            track_button_events = defaults.mouse_track_button_events
        if track_motion_events is None:
            track_motion_events = defaults.mouse_track_motion_events
        if show_cursor:
            # show_cursor() also applies the two tracking flags.
            self.show_cursor(track_button_events, track_motion_events)
        else:
            self.track_button_events = track_button_events
            self.track_motion_events = track_motion_events

    @staticmethod
    def set_quit_corner_location(corner, corner_rect_size=(None, None)):
        """Activate the possibility to quit experiment using a mouse event.
        Defines the corner on which the user has to click to elicit a
        quit dialog.
        If quit corner location has been defined, clicking quickly three
        times (i.e., within 1 second) in the specified corner forces
        experiment to quit.
        To switch off the detection of mouse quit events, please call
        ``Mouse.set_quit_corner_location(corner=None)``.
        Changing the corner and rect_size always affects all mouse
        instances.
        Parameters
        ----------
        corner: int or None
            location code (0,1, 2 or 3) of the quit corner; the default value
            under Android is 1, otherwise None; see also the notes
            below.
        corner_rect_size = tuple (int, int), optional
            size of the field (rect) that detects the quit action in one
            corner of the screen; default = (30, 30)
        Notes
        -----
        Mouse quit events are especially useful for experiments on devices
        without hardware keyboard, such as tablet PCs or smartphones.
        Corner location codes::
            0 = upper left corner, 1 = upper right corner (0) (1)
            2 = lower right corner, 3 = lower left corner (3) (2)
            otherwise the detection of mouse quit events is deactivated
        The detection of mouse quit events is activated by default under
        Android.
        """
        if corner is not None:
            if not isinstance(corner, int) or corner<0 or corner >3:
                # NOTE(review): `corner` is reset to None before the warning
                # is formatted, so the message prints "None" rather than the
                # offending value.
                corner = None
                print("Warning: {} is an unkown corner location. Mouse quit "
                      "event is deactivated.".format(corner))
        Mouse._quit_corner_location = corner
        try:
            Mouse._corner_rect_size = (int(corner_rect_size[0]),
                                       int(corner_rect_size[1]))
        except:
            # Keep the previous rect size if the argument is not numeric
            # (e.g. the (None, None) default).
            pass

    @staticmethod
    def process_quit_event(click_position=None):
        """Check if mouse exit action has been performed.
        If quit corner location is has been defined via
        ``Mouse.set_quit_corner_location()`` (i.e. 0, 1, 2 or 3), clicking
        quickly three times (i.e., within 1 second) in one of the corners of
        the screen forces the experiment to quit.
        The function is called automatically by all mouse get event and wait
        methods (similar to ``Keyboard.process_control_keys``). If no mouse
        functions are called by your program, this function can be polled to
        ensure quitting experiment by mouse.
        Parameters
        ----------
        click_position : tuple of int (x,y), optional
            clicked location to be processed. If not defined, the Pygame event
            queue will be checked for mouse down events and the current
            position is taken
        Returns
        -------
        out : bool, optional
            True if exit action has been performed
            False otherwise
        See Also
        --------
        set_quit_corner_location
        """
        if Mouse._quit_corner_location not in (0, 1, 2, 3):
            return False
        if click_position is None:
            # check Pygame queue
            pos = None
            # pygame.event.pump() # not sure if it is required!
            for event in pygame.event.get(pygame.MOUSEBUTTONDOWN):
                if event.button > 0:
                    # Convert pygame's top-left origin to expyriment's
                    # centred coordinate system.
                    screen_size = _internals.active_exp.screen.surface.get_size()
                    pos = pygame.mouse.get_pos()
                    pos = (pos[0] - screen_size[0] // 2,
                           -pos[1] + screen_size[1] // 2)
                    break
            if pos is None:
                return False
            else:
                # Re-enter with a concrete position.
                return Mouse.process_quit_event(click_position=pos)
        # determine threshold x & y
        if Mouse._quit_corner_location == 0 or Mouse._quit_corner_location == 3: # left
            threshold_x = -_internals.active_exp.screen.center_x + \
                          Mouse._corner_rect_size[0]
        else:# right
            threshold_x = _internals.active_exp.screen.center_x - \
                          Mouse._corner_rect_size[0]
        if Mouse._quit_corner_location == 0 or Mouse._quit_corner_location == 1: # upper
            threshold_y = _internals.active_exp.screen.center_y - \
                          Mouse._corner_rect_size[1]
        else:# lower
            threshold_y = -_internals.active_exp.screen.center_y + \
                          Mouse._corner_rect_size[1]
        # check whether the click lies inside the configured corner rect
        if (Mouse._quit_corner_location == 0 and \
                click_position[0] < threshold_x and \
                click_position[1] > threshold_y) \
           or (Mouse._quit_corner_location == 1 and \
                click_position[0] > threshold_x and \
                click_position[1] > threshold_y) \
           or (Mouse._quit_corner_location == 2 and \
                click_position[0] > threshold_x and \
                click_position[1] < threshold_y) \
           or (Mouse._quit_corner_location == 3 and \
                click_position[0] < threshold_x and \
                click_position[1] < threshold_y):
            # Keep a rolling window of the last three corner clicks; quit if
            # the oldest of the three happened less than 1 s ago.
            Mouse._quit_action_events.append(get_time())
            if len(Mouse._quit_action_events)>=3:
                diff = get_time()-Mouse._quit_action_events.pop(0)
                if (diff < 1):
                    # simulate quit key
                    simulated_key = pygame.event.Event(
                        pygame.KEYDOWN,
                        {'key': _internals.active_exp.keyboard.get_quit_key()})
                    return _internals.active_exp.keyboard.process_control_keys(
                        key_event=simulated_key)
        return False

    @property
    def track_button_events(self):
        """Getter for track_button_events."""
        return self._track_button_events

    @track_button_events.setter
    def track_button_events(self, value):
        """Setter for track_button_events.
        Switch on/off the processing of button and wheel events.
        """
        self._track_button_events = value
        if value:
            pygame.event.set_allowed(pygame.MOUSEBUTTONDOWN)
            pygame.event.set_allowed(pygame.MOUSEBUTTONUP)
        else:
            pygame.event.set_blocked(pygame.MOUSEBUTTONDOWN)
            pygame.event.set_blocked(pygame.MOUSEBUTTONUP)

    @property
    def track_motion_events(self):
        """Getter for track_motion_events.
        Switch on/off the buffering of motion events in the Pygame event queue.
        Notes
        -----
        It is strongly suggest to avoid tracking of motions events,
        (track_motion_events=True), because it quickly causes an overflow in
        the Pygame event queue and you might consequently loose important
        events.
        """
        return self._track_motion_events

    @track_motion_events.setter
    def track_motion_events(self, value):
        """Setter for track_motion_events.
        Switch on/off the processing of motion events.
        """
        self._track_motion_events = value
        if value:
            pygame.event.set_allowed(pygame.MOUSEMOTION)
        else:
            pygame.event.set_blocked(pygame.MOUSEMOTION)

    @property
    def pressed_buttons(self):
        """Getter for pressed_buttons."""
        pygame.event.pump()
        return pygame.mouse.get_pressed()

    @property
    def is_cursor_visible(self):
        """Getter for is_cursor_visible"""
        # set_visible() returns the previous visibility state, so probe it
        # with False and immediately restore the original value.
        visible = pygame.mouse.set_visible(False)
        pygame.mouse.set_visible(visible)
        return visible

    def get_last_button_down_event(self, process_quit_event=True):
        """Get the last button down event.
        All earlier button down events will be removed from the queue.
        Parameters
        ----------
        process_quit_event : boolean, optional
            if False, the current location will not be processed for mouse
            quitting events in the case that a button down event has been
            found (default = True).
        Returns
        -------
        btn_id : int
            button number (0,1,2) or 3 for wheel up or 4 for wheel down,
            if quit screen mouse action has been performed, the method
            returns -1
        """
        rtn = None
        # pygame numbers buttons from 1; expyriment reports them from 0.
        for event in pygame.event.get(pygame.MOUSEBUTTONDOWN):
            if event.button > 0:
                rtn = event.button - 1
        if rtn==0:
            # Only a left click (button 0) can be a quit-corner click.
            if process_quit_event and Mouse.process_quit_event(self.position):
                return -1
        return rtn

    def get_last_button_up_event(self):
        """Get the last button up event.
        All earlier button up events will be removed from the queue.
        Returns
        -------
        btn_id : int
            button number (0,1,2)
            if quit screen mouse action has been performed, the method
            returns -1
        """
        rtn = None
        for event in pygame.event.get(pygame.MOUSEBUTTONUP):
            if event.button > 0:
                rtn = event.button - 1
        return rtn

    def check_button_pressed(self, button_number):
        """Return False or button id if a specific button is currently pressed.
        Parameters
        ----------
        button_number : int
            the button number (0,1,2) to be checked
        Returns
        -------
        is_pressed: boolean
        """
        # NOTE(review): button_number is not range-checked against len(btns);
        # an out-of-range index would raise IndexError.
        btns = self.pressed_buttons
        if len(btns) >= 1 and button_number >= 0:
            return btns[button_number]
        else:
            return False

    def check_wheel(self):
        """Check the mouse wheel.
        Returns
        -------
        direction : str
            "up" or "down" if mouse wheel has been recently rotated
            upwards or downwards otherwise it returns None.
        """
        # Wheel movements arrive as button-down events 3 (up) and 4 (down).
        evt = self.get_last_button_down_event()
        if evt == 3:
            return "up"
        elif evt == 4:
            return "down"
        else:
            return None

    @property
    def position(self):
        """Getter for position."""
        pygame.event.pump()
        # Convert from pygame's top-left origin to expyriment's centred,
        # y-up coordinate system.
        screen_size = _internals.active_exp.screen.surface.get_size()
        pos = pygame.mouse.get_pos()
        return (pos[0] - screen_size[0] // 2, -pos[1] + screen_size[1] // 2)

    @position.setter
    def position(self, position):
        """Setter for position."""
        screen_size = _internals.active_exp.screen.surface.get_size()
        pos = (position[0] + screen_size[0] // 2,
               - position[1] + screen_size[1] // 2)
        pygame.mouse.set_pos(pos)

    def set_cursor(self, size, hotspot, xormasks, andmasks):
        """Set the cursor.
        Parameters
        ----------
        size : (int, int)
            size of the cursor
        hotspot : (int, int)
            position of the hotspot (0,0 is top left)
        xormask : list
            sequence of bytes with cursor xor data masks
        andmask : list
            sequence of bytes with cursor bitmask data
        """
        return pygame.mouse.set_cursor(size, hotspot, xormasks, andmasks)

    def get_cursor(self):
        """Get the cursor."""
        return pygame.mouse.get_cursor()

    def clear(self):
        """Clear the event cue from mouse events."""
        pygame.event.clear(pygame.MOUSEBUTTONDOWN)
        pygame.event.clear(pygame.MOUSEBUTTONUP)
        pygame.event.clear(pygame.MOUSEMOTION)
        if self._logging:
            _internals.active_exp._event_file_log("Mouse,cleared", 2)

    def wait_event(self, wait_button=True, wait_motion=True, buttons=None,
                   duration=None, wait_for_buttonup=False,
                   callback_function=None, process_control_events=True):
        """Wait for a mouse event (i.e., motion, button press or wheel event).
        Button id coding:
        - None    for no mouse button event or
        - 0,1,2   for left. middle and right button or
        - 3       for wheel up or
        - 4       for wheel down (wheel works only for keydown events).
        Parameters
        ----------
        wait_button : bool, optional
            set 'False' to ignore for a button presses (default=True)
        wait_motion : bool, optional
            set 'False' to ignore for a mouse motions (default=True)
        buttons : int or list, optional
            a specific button or list of buttons to wait for
        duration : int, optional
            the maximal time to wait in ms
        wait_for_buttonup : bool, optional
            if True it waits for button-up default=False)
        callback_function : function, optional
            function to repeatedly execute during waiting loop
        process_control_events : bool, optional
            process ``io.keyboard.process_control_keys()`` and
            ``io.mouse.process_quit_event()`` (default = True)
        Returns
        -------
        event_id : int
            id of the event that quited waiting
        move : bool
            True if a motion occurred
        pos : (int, int)
            mouse position (tuple)
        rt : int
            reaction time
        Notes
        ------
        This will also by default process control events (quit and pause).
        Thus, keyboard events will be cleared from the cue and cannot be
        received by a ``Keyboard().check()`` anymore!
        See Also
        --------
        expyriment.design.Experiment.register_wait_callback_function
        """
        if _internals.skip_wait_methods:
            return None, None, None, None
        start = get_time()
        self.clear()
        old_pos = pygame.mouse.get_pos()
        btn_id = None
        rt = None
        motion_occured = False
        if buttons is None:
            buttons = [0, 1, 2, 3, 4]
        else:
            try:
                buttons = list(buttons)
            except:
                # A single (non-iterable) button id was passed.
                buttons = [buttons]
        while True:
            # Run the per-call callback first; a CallbackQuitEvent replaces
            # the button id in the return tuple.
            if isinstance(callback_function, FunctionType):
                rtn_callback = callback_function()
                if isinstance(rtn_callback, _internals.CallbackQuitEvent):
                    btn_id = rtn_callback
                    rt = int((get_time() - start) * 1000)
                    break
            if _internals.active_exp.is_initialized:
                rtn_callback = _internals.active_exp._execute_wait_callback()
                if isinstance(rtn_callback, _internals.CallbackQuitEvent):
                    btn_id = rtn_callback
                    rt = int((get_time() - start) * 1000)
                    break
            if process_control_events:
                if _internals.active_exp.keyboard.process_control_keys():
                    break
            if wait_motion:
                motion_occured = old_pos != pygame.mouse.get_pos()
            if wait_button:
                if wait_for_buttonup:
                    btn_id = self.get_last_button_up_event()
                else:
                    btn_id = self.get_last_button_down_event(
                        process_quit_event=process_control_events)
            # -1 signals that a quit-corner click was detected and handled.
            if btn_id ==-1:
                btn_id = None
                break
            elif btn_id in buttons or motion_occured:
                rt = int((get_time() - start) * 1000)
                break
            elif (duration is not None and \
                    int((get_time() - start) * 1000) >= duration):
                break
        position_in_expy_coordinates = self.position
        if self._logging:
            _internals.active_exp._event_file_log(
                "Mouse,received,{0}-{1},wait_event".format(btn_id, motion_occured))
        return btn_id, motion_occured, position_in_expy_coordinates, rt

    def wait_press(self, buttons=None, duration=None, wait_for_buttonup=False,
                   callback_function=None, process_control_events=True):
        """Wait for a mouse button press or mouse wheel event.
        Parameters
        ----------
        buttons : int or list, optional
            a specific button or list of buttons to wait for
        duration : int, optional
            maximal time to wait in ms
        wait_for_buttonup : bool, optional
            if True it waits for button-up
        callback_function : function, optional
            function to repeatedly execute during waiting loop
        process_control_events : bool, optional
            process ``io.keyboard.process_control_keys()`` and
            ``io.mouse.process_quit_event()`` (default = false)
        Returns
        -------
        event_id : int
            id of the event that quited waiting
        pos : (int, int)
            mouse position (tuple)
        rt : int
            reaction time
        Notes
        ------
        button id coding
        - None    for no mouse button event or
        - 0,1,2   for left. middle and right button or
        - 3       for wheel up or
        - 4       for wheel down (wheel works only for keydown events).
        """
        # Thin wrapper around wait_event that drops the motion flag from
        # the returned tuple.
        rtn = self.wait_event(wait_button=True, wait_motion=False,
                              buttons=buttons, duration=duration,
                              wait_for_buttonup=wait_for_buttonup,
                              callback_function=callback_function,
                              process_control_events=process_control_events)
        return rtn[0], rtn[2], rtn[3]

    def wait_motion(self, duration=None, callback_function=None,
                    process_control_events=True):
        """Wait for a mouse motion.
        Parameters
        ----------
        duration : int, optional
            maximal time to wait in ms
        callback_function : function, optional
            function to repeatedly execute during waiting loop
        process_control_events : bool, optional
            process ``io.keyboard.process_control_keys()`` and
            ``io.mouse.process_quit_event()`` (default = false)
        Returns
        -------
        pos : (int, int)
            mouse position (tuple)
        rt : int
            reaction time
        """
        rtn = self.wait_event(wait_button=False, wait_motion=True, buttons=[],
                              duration=duration, wait_for_buttonup=False,
                              callback_function=callback_function,
                              process_control_events=process_control_events)
        # Propagate a CallbackQuitEvent in place of the position.
        if isinstance(rtn[0], _internals.CallbackQuitEvent):
            return rtn[0], rtn[3]
        else:
            return rtn[2], rtn[3]

    def show_cursor(self, track_button_events=True, track_motion_events=False):
        """Show the cursor.
        Parameters
        ----------
        track_button_events : bool, optional
            tracking button events (default = True)
        track_motion_events : bool, optional
            tracking motion events (default = False)
        """
        pygame.mouse.set_visible(True)
        self.track_button_events = track_button_events
        self.track_motion_events = track_motion_events

    def hide_cursor(self, track_button_events=False, track_motion_events=False):
        """Hide the cursor.
        Parameters
        ----------
        track_button_events : bool, optional
            tracking button events (default = False)
        track_motion_events : bool, optional
            tracking motion events (default = False)
        """
        pygame.mouse.set_visible(False)
        self.track_button_events = track_button_events
        self.track_motion_events = track_motion_events

    @staticmethod
    def _self_test(exp):
        """Test the mouse.
        Returns
        -------
        polling_time : int
            polling time
        buttons_work : int
            1 -- if mouse test was ended with mouse button,
            0 -- if testing has been quit with q
        """
        from .. import stimuli
        # measure mouse polling time
        info = """This will test how timing accurate your mouse is.
[Press RETURN to continue]"""
        stimuli.TextScreen("Mouse test (1)", info).present()
        exp.keyboard.wait(misc.constants.K_RETURN)
        mouse = Mouse()
        go = stimuli.TextLine("Keep on moving...")
        go.preload()
        stimuli.TextLine("Please move the mouse").present()
        mouse.wait_motion()
        go.present()
        exp.clock.reset_stopwatch()
        motion = []
        # Sample inter-motion intervals for 200 ms; the modal interval is
        # taken as the polling time.
        while exp.clock.stopwatch_time < 200:
            _pos, rt = mouse.wait_motion()
            motion.append(rt)
        stimuli.TextLine("Thanks").present()
        polling_time = misc.statistics.mode(motion)
        info = """Your mouse polling time is {0} ms.
[Press RETURN to continue] """.format(polling_time)
        text = stimuli.TextScreen("Results", info)
        text.present()
        exp.keyboard.wait([misc.constants.K_RETURN])
        info = """This will test if you mouse buttons work.
Please press all buttons one after the other to see if the corresponding buttons on the screen light up.
When done, click inside one of the buttons on the screen to end the test.
If your mouse buttons do not work, you can quit by pressing q.
[Press RETURN to continue]"""
        stimuli.TextScreen("Mouse test (2)", info).present()
        exp.keyboard.wait(misc.constants.K_RETURN)
        # test mouse clicking
        rects = [stimuli.Rectangle(size=[30, 30], position=[-50, 0]),
                 stimuli.Rectangle(size=[30, 30], position=[0, 0]),
                 stimuli.Rectangle(size=[30, 30], position=[50, 0])]
        canvas = stimuli.Canvas(size=[350, 500])
        btn = None
        go_on = True
        while go_on:
            canvas.clear_surface()
            # Highlight the rect matching the last pressed button.
            for cnt, r in enumerate(rects):
                r.unload()
                if cnt == btn:
                    r.colour = misc.constants.C_YELLOW
                else:
                    r.colour = misc.constants.C_RED
                r.plot(canvas)
            if btn == 3:
                text = "Mouse wheel UP"
            elif btn == 4:
                text = "Mouse wheel DOWN"
            else:
                text = ""
            stimuli.TextLine(text, position=[0, 50]).plot(canvas)
            canvas.present()
            btn = None
            while btn is None:
                btn = mouse.get_last_button_down_event()
                if btn is not None:
                    # Clicking inside any on-screen rect ends the test.
                    position = mouse.position
                    for r in rects:
                        if r.overlapping_with_position(position):
                            buttons_work = 1
                            mouse.hide_cursor()
                            go_on = False
                            break
                elif exp.keyboard.check(keys=misc.constants.K_q):
                    buttons_work = 0
                    mouse.hide_cursor()
                    go_on = False
                    break
        result = {}
        result["testsuite_mouse_polling_time"] = str(polling_time) + " ms"
        result["testsuite_mouse_buttons_work"] = buttons_work
        return result
| gpl-3.0 |
srene/ns-3-inrpp | src/buildings/bindings/modulegen__gcc_LP64.py | 38 | 297879 | from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
    """Error handler that reports pybindgen wrapper failures as warnings
    instead of aborting module generation."""

    def handle_error(self, wrapper, exception, traceback_):
        """Warn about the failed wrapper and tell pybindgen to skip it."""
        warnings.warn(f"exception {exception!r} in wrapper {wrapper}")
        return True
pybindgen.settings.error_handler = ErrorHandler()
import sys
def module_init():
    """Create and return the root pybindgen Module for the ns-3
    'buildings' Python bindings (everything lives in the ns3 namespace)."""
    return Module('ns.buildings', cpp_namespace='::ns3')
def register_types(module):
    """Register all C++ types wrapped by the ns.buildings Python bindings.

    Auto-generated by pybindgen's module generator. Declarations are
    order-dependent: a class must be added (or imported from another
    module) before it can be looked up via ``root_module[...]`` as a
    parent, outer class, or conversion target. Do not reorder by hand.
    Also registers type aliases (e.g. Vector3D -> Vector) and the nested
    FatalImpl and Hash namespaces.
    """
    root_module = module.get_root()
    ## propagation-environment.h (module 'propagation'): ns3::CitySize [enumeration]
    module.add_enum('CitySize', ['SmallCity', 'MediumCity', 'LargeCity'], import_from_module='ns.propagation')
    ## propagation-environment.h (module 'propagation'): ns3::EnvironmentType [enumeration]
    module.add_enum('EnvironmentType', ['UrbanEnvironment', 'SubUrbanEnvironment', 'OpenAreasEnvironment'], import_from_module='ns.propagation')
    ## address.h (module 'network'): ns3::Address [class]
    module.add_class('Address', import_from_module='ns.network')
    ## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration]
    module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network')
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
    module.add_class('AttributeConstructionList', import_from_module='ns.core')
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
    module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
    ## box.h (module 'mobility'): ns3::Box [class]
    module.add_class('Box', import_from_module='ns.mobility')
    ## box.h (module 'mobility'): ns3::Box::Side [enumeration]
    module.add_enum('Side', ['RIGHT', 'LEFT', 'TOP', 'BOTTOM', 'UP', 'DOWN'], outer_class=root_module['ns3::Box'], import_from_module='ns.mobility')
    ## building-container.h (module 'buildings'): ns3::BuildingContainer [class]
    module.add_class('BuildingContainer')
    ## building-list.h (module 'buildings'): ns3::BuildingList [class]
    module.add_class('BuildingList')
    ## buildings-helper.h (module 'buildings'): ns3::BuildingsHelper [class]
    module.add_class('BuildingsHelper')
    ## callback.h (module 'core'): ns3::CallbackBase [class]
    module.add_class('CallbackBase', import_from_module='ns.core')
    ## constant-velocity-helper.h (module 'mobility'): ns3::ConstantVelocityHelper [class]
    module.add_class('ConstantVelocityHelper', import_from_module='ns.mobility')
    ## hash.h (module 'core'): ns3::Hasher [class]
    module.add_class('Hasher', import_from_module='ns.core')
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
    module.add_class('Ipv4Address', import_from_module='ns.network')
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
    root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address'])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class]
    module.add_class('Ipv4Mask', import_from_module='ns.network')
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
    module.add_class('Ipv6Address', import_from_module='ns.network')
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
    root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address'])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class]
    module.add_class('Ipv6Prefix', import_from_module='ns.network')
    ## node-container.h (module 'network'): ns3::NodeContainer [class]
    module.add_class('NodeContainer', import_from_module='ns.network')
    ## object-base.h (module 'core'): ns3::ObjectBase [class]
    module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
    ## object.h (module 'core'): ns3::ObjectDeleter [struct]
    module.add_class('ObjectDeleter', import_from_module='ns.core')
    ## object-factory.h (module 'core'): ns3::ObjectFactory [class]
    module.add_class('ObjectFactory', import_from_module='ns.core')
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## tag-buffer.h (module 'network'): ns3::TagBuffer [class]
    module.add_class('TagBuffer', import_from_module='ns.network')
    ## nstime.h (module 'core'): ns3::TimeWithUnit [class]
    module.add_class('TimeWithUnit', import_from_module='ns.core')
    ## type-id.h (module 'core'): ns3::TypeId [class]
    module.add_class('TypeId', import_from_module='ns.core')
    ## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
    module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
    module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
    module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
    ## vector.h (module 'core'): ns3::Vector2D [class]
    module.add_class('Vector2D', import_from_module='ns.core')
    ## vector.h (module 'core'): ns3::Vector3D [class]
    module.add_class('Vector3D', import_from_module='ns.core')
    ## empty.h (module 'core'): ns3::empty [class]
    module.add_class('empty', import_from_module='ns.core')
    ## int64x64-double.h (module 'core'): ns3::int64x64_t [class]
    module.add_class('int64x64_t', import_from_module='ns.core')
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::impl_type [enumeration]
    module.add_enum('impl_type', ['int128_impl', 'cairo_impl', 'ld_impl'], outer_class=root_module['ns3::int64x64_t'], import_from_module='ns.core')
    ## object.h (module 'core'): ns3::Object [class]
    module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    ## object.h (module 'core'): ns3::Object::AggregateIterator [class]
    module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
    ## position-allocator.h (module 'mobility'): ns3::PositionAllocator [class]
    module.add_class('PositionAllocator', import_from_module='ns.mobility', parent=root_module['ns3::Object'])
    ## propagation-loss-model.h (module 'propagation'): ns3::PropagationLossModel [class]
    module.add_class('PropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::Object'])
    ## position-allocator.h (module 'mobility'): ns3::RandomBoxPositionAllocator [class]
    module.add_class('RandomBoxPositionAllocator', import_from_module='ns.mobility', parent=root_module['ns3::PositionAllocator'])
    ## building-position-allocator.h (module 'buildings'): ns3::RandomBuildingPositionAllocator [class]
    module.add_class('RandomBuildingPositionAllocator', parent=root_module['ns3::PositionAllocator'])
    ## position-allocator.h (module 'mobility'): ns3::RandomDiscPositionAllocator [class]
    module.add_class('RandomDiscPositionAllocator', import_from_module='ns.mobility', parent=root_module['ns3::PositionAllocator'])
    ## propagation-loss-model.h (module 'propagation'): ns3::RandomPropagationLossModel [class]
    module.add_class('RandomPropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel'])
    ## position-allocator.h (module 'mobility'): ns3::RandomRectanglePositionAllocator [class]
    module.add_class('RandomRectanglePositionAllocator', import_from_module='ns.mobility', parent=root_module['ns3::PositionAllocator'])
    ## building-position-allocator.h (module 'buildings'): ns3::RandomRoomPositionAllocator [class]
    module.add_class('RandomRoomPositionAllocator', parent=root_module['ns3::PositionAllocator'])
    ## random-variable-stream.h (module 'core'): ns3::RandomVariableStream [class]
    module.add_class('RandomVariableStream', import_from_module='ns.core', parent=root_module['ns3::Object'])
    ## propagation-loss-model.h (module 'propagation'): ns3::RangePropagationLossModel [class]
    module.add_class('RangePropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel'])
    ## building-position-allocator.h (module 'buildings'): ns3::SameRoomPositionAllocator [class]
    module.add_class('SameRoomPositionAllocator', parent=root_module['ns3::PositionAllocator'])
    ## random-variable-stream.h (module 'core'): ns3::SequentialRandomVariable [class]
    module.add_class('SequentialRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## propagation-loss-model.h (module 'propagation'): ns3::ThreeLogDistancePropagationLossModel [class]
    module.add_class('ThreeLogDistancePropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel'])
    ## nstime.h (module 'core'): ns3::Time [class]
    module.add_class('Time', import_from_module='ns.core')
    ## nstime.h (module 'core'): ns3::Time::Unit [enumeration]
    module.add_enum('Unit', ['Y', 'D', 'H', 'MIN', 'S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core')
    ## nstime.h (module 'core'): ns3::Time [class]
    root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t'])
    ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
    module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    ## random-variable-stream.h (module 'core'): ns3::TriangularRandomVariable [class]
    module.add_class('TriangularRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    ## propagation-loss-model.h (module 'propagation'): ns3::TwoRayGroundPropagationLossModel [class]
    module.add_class('TwoRayGroundPropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel'])
    ## position-allocator.h (module 'mobility'): ns3::UniformDiscPositionAllocator [class]
    module.add_class('UniformDiscPositionAllocator', import_from_module='ns.mobility', parent=root_module['ns3::PositionAllocator'])
    ## random-variable-stream.h (module 'core'): ns3::UniformRandomVariable [class]
    module.add_class('UniformRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    ## random-variable-stream.h (module 'core'): ns3::WeibullRandomVariable [class]
    module.add_class('WeibullRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    ## random-variable-stream.h (module 'core'): ns3::ZetaRandomVariable [class]
    module.add_class('ZetaRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    ## random-variable-stream.h (module 'core'): ns3::ZipfRandomVariable [class]
    module.add_class('ZipfRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    ## attribute.h (module 'core'): ns3::AttributeAccessor [class]
    module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    ## attribute.h (module 'core'): ns3::AttributeChecker [class]
    module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    ## attribute.h (module 'core'): ns3::AttributeValue [class]
    module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    ## box.h (module 'mobility'): ns3::BoxChecker [class]
    module.add_class('BoxChecker', import_from_module='ns.mobility', parent=root_module['ns3::AttributeChecker'])
    ## box.h (module 'mobility'): ns3::BoxValue [class]
    module.add_class('BoxValue', import_from_module='ns.mobility', parent=root_module['ns3::AttributeValue'])
    ## building.h (module 'buildings'): ns3::Building [class]
    module.add_class('Building', parent=root_module['ns3::Object'])
    ## building.h (module 'buildings'): ns3::Building::BuildingType_t [enumeration]
    module.add_enum('BuildingType_t', ['Residential', 'Office', 'Commercial'], outer_class=root_module['ns3::Building'])
    ## building.h (module 'buildings'): ns3::Building::ExtWallsType_t [enumeration]
    module.add_enum('ExtWallsType_t', ['Wood', 'ConcreteWithWindows', 'ConcreteWithoutWindows', 'StoneBlocks'], outer_class=root_module['ns3::Building'])
    ## buildings-propagation-loss-model.h (module 'buildings'): ns3::BuildingsPropagationLossModel [class]
    module.add_class('BuildingsPropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
    ## callback.h (module 'core'): ns3::CallbackChecker [class]
    module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## callback.h (module 'core'): ns3::CallbackImplBase [class]
    module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    ## callback.h (module 'core'): ns3::CallbackValue [class]
    module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## random-variable-stream.h (module 'core'): ns3::ConstantRandomVariable [class]
    module.add_class('ConstantRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    ## random-variable-stream.h (module 'core'): ns3::DeterministicRandomVariable [class]
    module.add_class('DeterministicRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    ## random-variable-stream.h (module 'core'): ns3::EmpiricalRandomVariable [class]
    module.add_class('EmpiricalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
    module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## random-variable-stream.h (module 'core'): ns3::ErlangRandomVariable [class]
    module.add_class('ErlangRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    ## random-variable-stream.h (module 'core'): ns3::ExponentialRandomVariable [class]
    module.add_class('ExponentialRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    ## building-position-allocator.h (module 'buildings'): ns3::FixedRoomPositionAllocator [class]
    module.add_class('FixedRoomPositionAllocator', parent=root_module['ns3::PositionAllocator'])
    ## propagation-loss-model.h (module 'propagation'): ns3::FixedRssLossModel [class]
    module.add_class('FixedRssLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel'])
    ## propagation-loss-model.h (module 'propagation'): ns3::FriisPropagationLossModel [class]
    module.add_class('FriisPropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel'])
    ## random-variable-stream.h (module 'core'): ns3::GammaRandomVariable [class]
    module.add_class('GammaRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    ## building-allocator.h (module 'buildings'): ns3::GridBuildingAllocator [class]
    module.add_class('GridBuildingAllocator', parent=root_module['ns3::Object'])
    ## position-allocator.h (module 'mobility'): ns3::GridPositionAllocator [class]
    module.add_class('GridPositionAllocator', import_from_module='ns.mobility', parent=root_module['ns3::PositionAllocator'])
    ## position-allocator.h (module 'mobility'): ns3::GridPositionAllocator::LayoutType [enumeration]
    module.add_enum('LayoutType', ['ROW_FIRST', 'COLUMN_FIRST'], outer_class=root_module['ns3::GridPositionAllocator'], import_from_module='ns.mobility')
    ## hybrid-buildings-propagation-loss-model.h (module 'buildings'): ns3::HybridBuildingsPropagationLossModel [class]
    module.add_class('HybridBuildingsPropagationLossModel', parent=root_module['ns3::BuildingsPropagationLossModel'])
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker [class]
    module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue [class]
    module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker [class]
    module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue [class]
    module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker [class]
    module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue [class]
    module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker [class]
    module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue [class]
    module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## itu-r-1238-propagation-loss-model.h (module 'buildings'): ns3::ItuR1238PropagationLossModel [class]
    module.add_class('ItuR1238PropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
    ## position-allocator.h (module 'mobility'): ns3::ListPositionAllocator [class]
    module.add_class('ListPositionAllocator', import_from_module='ns.mobility', parent=root_module['ns3::PositionAllocator'])
    ## propagation-loss-model.h (module 'propagation'): ns3::LogDistancePropagationLossModel [class]
    module.add_class('LogDistancePropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel'])
    ## random-variable-stream.h (module 'core'): ns3::LogNormalRandomVariable [class]
    module.add_class('LogNormalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    ## propagation-loss-model.h (module 'propagation'): ns3::MatrixPropagationLossModel [class]
    module.add_class('MatrixPropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel'])
    ## mobility-building-info.h (module 'buildings'): ns3::MobilityBuildingInfo [class]
    module.add_class('MobilityBuildingInfo', parent=root_module['ns3::Object'])
    ## propagation-loss-model.h (module 'propagation'): ns3::NakagamiPropagationLossModel [class]
    module.add_class('NakagamiPropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel'])
    ## net-device.h (module 'network'): ns3::NetDevice [class]
    module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object'])
    ## net-device.h (module 'network'): ns3::NetDevice::PacketType [enumeration]
    module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network')
    ## node.h (module 'network'): ns3::Node [class]
    module.add_class('Node', import_from_module='ns.network', parent=root_module['ns3::Object'])
    ## random-variable-stream.h (module 'core'): ns3::NormalRandomVariable [class]
    module.add_class('NormalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker [class]
    module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryValue [class]
    module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## oh-buildings-propagation-loss-model.h (module 'buildings'): ns3::OhBuildingsPropagationLossModel [class]
    module.add_class('OhBuildingsPropagationLossModel', parent=root_module['ns3::BuildingsPropagationLossModel'])
    ## random-variable-stream.h (module 'core'): ns3::ParetoRandomVariable [class]
    module.add_class('ParetoRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    ## nstime.h (module 'core'): ns3::TimeValue [class]
    module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## type-id.h (module 'core'): ns3::TypeIdChecker [class]
    module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## type-id.h (module 'core'): ns3::TypeIdValue [class]
    module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## vector.h (module 'core'): ns3::Vector2DChecker [class]
    module.add_class('Vector2DChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## vector.h (module 'core'): ns3::Vector2DValue [class]
    module.add_class('Vector2DValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## vector.h (module 'core'): ns3::Vector3DChecker [class]
    module.add_class('Vector3DChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## vector.h (module 'core'): ns3::Vector3DValue [class]
    module.add_class('Vector3DValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## address.h (module 'network'): ns3::AddressChecker [class]
    module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## address.h (module 'network'): ns3::AddressValue [class]
    module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    typehandlers.add_type_alias(u'ns3::Vector3D', u'ns3::Vector')
    typehandlers.add_type_alias(u'ns3::Vector3D*', u'ns3::Vector*')
    typehandlers.add_type_alias(u'ns3::Vector3D&', u'ns3::Vector&')
    module.add_typedef(root_module['ns3::Vector3D'], 'Vector')
    typehandlers.add_type_alias(u'ns3::Vector3DValue', u'ns3::VectorValue')
    typehandlers.add_type_alias(u'ns3::Vector3DValue*', u'ns3::VectorValue*')
    typehandlers.add_type_alias(u'ns3::Vector3DValue&', u'ns3::VectorValue&')
    module.add_typedef(root_module['ns3::Vector3DValue'], 'VectorValue')
    typehandlers.add_type_alias(u'ns3::Vector3DChecker', u'ns3::VectorChecker')
    typehandlers.add_type_alias(u'ns3::Vector3DChecker*', u'ns3::VectorChecker*')
    typehandlers.add_type_alias(u'ns3::Vector3DChecker&', u'ns3::VectorChecker&')
    module.add_typedef(root_module['ns3::Vector3DChecker'], 'VectorChecker')
    ## Register a nested module for the namespace FatalImpl
    nested_module = module.add_cpp_namespace('FatalImpl')
    register_types_ns3_FatalImpl(nested_module)
    ## Register a nested module for the namespace Hash
    nested_module = module.add_cpp_namespace('Hash')
    register_types_ns3_Hash(nested_module)
def register_types_ns3_FatalImpl(module):
    """Register types for the nested ns3::FatalImpl namespace.

    No types are currently declared here; the root-module lookup is kept
    for symmetry with the other generated register_types_* helpers.
    """
    root_module = module.get_root()
def register_types_ns3_Hash(module):
    """Register types for the nested ns3::Hash namespace.

    Adds the Hash::Implementation base class, aliases for the 32/64-bit
    hash function-pointer typedefs, and the nested Hash::Function namespace.
    """
    root_module = module.get_root()
    ## hash-function.h (module 'core'): ns3::Hash::Implementation [class]
    module.add_class('Implementation', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
    typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash32Function_ptr')
    typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash32Function_ptr*')
    typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash32Function_ptr&')
    typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash64Function_ptr')
    typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash64Function_ptr*')
    typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash64Function_ptr&')
    ## Register a nested module for the namespace Function
    nested_module = module.add_cpp_namespace('Function')
    register_types_ns3_Hash_Function(nested_module)
def register_types_ns3_Hash_Function(module):
    """Register the concrete hash implementations in ns3::Hash::Function.

    All four classes derive from ns3::Hash::Implementation, which must
    already have been registered by register_types_ns3_Hash.
    """
    root_module = module.get_root()
    ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a [class]
    module.add_class('Fnv1a', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32 [class]
    module.add_class('Hash32', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64 [class]
    module.add_class('Hash64', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
    ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3 [class]
    module.add_class('Murmur3', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
def register_methods(root_module):
    """Dispatch per-class method registration for every wrapped type.

    Each call binds the methods of one C++ class onto its previously
    registered wrapper (looked up by its fully qualified C++ name in
    root_module). NOTE: the ordering below is produced by the pybindgen
    scanner and is load-bearing — base classes (e.g. the SimpleRefCount
    instantiations, Object, PositionAllocator, PropagationLossModel) are
    registered before classes derived from them. Do not reorder by hand.
    """
    # Value types and helpers.
    register_Ns3Address_methods(root_module, root_module['ns3::Address'])
    register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
    register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
    register_Ns3Box_methods(root_module, root_module['ns3::Box'])
    register_Ns3BuildingContainer_methods(root_module, root_module['ns3::BuildingContainer'])
    register_Ns3BuildingList_methods(root_module, root_module['ns3::BuildingList'])
    register_Ns3BuildingsHelper_methods(root_module, root_module['ns3::BuildingsHelper'])
    register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
    register_Ns3ConstantVelocityHelper_methods(root_module, root_module['ns3::ConstantVelocityHelper'])
    register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher'])
    register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address'])
    register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask'])
    register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address'])
    register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix'])
    register_Ns3NodeContainer_methods(root_module, root_module['ns3::NodeContainer'])
    register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
    register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
    register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
    register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
    register_Ns3TimeWithUnit_methods(root_module, root_module['ns3::TimeWithUnit'])
    register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
    register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
    register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
    register_Ns3Vector2D_methods(root_module, root_module['ns3::Vector2D'])
    register_Ns3Vector3D_methods(root_module, root_module['ns3::Vector3D'])
    register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
    register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
    # ns3::Object hierarchy: allocators, loss models, random variables.
    register_Ns3Object_methods(root_module, root_module['ns3::Object'])
    register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
    register_Ns3PositionAllocator_methods(root_module, root_module['ns3::PositionAllocator'])
    register_Ns3PropagationLossModel_methods(root_module, root_module['ns3::PropagationLossModel'])
    register_Ns3RandomBoxPositionAllocator_methods(root_module, root_module['ns3::RandomBoxPositionAllocator'])
    register_Ns3RandomBuildingPositionAllocator_methods(root_module, root_module['ns3::RandomBuildingPositionAllocator'])
    register_Ns3RandomDiscPositionAllocator_methods(root_module, root_module['ns3::RandomDiscPositionAllocator'])
    register_Ns3RandomPropagationLossModel_methods(root_module, root_module['ns3::RandomPropagationLossModel'])
    register_Ns3RandomRectanglePositionAllocator_methods(root_module, root_module['ns3::RandomRectanglePositionAllocator'])
    register_Ns3RandomRoomPositionAllocator_methods(root_module, root_module['ns3::RandomRoomPositionAllocator'])
    register_Ns3RandomVariableStream_methods(root_module, root_module['ns3::RandomVariableStream'])
    register_Ns3RangePropagationLossModel_methods(root_module, root_module['ns3::RangePropagationLossModel'])
    register_Ns3SameRoomPositionAllocator_methods(root_module, root_module['ns3::SameRoomPositionAllocator'])
    register_Ns3SequentialRandomVariable_methods(root_module, root_module['ns3::SequentialRandomVariable'])
    # SimpleRefCount template instantiations (bases for the Ptr-managed types below).
    register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
    register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    register_Ns3ThreeLogDistancePropagationLossModel_methods(root_module, root_module['ns3::ThreeLogDistancePropagationLossModel'])
    register_Ns3Time_methods(root_module, root_module['ns3::Time'])
    register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
    register_Ns3TriangularRandomVariable_methods(root_module, root_module['ns3::TriangularRandomVariable'])
    register_Ns3TwoRayGroundPropagationLossModel_methods(root_module, root_module['ns3::TwoRayGroundPropagationLossModel'])
    register_Ns3UniformDiscPositionAllocator_methods(root_module, root_module['ns3::UniformDiscPositionAllocator'])
    register_Ns3UniformRandomVariable_methods(root_module, root_module['ns3::UniformRandomVariable'])
    register_Ns3WeibullRandomVariable_methods(root_module, root_module['ns3::WeibullRandomVariable'])
    register_Ns3ZetaRandomVariable_methods(root_module, root_module['ns3::ZetaRandomVariable'])
    register_Ns3ZipfRandomVariable_methods(root_module, root_module['ns3::ZipfRandomVariable'])
    # Attribute system classes and the buildings-module Object subclasses.
    register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
    register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
    register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
    register_Ns3BoxChecker_methods(root_module, root_module['ns3::BoxChecker'])
    register_Ns3BoxValue_methods(root_module, root_module['ns3::BoxValue'])
    register_Ns3Building_methods(root_module, root_module['ns3::Building'])
    register_Ns3BuildingsPropagationLossModel_methods(root_module, root_module['ns3::BuildingsPropagationLossModel'])
    register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
    register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
    register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
    register_Ns3ConstantRandomVariable_methods(root_module, root_module['ns3::ConstantRandomVariable'])
    register_Ns3DeterministicRandomVariable_methods(root_module, root_module['ns3::DeterministicRandomVariable'])
    register_Ns3EmpiricalRandomVariable_methods(root_module, root_module['ns3::EmpiricalRandomVariable'])
    register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
    register_Ns3ErlangRandomVariable_methods(root_module, root_module['ns3::ErlangRandomVariable'])
    register_Ns3ExponentialRandomVariable_methods(root_module, root_module['ns3::ExponentialRandomVariable'])
    register_Ns3FixedRoomPositionAllocator_methods(root_module, root_module['ns3::FixedRoomPositionAllocator'])
    register_Ns3FixedRssLossModel_methods(root_module, root_module['ns3::FixedRssLossModel'])
    register_Ns3FriisPropagationLossModel_methods(root_module, root_module['ns3::FriisPropagationLossModel'])
    register_Ns3GammaRandomVariable_methods(root_module, root_module['ns3::GammaRandomVariable'])
    register_Ns3GridBuildingAllocator_methods(root_module, root_module['ns3::GridBuildingAllocator'])
    register_Ns3GridPositionAllocator_methods(root_module, root_module['ns3::GridPositionAllocator'])
    register_Ns3HybridBuildingsPropagationLossModel_methods(root_module, root_module['ns3::HybridBuildingsPropagationLossModel'])
    register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker'])
    register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue'])
    register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker'])
    register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue'])
    register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker'])
    register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue'])
    register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker'])
    register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue'])
    register_Ns3ItuR1238PropagationLossModel_methods(root_module, root_module['ns3::ItuR1238PropagationLossModel'])
    register_Ns3ListPositionAllocator_methods(root_module, root_module['ns3::ListPositionAllocator'])
    register_Ns3LogDistancePropagationLossModel_methods(root_module, root_module['ns3::LogDistancePropagationLossModel'])
    register_Ns3LogNormalRandomVariable_methods(root_module, root_module['ns3::LogNormalRandomVariable'])
    register_Ns3MatrixPropagationLossModel_methods(root_module, root_module['ns3::MatrixPropagationLossModel'])
    register_Ns3MobilityBuildingInfo_methods(root_module, root_module['ns3::MobilityBuildingInfo'])
    register_Ns3NakagamiPropagationLossModel_methods(root_module, root_module['ns3::NakagamiPropagationLossModel'])
    register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice'])
    register_Ns3Node_methods(root_module, root_module['ns3::Node'])
    register_Ns3NormalRandomVariable_methods(root_module, root_module['ns3::NormalRandomVariable'])
    register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
    register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
    register_Ns3OhBuildingsPropagationLossModel_methods(root_module, root_module['ns3::OhBuildingsPropagationLossModel'])
    register_Ns3ParetoRandomVariable_methods(root_module, root_module['ns3::ParetoRandomVariable'])
    register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
    register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
    register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
    register_Ns3Vector2DChecker_methods(root_module, root_module['ns3::Vector2DChecker'])
    register_Ns3Vector2DValue_methods(root_module, root_module['ns3::Vector2DValue'])
    register_Ns3Vector3DChecker_methods(root_module, root_module['ns3::Vector3DChecker'])
    register_Ns3Vector3DValue_methods(root_module, root_module['ns3::Vector3DValue'])
    register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker'])
    register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue'])
    # Hash implementation hierarchy.
    register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation'])
    register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a'])
    register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32'])
    register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64'])
    register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3'])
    return
def register_Ns3Address_methods(root_module, cls):
    """Bind ns3::Address (address.h, module 'network') onto its wrapper class."""
    # Comparison and stream-output operators.
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # Constructors: default, (type, buffer, len), and copy.
    cls.add_constructor([])
    cls.add_constructor([param('uint8_t', 'type'), param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    cls.add_constructor([param('ns3::Address const &', 'address')])
    # Member functions (name, return type, parameters, qualifiers).
    cls.add_method('CheckCompatible', 'bool', [param('uint8_t', 'type'), param('uint8_t', 'len')], is_const=True)
    cls.add_method('CopyAllFrom', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    cls.add_method('CopyAllTo', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint8_t', 'len')], is_const=True)
    cls.add_method('CopyFrom', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    cls.add_method('CopyTo', 'uint32_t', [param('uint8_t *', 'buffer')], is_const=True)
    cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'buffer')])
    cls.add_method('GetLength', 'uint8_t', [], is_const=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    cls.add_method('IsInvalid', 'bool', [], is_const=True)
    cls.add_method('IsMatchingType', 'bool', [param('uint8_t', 'type')], is_const=True)
    cls.add_method('Register', 'uint8_t', [], is_static=True)
    cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'buffer')], is_const=True)
    return
def register_Ns3AttributeConstructionList_methods(root_module, cls):
    """Bind ns3::AttributeConstructionList (attribute-construction-list.h, module 'core')."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
    cls.add_constructor([])
    # Add(name, checker, value).
    cls.add_method('Add', 'void', [param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
    # Const iterators over the stored items.
    cls.add_method('Begin', 'std::_List_const_iterator< ns3::AttributeConstructionList::Item >', [], is_const=True)
    cls.add_method('End', 'std::_List_const_iterator< ns3::AttributeConstructionList::Item >', [], is_const=True)
    # Lookup of a value by its checker.
    cls.add_method('Find', 'ns3::Ptr< ns3::AttributeValue >', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True)
    return
def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
    """Bind ns3::AttributeConstructionList::Item: constructors plus its three public fields."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
    # Public data members exposed as mutable instance attributes.
    cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
    return
def register_Ns3Box_methods(root_module, cls):
    """Bind ns3::Box (box.h, module 'mobility'): an axis-aligned 3D box."""
    cls.add_output_stream_operator()
    # Constructors: copy, explicit bounds, default.
    cls.add_constructor([param('ns3::Box const &', 'arg0')])
    cls.add_constructor([param('double', '_xMin'), param('double', '_xMax'), param('double', '_yMin'), param('double', '_yMax'), param('double', '_zMin'), param('double', '_zMax')])
    cls.add_constructor([])
    # Geometric queries.
    cls.add_method('CalculateIntersection', 'ns3::Vector', [param('ns3::Vector const &', 'current'), param('ns3::Vector const &', 'speed')], is_const=True)
    cls.add_method('GetClosestSide', 'ns3::Box::Side', [param('ns3::Vector const &', 'position')], is_const=True)
    cls.add_method('IsInside', 'bool', [param('ns3::Vector const &', 'position')], is_const=True)
    # The six boundary coordinates, all mutable doubles (registration order preserved).
    for bound in ('xMax', 'xMin', 'yMax', 'yMin', 'zMax', 'zMin'):
        cls.add_instance_attribute(bound, 'double', is_const=False)
    return
def register_Ns3BuildingContainer_methods(root_module, cls):
    """Bind ns3::BuildingContainer (building-container.h, module 'buildings')."""
    # Constructors: copy, default, from a single Building, from a building name.
    cls.add_constructor([param('ns3::BuildingContainer const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ptr< ns3::Building >', 'building')])
    cls.add_constructor([param('std::string', 'buildingName')])
    # Add() overloads: another container, a Building pointer, a name.
    cls.add_method('Add', 'void', [param('ns3::BuildingContainer', 'other')])
    cls.add_method('Add', 'void', [param('ns3::Ptr< ns3::Building >', 'building')])
    cls.add_method('Add', 'void', [param('std::string', 'buildingName')])
    cls.add_method('Begin', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Building > const, std::vector< ns3::Ptr< ns3::Building > > >', [], is_const=True)
    cls.add_method('Create', 'void', [param('uint32_t', 'n')])
    cls.add_method('End', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Building > const, std::vector< ns3::Ptr< ns3::Building > > >', [], is_const=True)
    cls.add_method('Get', 'ns3::Ptr< ns3::Building >', [param('uint32_t', 'i')], is_const=True)
    cls.add_method('GetGlobal', 'ns3::BuildingContainer', [], is_static=True)
    cls.add_method('GetN', 'uint32_t', [], is_const=True)
    return
def register_Ns3BuildingList_methods(root_module, cls):
    """Bind ns3::BuildingList (building-list.h, module 'buildings'): a static registry of buildings."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::BuildingList const &', 'arg0')])
    # All members are static: the registry is global.
    cls.add_method('Add', 'uint32_t', [param('ns3::Ptr< ns3::Building >', 'building')], is_static=True)
    cls.add_method('Begin', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Building > const, std::vector< ns3::Ptr< ns3::Building > > >', [], is_static=True)
    cls.add_method('End', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Building > const, std::vector< ns3::Ptr< ns3::Building > > >', [], is_static=True)
    cls.add_method('GetBuilding', 'ns3::Ptr< ns3::Building >', [param('uint32_t', 'n')], is_static=True)
    cls.add_method('GetNBuildings', 'uint32_t', [], is_static=True)
    return
def register_Ns3BuildingsHelper_methods(root_module, cls):
    """Bind ns3::BuildingsHelper (buildings-helper.h, module 'buildings')."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::BuildingsHelper const &', 'arg0')])
    # Static Install() overloads for a single node and for a container.
    cls.add_method('Install', 'void', [param('ns3::Ptr< ns3::Node >', 'node')], is_static=True)
    cls.add_method('Install', 'void', [param('ns3::NodeContainer', 'c')], is_static=True)
    # Consistency helpers for mobility/building state.
    cls.add_method('MakeConsistent', 'void', [param('ns3::Ptr< ns3::MobilityModel >', 'bmm')], is_static=True)
    cls.add_method('MakeMobilityModelConsistent', 'void', [], is_static=True)
    return
def register_Ns3CallbackBase_methods(root_module, cls):
    """Bind ns3::CallbackBase (callback.h, module 'core')."""
    cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('GetImpl', 'ns3::Ptr< ns3::CallbackImplBase >', [], is_const=True)
    # Protected members: the impl-taking constructor and the static Demangle helper.
    cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')], visibility='protected')
    cls.add_method('Demangle', 'std::string', [param('std::string const &', 'mangled')], is_static=True, visibility='protected')
    return
def register_Ns3ConstantVelocityHelper_methods(root_module, cls):
    """Bind ns3::ConstantVelocityHelper (constant-velocity-helper.h, module 'mobility')."""
    # Constructors: copy, default, position-only, position + velocity.
    cls.add_constructor([param('ns3::ConstantVelocityHelper const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Vector const &', 'position')])
    cls.add_constructor([param('ns3::Vector const &', 'position'), param('ns3::Vector const &', 'vel')])
    # State accessors.
    cls.add_method('GetCurrentPosition', 'ns3::Vector', [], is_const=True)
    cls.add_method('GetVelocity', 'ns3::Vector', [], is_const=True)
    # Mutators.
    cls.add_method('Pause', 'void', [])
    cls.add_method('SetPosition', 'void', [param('ns3::Vector const &', 'position')])
    cls.add_method('SetVelocity', 'void', [param('ns3::Vector const &', 'vel')])
    cls.add_method('Unpause', 'void', [])
    # Position-update helpers; UpdateWithBounds is overloaded on Rectangle and Box.
    cls.add_method('Update', 'void', [], is_const=True)
    cls.add_method('UpdateWithBounds', 'void', [param('ns3::Rectangle const &', 'rectangle')], is_const=True)
    cls.add_method('UpdateWithBounds', 'void', [param('ns3::Box const &', 'bounds')], is_const=True)
    return
def register_Ns3Hasher_methods(root_module, cls):
    """Bind ns3::Hasher (hash.h, module 'core'): the generic hash front-end."""
    # Constructors: copy, default, and from a Hash::Implementation pointer.
    cls.add_constructor([param('ns3::Hasher const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ptr< ns3::Hash::Implementation >', 'hp')])
    # GetHash32/GetHash64, each overloaded on (buffer, size) and on std::string.
    cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')])
    cls.add_method('GetHash32', 'uint32_t', [param('std::string const', 's')])
    cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')])
    cls.add_method('GetHash64', 'uint64_t', [param('std::string const', 's')])
    # clear() resets internal state and returns the Hasher for chaining.
    cls.add_method('clear', 'ns3::Hasher &', [])
    return
def register_Ns3Ipv4Address_methods(root_module, cls):
    """Register ns3::Ipv4Address bindings (ipv4-address.h, module 'network')."""
    # Comparison and output-stream operators.
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # Constructors: copy, default, from a raw uint32_t, and from a C string.
    for ctor_args in ([param('ns3::Ipv4Address const &', 'arg0')],
                      [],
                      [param('uint32_t', 'address')],
                      [param('char const *', 'address')]):
        cls.add_constructor(ctor_args)
    # Member functions; the flag dicts mark const / static qualifiers.
    const, static, plain = {'is_const': True}, {'is_static': True}, {}
    for name, retval, args, flags in (
        ('CombineMask', 'ns3::Ipv4Address', [param('ns3::Ipv4Mask const &', 'mask')], const),
        ('ConvertFrom', 'ns3::Ipv4Address', [param('ns3::Address const &', 'address')], static),
        ('Deserialize', 'ns3::Ipv4Address', [param('uint8_t const *', 'buf')], static),
        ('Get', 'uint32_t', [], const),
        ('GetAny', 'ns3::Ipv4Address', [], static),
        ('GetBroadcast', 'ns3::Ipv4Address', [], static),
        ('GetLoopback', 'ns3::Ipv4Address', [], static),
        ('GetSubnetDirectedBroadcast', 'ns3::Ipv4Address', [param('ns3::Ipv4Mask const &', 'mask')], const),
        ('GetZero', 'ns3::Ipv4Address', [], static),
        ('IsBroadcast', 'bool', [], const),
        ('IsEqual', 'bool', [param('ns3::Ipv4Address const &', 'other')], const),
        ('IsLocalMulticast', 'bool', [], const),
        ('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], static),
        ('IsMulticast', 'bool', [], const),
        ('IsSubnetDirectedBroadcast', 'bool', [param('ns3::Ipv4Mask const &', 'mask')], const),
        ('Print', 'void', [param('std::ostream &', 'os')], const),
        ('Serialize', 'void', [param('uint8_t *', 'buf')], const),
        ('Set', 'void', [param('uint32_t', 'address')], plain),
        ('Set', 'void', [param('char const *', 'address')], plain),
    ):
        cls.add_method(name, retval, args, **flags)
    return
def register_Ns3Ipv4Mask_methods(root_module, cls):
    """Register ns3::Ipv4Mask bindings (ipv4-address.h, module 'network')."""
    # Comparison and output-stream operators.
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # Constructors: copy, default, from a raw uint32_t, and from a C string.
    for ctor_args in ([param('ns3::Ipv4Mask const &', 'arg0')],
                      [],
                      [param('uint32_t', 'mask')],
                      [param('char const *', 'mask')]):
        cls.add_constructor(ctor_args)
    # Member functions; the flag dicts mark const / static qualifiers.
    const, static, plain = {'is_const': True}, {'is_static': True}, {}
    for name, retval, args, flags in (
        ('Get', 'uint32_t', [], const),
        ('GetInverse', 'uint32_t', [], const),
        ('GetLoopback', 'ns3::Ipv4Mask', [], static),
        ('GetOnes', 'ns3::Ipv4Mask', [], static),
        ('GetPrefixLength', 'uint16_t', [], const),
        ('GetZero', 'ns3::Ipv4Mask', [], static),
        ('IsEqual', 'bool', [param('ns3::Ipv4Mask', 'other')], const),
        ('IsMatch', 'bool', [param('ns3::Ipv4Address', 'a'), param('ns3::Ipv4Address', 'b')], const),
        ('Print', 'void', [param('std::ostream &', 'os')], const),
        ('Set', 'void', [param('uint32_t', 'mask')], plain),
    ):
        cls.add_method(name, retval, args, **flags)
    return
def register_Ns3Ipv6Address_methods(root_module, cls):
    """Register ns3::Ipv6Address bindings (ipv6-address.h, module 'network')."""
    # Comparison and output-stream operators.
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # Constructors: default, from a string, from raw bytes, copy, and from a pointer.
    for ctor_args in ([],
                      [param('char const *', 'address')],
                      [param('uint8_t *', 'address')],
                      [param('ns3::Ipv6Address const &', 'addr')],
                      [param('ns3::Ipv6Address const *', 'addr')]):
        cls.add_constructor(ctor_args)
    # Member functions; the flag dicts mark const / static qualifiers.
    const, static, plain = {'is_const': True}, {'is_static': True}, {}
    for name, retval, args, flags in (
        ('CombinePrefix', 'ns3::Ipv6Address', [param('ns3::Ipv6Prefix const &', 'prefix')], plain),
        ('ConvertFrom', 'ns3::Ipv6Address', [param('ns3::Address const &', 'address')], static),
        ('Deserialize', 'ns3::Ipv6Address', [param('uint8_t const *', 'buf')], static),
        ('GetAllHostsMulticast', 'ns3::Ipv6Address', [], static),
        ('GetAllNodesMulticast', 'ns3::Ipv6Address', [], static),
        ('GetAllRoutersMulticast', 'ns3::Ipv6Address', [], static),
        ('GetAny', 'ns3::Ipv6Address', [], static),
        ('GetBytes', 'void', [param('uint8_t *', 'buf')], const),
        ('GetIpv4MappedAddress', 'ns3::Ipv4Address', [], const),
        ('GetLoopback', 'ns3::Ipv6Address', [], static),
        ('GetOnes', 'ns3::Ipv6Address', [], static),
        ('GetZero', 'ns3::Ipv6Address', [], static),
        ('IsAllHostsMulticast', 'bool', [], const),
        ('IsAllNodesMulticast', 'bool', [], const),
        ('IsAllRoutersMulticast', 'bool', [], const),
        ('IsAny', 'bool', [], const),
        ('IsDocumentation', 'bool', [], const),
        ('IsEqual', 'bool', [param('ns3::Ipv6Address const &', 'other')], const),
        ('IsIpv4MappedAddress', 'bool', [], const),
        ('IsLinkLocal', 'bool', [], const),
        ('IsLinkLocalMulticast', 'bool', [], const),
        ('IsLocalhost', 'bool', [], const),
        ('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], static),
        ('IsMulticast', 'bool', [], const),
        ('IsSolicitedMulticast', 'bool', [], const),
        ('MakeAutoconfiguredAddress', 'ns3::Ipv6Address',
         [param('ns3::Mac16Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], static),
        ('MakeAutoconfiguredAddress', 'ns3::Ipv6Address',
         [param('ns3::Mac48Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], static),
        ('MakeAutoconfiguredAddress', 'ns3::Ipv6Address',
         [param('ns3::Mac64Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], static),
        ('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address',
         [param('ns3::Mac16Address', 'mac')], static),
        ('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address',
         [param('ns3::Mac48Address', 'mac')], static),
        ('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address',
         [param('ns3::Mac64Address', 'mac')], static),
        ('MakeIpv4MappedAddress', 'ns3::Ipv6Address', [param('ns3::Ipv4Address', 'addr')], static),
        ('MakeSolicitedAddress', 'ns3::Ipv6Address', [param('ns3::Ipv6Address', 'addr')], static),
        ('Print', 'void', [param('std::ostream &', 'os')], const),
        ('Serialize', 'void', [param('uint8_t *', 'buf')], const),
        ('Set', 'void', [param('char const *', 'address')], plain),
        ('Set', 'void', [param('uint8_t *', 'address')], plain),
    ):
        cls.add_method(name, retval, args, **flags)
    return
def register_Ns3Ipv6Prefix_methods(root_module, cls):
    """Register ns3::Ipv6Prefix bindings (ipv6-address.h, module 'network')."""
    # Comparison and output-stream operators.
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # Constructors: default, raw bytes, string, prefix length, copy, pointer.
    for ctor_args in ([],
                      [param('uint8_t *', 'prefix')],
                      [param('char const *', 'prefix')],
                      [param('uint8_t', 'prefix')],
                      [param('ns3::Ipv6Prefix const &', 'prefix')],
                      [param('ns3::Ipv6Prefix const *', 'prefix')]):
        cls.add_constructor(ctor_args)
    # Member functions; the flag dicts mark const / static qualifiers.
    const, static = {'is_const': True}, {'is_static': True}
    for name, retval, args, flags in (
        ('GetBytes', 'void', [param('uint8_t *', 'buf')], const),
        ('GetLoopback', 'ns3::Ipv6Prefix', [], static),
        ('GetOnes', 'ns3::Ipv6Prefix', [], static),
        ('GetPrefixLength', 'uint8_t', [], const),
        ('GetZero', 'ns3::Ipv6Prefix', [], static),
        ('IsEqual', 'bool', [param('ns3::Ipv6Prefix const &', 'other')], const),
        ('IsMatch', 'bool', [param('ns3::Ipv6Address', 'a'), param('ns3::Ipv6Address', 'b')], const),
        ('Print', 'void', [param('std::ostream &', 'os')], const),
    ):
        cls.add_method(name, retval, args, **flags)
    return
def register_Ns3NodeContainer_methods(root_module, cls):
    """Register ns3::NodeContainer bindings (node-container.h, module 'network')."""
    # Constructors: copy, default, a single node (by Ptr or by registered name).
    cls.add_constructor([param('ns3::NodeContainer const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ptr< ns3::Node >', 'node')])
    cls.add_constructor([param('std::string', 'nodeName')])
    # Concatenating constructors taking 2..5 containers named a, b, c, d, e.
    letters = ('a', 'b', 'c', 'd', 'e')
    for count in range(2, 6):
        cls.add_constructor([param('ns3::NodeContainer const &', letter)
                             for letter in letters[:count]])
    # Begin()/End() both return this vector iterator type.
    iterator = ('__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node >, '
                'std::vector< ns3::Ptr< ns3::Node > > >')
    # Member functions; the flag dicts mark const / static qualifiers.
    const, static, plain = {'is_const': True}, {'is_static': True}, {}
    for name, retval, args, flags in (
        ('Add', 'void', [param('ns3::NodeContainer', 'other')], plain),
        ('Add', 'void', [param('ns3::Ptr< ns3::Node >', 'node')], plain),
        ('Add', 'void', [param('std::string', 'nodeName')], plain),
        ('Begin', iterator, [], const),
        ('Create', 'void', [param('uint32_t', 'n')], plain),
        ('Create', 'void', [param('uint32_t', 'n'), param('uint32_t', 'systemId')], plain),
        ('End', iterator, [], const),
        ('Get', 'ns3::Ptr< ns3::Node >', [param('uint32_t', 'i')], const),
        ('GetGlobal', 'ns3::NodeContainer', [], static),
        ('GetN', 'uint32_t', [], const),
    ):
        cls.add_method(name, retval, args, **flags)
    return
def register_Ns3ObjectBase_methods(root_module, cls):
    """Register ns3::ObjectBase bindings (object-base.h, module 'core')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
    # Member functions. Each entry carries its own keyword dict because this
    # class mixes const / static / virtual / protected qualifiers.
    for name, retval, args, flags in (
        ('GetAttribute', 'void',
         [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
         {'is_const': True}),
        ('GetAttributeFailSafe', 'bool',
         [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
         {'is_const': True}),
        ('GetInstanceTypeId', 'ns3::TypeId', [],
         {'is_pure_virtual': True, 'is_const': True, 'is_virtual': True}),
        ('GetTypeId', 'ns3::TypeId', [], {'is_static': True}),
        ('SetAttribute', 'void',
         [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')],
         {}),
        ('SetAttributeFailSafe', 'bool',
         [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')],
         {}),
        ('TraceConnect', 'bool',
         [param('std::string', 'name'), param('std::string', 'context'),
          param('ns3::CallbackBase const &', 'cb')],
         {}),
        ('TraceConnectWithoutContext', 'bool',
         [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')],
         {}),
        ('TraceDisconnect', 'bool',
         [param('std::string', 'name'), param('std::string', 'context'),
          param('ns3::CallbackBase const &', 'cb')],
         {}),
        ('TraceDisconnectWithoutContext', 'bool',
         [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')],
         {}),
        ('ConstructSelf', 'void',
         [param('ns3::AttributeConstructionList const &', 'attributes')],
         {'visibility': 'protected'}),
        ('NotifyConstructionCompleted', 'void', [],
         {'visibility': 'protected', 'is_virtual': True}),
    ):
        cls.add_method(name, retval, args, **flags)
    return
def register_Ns3ObjectDeleter_methods(root_module, cls):
    """Register ns3::ObjectDeleter bindings (object.h, module 'core')."""
    # Default and copy constructors.
    for ctor_args in ([], [param('ns3::ObjectDeleter const &', 'arg0')]):
        cls.add_constructor(ctor_args)
    # static void Delete(ns3::Object * object)
    cls.add_method('Delete',
                   'void',
                   [param('ns3::Object *', 'object')],
                   is_static=True)
    return
def register_Ns3ObjectFactory_methods(root_module, cls):
    """Register ns3::ObjectFactory bindings (object-factory.h, module 'core')."""
    cls.add_output_stream_operator()
    # Constructors: copy, default, and from a TypeId name string.
    for ctor_args in ([param('ns3::ObjectFactory const &', 'arg0')],
                      [],
                      [param('std::string', 'typeId')]):
        cls.add_constructor(ctor_args)
    # Member functions; the flag dict marks const qualifiers.
    const, plain = {'is_const': True}, {}
    for name, retval, args, flags in (
        ('Create', 'ns3::Ptr< ns3::Object >', [], const),
        ('GetTypeId', 'ns3::TypeId', [], const),
        ('Set', 'void',
         [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')],
         plain),
        ('SetTypeId', 'void', [param('ns3::TypeId', 'tid')], plain),
        ('SetTypeId', 'void', [param('char const *', 'tid')], plain),
        ('SetTypeId', 'void', [param('std::string', 'tid')], plain),
    ):
        cls.add_method(name, retval, args, **flags)
    return
def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
    """Register ns3::SimpleRefCount<Object, ObjectBase, ObjectDeleter> bindings (simple-ref-count.h, module 'core')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor(
        [param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3TagBuffer_methods(root_module, cls):
    """Register ns3::TagBuffer bindings (tag-buffer.h, module 'network')."""
    # Constructors: copy, and from a (start, end) byte-pointer pair.
    cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')])
    cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')])
    # Member functions (none take const/static flags), in generated order.
    for name, retval, args in (
        ('CopyFrom', 'void', [param('ns3::TagBuffer', 'o')]),
        ('Read', 'void', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')]),
        ('ReadDouble', 'double', []),
        ('ReadU16', 'uint16_t', []),
        ('ReadU32', 'uint32_t', []),
        ('ReadU64', 'uint64_t', []),
        ('ReadU8', 'uint8_t', []),
        ('TrimAtEnd', 'void', [param('uint32_t', 'trim')]),
        ('Write', 'void', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')]),
        ('WriteDouble', 'void', [param('double', 'v')]),
        ('WriteU16', 'void', [param('uint16_t', 'data')]),
        ('WriteU32', 'void', [param('uint32_t', 'data')]),
        ('WriteU64', 'void', [param('uint64_t', 'v')]),
        ('WriteU8', 'void', [param('uint8_t', 'v')]),
    ):
        cls.add_method(name, retval, args)
    return
def register_Ns3TimeWithUnit_methods(root_module, cls):
    """Register ns3::TimeWithUnit constructors and its output stream operator on the binding wrapper *cls*.

    Generated binding-registration code; each call is documented by the
    ``##`` comment above it, quoting the wrapped C++ declaration.
    """
    cls.add_output_stream_operator()
    ## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::TimeWithUnit const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TimeWithUnit const &', 'arg0')])
    ## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::Time const time, ns3::Time::Unit const unit) [constructor]
    cls.add_constructor([param('ns3::Time const', 'time'), param('ns3::Time::Unit const', 'unit')])
    return
def register_Ns3TypeId_methods(root_module, cls):
    """Register the ns3::TypeId API (comparison/stream operators, constructors, and member functions) on the binding wrapper *cls*.

    Generated binding-registration code; each call is documented by the
    ``##`` comment above it, quoting the wrapped C++ declaration.
    """
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
    cls.add_constructor([param('char const *', 'name')])
    ## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor]
    cls.add_constructor([param('ns3::TypeId const &', 'o')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('AddAttribute',
                   'ns3::TypeId',
                   [param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('AddAttribute',
                   'ns3::TypeId',
                   [param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function]
    cls.add_method('AddTraceSource',
                   'ns3::TypeId',
                   [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')],
                   deprecated=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor, std::string callback) [member function]
    cls.add_method('AddTraceSource',
                   'ns3::TypeId',
                   [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor'), param('std::string', 'callback')])
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function]
    cls.add_method('GetAttribute',
                   'ns3::TypeId::AttributeInformation',
                   [param('uint32_t', 'i')],
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function]
    cls.add_method('GetAttributeFullName',
                   'std::string',
                   [param('uint32_t', 'i')],
                   is_const=True)
    ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function]
    cls.add_method('GetAttributeN',
                   'uint32_t',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function]
    cls.add_method('GetConstructor',
                   'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
    cls.add_method('GetGroupName',
                   'std::string',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetHash() const [member function]
    cls.add_method('GetHash',
                   'uint32_t',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
    cls.add_method('GetName',
                   'std::string',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
    cls.add_method('GetParent',
                   'ns3::TypeId',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function]
    cls.add_method('GetRegistered',
                   'ns3::TypeId',
                   [param('uint32_t', 'i')],
                   is_static=True)
    ## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function]
    cls.add_method('GetRegisteredN',
                   'uint32_t',
                   [],
                   is_static=True)
    ## type-id.h (module 'core'): std::size_t ns3::TypeId::GetSize() const [member function]
    cls.add_method('GetSize',
                   'std::size_t',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function]
    cls.add_method('GetTraceSource',
                   'ns3::TypeId::TraceSourceInformation',
                   [param('uint32_t', 'i')],
                   is_const=True)
    ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function]
    cls.add_method('GetTraceSourceN',
                   'uint32_t',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
    cls.add_method('GetUid',
                   'uint16_t',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
    cls.add_method('HasConstructor',
                   'bool',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
    cls.add_method('HasParent',
                   'bool',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
    cls.add_method('HideFromDocumentation',
                   'ns3::TypeId',
                   [])
    ## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
    cls.add_method('IsChildOf',
                   'bool',
                   [param('ns3::TypeId', 'other')],
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]
    cls.add_method('LookupAttributeByName',
                   'bool',
                   [param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)],
                   is_const=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByHash(uint32_t hash) [member function]
    cls.add_method('LookupByHash',
                   'ns3::TypeId',
                   [param('uint32_t', 'hash')],
                   is_static=True)
    ## type-id.h (module 'core'): static bool ns3::TypeId::LookupByHashFailSafe(uint32_t hash, ns3::TypeId * tid) [member function]
    cls.add_method('LookupByHashFailSafe',
                   'bool',
                   [param('uint32_t', 'hash'), param('ns3::TypeId *', 'tid')],
                   is_static=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
    cls.add_method('LookupByName',
                   'ns3::TypeId',
                   [param('std::string', 'name')],
                   is_static=True)
    ## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
    cls.add_method('LookupTraceSourceByName',
                   'ns3::Ptr< ns3::TraceSourceAccessor const >',
                   [param('std::string', 'name')],
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
    cls.add_method('MustHideFromDocumentation',
                   'bool',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function]
    cls.add_method('SetAttributeInitialValue',
                   'bool',
                   [param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
    cls.add_method('SetGroupName',
                   'ns3::TypeId',
                   [param('std::string', 'groupName')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
    cls.add_method('SetParent',
                   'ns3::TypeId',
                   [param('ns3::TypeId', 'tid')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetSize(std::size_t size) [member function]
    cls.add_method('SetSize',
                   'ns3::TypeId',
                   [param('std::size_t', 'size')])
    ## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t tid) [member function]
    cls.add_method('SetUid',
                   'void',
                   [param('uint16_t', 'tid')])
    return
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
    """Register ns3::TypeId::AttributeInformation constructors and public data members on the binding wrapper *cls*.

    Generated binding-registration code; each call is documented by the
    ``##`` comment above it, quoting the wrapped C++ declaration.
    """
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable]
    cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable]
    cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable]
    cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable]
    cls.add_instance_attribute('help', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable]
    cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable]
    cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
    return
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
    """Register ns3::TypeId::TraceSourceInformation constructors and public data members on the binding wrapper *cls*.

    Generated binding-registration code; each call is documented by the
    ``##`` comment above it, quoting the wrapped C++ declaration.
    """
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable]
    cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::callback [variable]
    cls.add_instance_attribute('callback', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable]
    cls.add_instance_attribute('help', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    return
def register_Ns3Vector2D_methods(root_module, cls):
    """Register ns3::Vector2D constructors, stream operator, and x/y members on the binding wrapper *cls*.

    Generated binding-registration code; each call is documented by the
    ``##`` comment above it, quoting the wrapped C++ declaration.
    """
    cls.add_output_stream_operator()
    ## vector.h (module 'core'): ns3::Vector2D::Vector2D(ns3::Vector2D const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Vector2D const &', 'arg0')])
    ## vector.h (module 'core'): ns3::Vector2D::Vector2D(double _x, double _y) [constructor]
    cls.add_constructor([param('double', '_x'), param('double', '_y')])
    ## vector.h (module 'core'): ns3::Vector2D::Vector2D() [constructor]
    cls.add_constructor([])
    ## vector.h (module 'core'): ns3::Vector2D::x [variable]
    cls.add_instance_attribute('x', 'double', is_const=False)
    ## vector.h (module 'core'): ns3::Vector2D::y [variable]
    cls.add_instance_attribute('y', 'double', is_const=False)
    return
def register_Ns3Vector3D_methods(root_module, cls):
    """Register ns3::Vector3D constructors, stream operator, and x/y/z members on the binding wrapper *cls*.

    Generated binding-registration code; each call is documented by the
    ``##`` comment above it, quoting the wrapped C++ declaration.
    """
    cls.add_output_stream_operator()
    ## vector.h (module 'core'): ns3::Vector3D::Vector3D(ns3::Vector3D const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Vector3D const &', 'arg0')])
    ## vector.h (module 'core'): ns3::Vector3D::Vector3D(double _x, double _y, double _z) [constructor]
    cls.add_constructor([param('double', '_x'), param('double', '_y'), param('double', '_z')])
    ## vector.h (module 'core'): ns3::Vector3D::Vector3D() [constructor]
    cls.add_constructor([])
    ## vector.h (module 'core'): ns3::Vector3D::x [variable]
    cls.add_instance_attribute('x', 'double', is_const=False)
    ## vector.h (module 'core'): ns3::Vector3D::y [variable]
    cls.add_instance_attribute('y', 'double', is_const=False)
    ## vector.h (module 'core'): ns3::Vector3D::z [variable]
    cls.add_instance_attribute('z', 'double', is_const=False)
    return
def register_Ns3Empty_methods(root_module, cls):
    """Register ns3::empty default and copy constructors on the binding wrapper *cls*.

    Generated binding-registration code; each call is documented by the
    ``##`` comment above it, quoting the wrapped C++ declaration.
    """
    ## empty.h (module 'core'): ns3::empty::empty() [constructor]
    cls.add_constructor([])
    ## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::empty const &', 'arg0')])
    return
def register_Ns3Int64x64_t_methods(root_module, cls):
    """Register ns3::int64x64_t arithmetic/comparison operators, constructors, and member functions on the binding wrapper *cls*.

    Generated binding-registration code; each call is documented by the
    ``##`` comment above it, quoting the wrapped C++ declaration.
    """
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
    cls.add_unary_numeric_operator('-')
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('>')
    cls.add_binary_comparison_operator('!=')
    cls.add_inplace_numeric_operator('*=', param('ns3::int64x64_t const &', u'right'))
    cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', u'right'))
    cls.add_inplace_numeric_operator('-=', param('ns3::int64x64_t const &', u'right'))
    cls.add_inplace_numeric_operator('/=', param('ns3::int64x64_t const &', u'right'))
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('<=')
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('>=')
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t() [constructor]
    cls.add_constructor([])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(double v) [constructor]
    cls.add_constructor([param('double', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long double v) [constructor]
    cls.add_constructor([param('long double', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int v) [constructor]
    cls.add_constructor([param('int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long int v) [constructor]
    cls.add_constructor([param('long int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long int v) [constructor]
    cls.add_constructor([param('long long int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(unsigned int v) [constructor]
    cls.add_constructor([param('unsigned int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long unsigned int v) [constructor]
    cls.add_constructor([param('long unsigned int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long unsigned int v) [constructor]
    cls.add_constructor([param('long long unsigned int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int64_t hi, uint64_t lo) [constructor]
    cls.add_constructor([param('int64_t', 'hi'), param('uint64_t', 'lo')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(ns3::int64x64_t const & o) [copy constructor]
    cls.add_constructor([param('ns3::int64x64_t const &', 'o')])
    ## int64x64-double.h (module 'core'): double ns3::int64x64_t::GetDouble() const [member function]
    cls.add_method('GetDouble',
                   'double',
                   [],
                   is_const=True)
    ## int64x64-double.h (module 'core'): int64_t ns3::int64x64_t::GetHigh() const [member function]
    cls.add_method('GetHigh',
                   'int64_t',
                   [],
                   is_const=True)
    ## int64x64-double.h (module 'core'): uint64_t ns3::int64x64_t::GetLow() const [member function]
    cls.add_method('GetLow',
                   'uint64_t',
                   [],
                   is_const=True)
    ## int64x64-double.h (module 'core'): static ns3::int64x64_t ns3::int64x64_t::Invert(uint64_t v) [member function]
    cls.add_method('Invert',
                   'ns3::int64x64_t',
                   [param('uint64_t', 'v')],
                   is_static=True)
    ## int64x64-double.h (module 'core'): void ns3::int64x64_t::MulByInvert(ns3::int64x64_t const & o) [member function]
    cls.add_method('MulByInvert',
                   'void',
                   [param('ns3::int64x64_t const &', 'o')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::implementation [variable]
    cls.add_static_attribute('implementation', 'ns3::int64x64_t::impl_type const', is_const=True)
    return
def register_Ns3Object_methods(root_module, cls):
    """Register ns3::Object constructors and member functions (including protected virtuals) on the binding wrapper *cls*.

    Generated binding-registration code; each call is documented by the
    ``##`` comment above it, quoting the wrapped C++ declaration.
    """
    ## object.h (module 'core'): ns3::Object::Object() [constructor]
    cls.add_constructor([])
    ## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function]
    cls.add_method('AggregateObject',
                   'void',
                   [param('ns3::Ptr< ns3::Object >', 'other')])
    ## object.h (module 'core'): void ns3::Object::Dispose() [member function]
    cls.add_method('Dispose',
                   'void',
                   [])
    ## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function]
    cls.add_method('GetAggregateIterator',
                   'ns3::Object::AggregateIterator',
                   [],
                   is_const=True)
    ## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId',
                   'ns3::TypeId',
                   [],
                   is_const=True, is_virtual=True)
    ## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## object.h (module 'core'): void ns3::Object::Initialize() [member function]
    cls.add_method('Initialize',
                   'void',
                   [])
    ## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [copy constructor]
    cls.add_constructor([param('ns3::Object const &', 'o')],
                        visibility='protected')
    ## object.h (module 'core'): void ns3::Object::DoDispose() [member function]
    cls.add_method('DoDispose',
                   'void',
                   [],
                   visibility='protected', is_virtual=True)
    ## object.h (module 'core'): void ns3::Object::DoInitialize() [member function]
    cls.add_method('DoInitialize',
                   'void',
                   [],
                   visibility='protected', is_virtual=True)
    ## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function]
    cls.add_method('NotifyNewAggregate',
                   'void',
                   [],
                   visibility='protected', is_virtual=True)
    return
def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
    """Register ns3::Object::AggregateIterator constructors and HasNext/Next on the binding wrapper *cls*.

    Generated binding-registration code; each call is documented by the
    ``##`` comment above it, quoting the wrapped C++ declaration.
    """
    ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
    ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor]
    cls.add_constructor([])
    ## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function]
    cls.add_method('HasNext',
                   'bool',
                   [],
                   is_const=True)
    ## object.h (module 'core'): ns3::Ptr<ns3::Object const> ns3::Object::AggregateIterator::Next() [member function]
    cls.add_method('Next',
                   'ns3::Ptr< ns3::Object const >',
                   [])
    return
def register_Ns3PositionAllocator_methods(root_module, cls):
    """Register the abstract ns3::PositionAllocator base-class API (pure virtual AssignStreams/GetNext) on the binding wrapper *cls*.

    Generated binding-registration code; each call is documented by the
    ``##`` comment above it, quoting the wrapped C++ declaration.
    """
    ## position-allocator.h (module 'mobility'): ns3::PositionAllocator::PositionAllocator(ns3::PositionAllocator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PositionAllocator const &', 'arg0')])
    ## position-allocator.h (module 'mobility'): ns3::PositionAllocator::PositionAllocator() [constructor]
    cls.add_constructor([])
    ## position-allocator.h (module 'mobility'): int64_t ns3::PositionAllocator::AssignStreams(int64_t stream) [member function]
    cls.add_method('AssignStreams',
                   'int64_t',
                   [param('int64_t', 'stream')],
                   is_pure_virtual=True, is_virtual=True)
    ## position-allocator.h (module 'mobility'): ns3::Vector ns3::PositionAllocator::GetNext() const [member function]
    cls.add_method('GetNext',
                   'ns3::Vector',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## position-allocator.h (module 'mobility'): static ns3::TypeId ns3::PositionAllocator::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    return
def register_Ns3PropagationLossModel_methods(root_module, cls):
    """Register the abstract ns3::PropagationLossModel base-class API (chainable SetNext/GetNext, CalcRxPower, private pure virtuals) on the binding wrapper *cls*.

    Generated binding-registration code; each call is documented by the
    ``##`` comment above it, quoting the wrapped C++ declaration.
    """
    ## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::PropagationLossModel::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## propagation-loss-model.h (module 'propagation'): ns3::PropagationLossModel::PropagationLossModel() [constructor]
    cls.add_constructor([])
    ## propagation-loss-model.h (module 'propagation'): void ns3::PropagationLossModel::SetNext(ns3::Ptr<ns3::PropagationLossModel> next) [member function]
    cls.add_method('SetNext',
                   'void',
                   [param('ns3::Ptr< ns3::PropagationLossModel >', 'next')])
    ## propagation-loss-model.h (module 'propagation'): ns3::Ptr<ns3::PropagationLossModel> ns3::PropagationLossModel::GetNext() [member function]
    cls.add_method('GetNext',
                   'ns3::Ptr< ns3::PropagationLossModel >',
                   [])
    ## propagation-loss-model.h (module 'propagation'): double ns3::PropagationLossModel::CalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
    cls.add_method('CalcRxPower',
                   'double',
                   [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
                   is_const=True)
    ## propagation-loss-model.h (module 'propagation'): int64_t ns3::PropagationLossModel::AssignStreams(int64_t stream) [member function]
    cls.add_method('AssignStreams',
                   'int64_t',
                   [param('int64_t', 'stream')])
    ## propagation-loss-model.h (module 'propagation'): double ns3::PropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
    cls.add_method('DoCalcRxPower',
                   'double',
                   [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
                   is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
    ## propagation-loss-model.h (module 'propagation'): int64_t ns3::PropagationLossModel::DoAssignStreams(int64_t stream) [member function]
    cls.add_method('DoAssignStreams',
                   'int64_t',
                   [param('int64_t', 'stream')],
                   is_pure_virtual=True, visibility='private', is_virtual=True)
    return
def register_Ns3RandomBoxPositionAllocator_methods(root_module, cls):
    """Register ns3::RandomBoxPositionAllocator constructors, virtual overrides, and SetX/SetY/SetZ on the binding wrapper *cls*.

    Generated binding-registration code; each call is documented by the
    ``##`` comment above it, quoting the wrapped C++ declaration.
    """
    ## position-allocator.h (module 'mobility'): ns3::RandomBoxPositionAllocator::RandomBoxPositionAllocator(ns3::RandomBoxPositionAllocator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::RandomBoxPositionAllocator const &', 'arg0')])
    ## position-allocator.h (module 'mobility'): ns3::RandomBoxPositionAllocator::RandomBoxPositionAllocator() [constructor]
    cls.add_constructor([])
    ## position-allocator.h (module 'mobility'): int64_t ns3::RandomBoxPositionAllocator::AssignStreams(int64_t stream) [member function]
    cls.add_method('AssignStreams',
                   'int64_t',
                   [param('int64_t', 'stream')],
                   is_virtual=True)
    ## position-allocator.h (module 'mobility'): ns3::Vector ns3::RandomBoxPositionAllocator::GetNext() const [member function]
    cls.add_method('GetNext',
                   'ns3::Vector',
                   [],
                   is_const=True, is_virtual=True)
    ## position-allocator.h (module 'mobility'): static ns3::TypeId ns3::RandomBoxPositionAllocator::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## position-allocator.h (module 'mobility'): void ns3::RandomBoxPositionAllocator::SetX(ns3::Ptr<ns3::RandomVariableStream> x) [member function]
    cls.add_method('SetX',
                   'void',
                   [param('ns3::Ptr< ns3::RandomVariableStream >', 'x')])
    ## position-allocator.h (module 'mobility'): void ns3::RandomBoxPositionAllocator::SetY(ns3::Ptr<ns3::RandomVariableStream> y) [member function]
    cls.add_method('SetY',
                   'void',
                   [param('ns3::Ptr< ns3::RandomVariableStream >', 'y')])
    ## position-allocator.h (module 'mobility'): void ns3::RandomBoxPositionAllocator::SetZ(ns3::Ptr<ns3::RandomVariableStream> z) [member function]
    cls.add_method('SetZ',
                   'void',
                   [param('ns3::Ptr< ns3::RandomVariableStream >', 'z')])
    return
def register_Ns3RandomBuildingPositionAllocator_methods(root_module, cls):
    """Register ns3::RandomBuildingPositionAllocator constructors and virtual overrides on the binding wrapper *cls*.

    Generated binding-registration code; each call is documented by the
    ``##`` comment above it, quoting the wrapped C++ declaration.
    """
    ## building-position-allocator.h (module 'buildings'): ns3::RandomBuildingPositionAllocator::RandomBuildingPositionAllocator(ns3::RandomBuildingPositionAllocator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::RandomBuildingPositionAllocator const &', 'arg0')])
    ## building-position-allocator.h (module 'buildings'): ns3::RandomBuildingPositionAllocator::RandomBuildingPositionAllocator() [constructor]
    cls.add_constructor([])
    ## building-position-allocator.h (module 'buildings'): int64_t ns3::RandomBuildingPositionAllocator::AssignStreams(int64_t stream) [member function]
    cls.add_method('AssignStreams',
                   'int64_t',
                   [param('int64_t', 'stream')],
                   is_virtual=True)
    ## building-position-allocator.h (module 'buildings'): ns3::Vector ns3::RandomBuildingPositionAllocator::GetNext() const [member function]
    cls.add_method('GetNext',
                   'ns3::Vector',
                   [],
                   is_const=True, is_virtual=True)
    ## building-position-allocator.h (module 'buildings'): static ns3::TypeId ns3::RandomBuildingPositionAllocator::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    return
def register_Ns3RandomDiscPositionAllocator_methods(root_module, cls):
    """Register ns3::RandomDiscPositionAllocator constructors, virtual overrides, and SetRho/SetTheta/SetX/SetY on the binding wrapper *cls*.

    Generated binding-registration code; each call is documented by the
    ``##`` comment above it, quoting the wrapped C++ declaration.
    """
    ## position-allocator.h (module 'mobility'): ns3::RandomDiscPositionAllocator::RandomDiscPositionAllocator(ns3::RandomDiscPositionAllocator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::RandomDiscPositionAllocator const &', 'arg0')])
    ## position-allocator.h (module 'mobility'): ns3::RandomDiscPositionAllocator::RandomDiscPositionAllocator() [constructor]
    cls.add_constructor([])
    ## position-allocator.h (module 'mobility'): int64_t ns3::RandomDiscPositionAllocator::AssignStreams(int64_t stream) [member function]
    cls.add_method('AssignStreams',
                   'int64_t',
                   [param('int64_t', 'stream')],
                   is_virtual=True)
    ## position-allocator.h (module 'mobility'): ns3::Vector ns3::RandomDiscPositionAllocator::GetNext() const [member function]
    cls.add_method('GetNext',
                   'ns3::Vector',
                   [],
                   is_const=True, is_virtual=True)
    ## position-allocator.h (module 'mobility'): static ns3::TypeId ns3::RandomDiscPositionAllocator::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## position-allocator.h (module 'mobility'): void ns3::RandomDiscPositionAllocator::SetRho(ns3::Ptr<ns3::RandomVariableStream> rho) [member function]
    cls.add_method('SetRho',
                   'void',
                   [param('ns3::Ptr< ns3::RandomVariableStream >', 'rho')])
    ## position-allocator.h (module 'mobility'): void ns3::RandomDiscPositionAllocator::SetTheta(ns3::Ptr<ns3::RandomVariableStream> theta) [member function]
    cls.add_method('SetTheta',
                   'void',
                   [param('ns3::Ptr< ns3::RandomVariableStream >', 'theta')])
    ## position-allocator.h (module 'mobility'): void ns3::RandomDiscPositionAllocator::SetX(double x) [member function]
    cls.add_method('SetX',
                   'void',
                   [param('double', 'x')])
    ## position-allocator.h (module 'mobility'): void ns3::RandomDiscPositionAllocator::SetY(double y) [member function]
    cls.add_method('SetY',
                   'void',
                   [param('double', 'y')])
    return
def register_Ns3RandomPropagationLossModel_methods(root_module, cls):
    """Register ns3::RandomPropagationLossModel constructor and its private virtual overrides on the binding wrapper *cls*.

    Generated binding-registration code; each call is documented by the
    ``##`` comment above it, quoting the wrapped C++ declaration.
    """
    ## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::RandomPropagationLossModel::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## propagation-loss-model.h (module 'propagation'): ns3::RandomPropagationLossModel::RandomPropagationLossModel() [constructor]
    cls.add_constructor([])
    ## propagation-loss-model.h (module 'propagation'): double ns3::RandomPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
    cls.add_method('DoCalcRxPower',
                   'double',
                   [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
                   is_const=True, visibility='private', is_virtual=True)
    ## propagation-loss-model.h (module 'propagation'): int64_t ns3::RandomPropagationLossModel::DoAssignStreams(int64_t stream) [member function]
    cls.add_method('DoAssignStreams',
                   'int64_t',
                   [param('int64_t', 'stream')],
                   visibility='private', is_virtual=True)
    return
def register_Ns3RandomRectanglePositionAllocator_methods(root_module, cls):
    """Register ns3::RandomRectanglePositionAllocator constructors, virtual overrides, and SetX/SetY on the binding wrapper *cls*.

    Generated binding-registration code; each call is documented by the
    ``##`` comment above it, quoting the wrapped C++ declaration.
    """
    ## position-allocator.h (module 'mobility'): ns3::RandomRectanglePositionAllocator::RandomRectanglePositionAllocator(ns3::RandomRectanglePositionAllocator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::RandomRectanglePositionAllocator const &', 'arg0')])
    ## position-allocator.h (module 'mobility'): ns3::RandomRectanglePositionAllocator::RandomRectanglePositionAllocator() [constructor]
    cls.add_constructor([])
    ## position-allocator.h (module 'mobility'): int64_t ns3::RandomRectanglePositionAllocator::AssignStreams(int64_t stream) [member function]
    cls.add_method('AssignStreams',
                   'int64_t',
                   [param('int64_t', 'stream')],
                   is_virtual=True)
    ## position-allocator.h (module 'mobility'): ns3::Vector ns3::RandomRectanglePositionAllocator::GetNext() const [member function]
    cls.add_method('GetNext',
                   'ns3::Vector',
                   [],
                   is_const=True, is_virtual=True)
    ## position-allocator.h (module 'mobility'): static ns3::TypeId ns3::RandomRectanglePositionAllocator::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## position-allocator.h (module 'mobility'): void ns3::RandomRectanglePositionAllocator::SetX(ns3::Ptr<ns3::RandomVariableStream> x) [member function]
    cls.add_method('SetX',
                   'void',
                   [param('ns3::Ptr< ns3::RandomVariableStream >', 'x')])
    ## position-allocator.h (module 'mobility'): void ns3::RandomRectanglePositionAllocator::SetY(ns3::Ptr<ns3::RandomVariableStream> y) [member function]
    cls.add_method('SetY',
                   'void',
                   [param('ns3::Ptr< ns3::RandomVariableStream >', 'y')])
    return
def register_Ns3RandomRoomPositionAllocator_methods(root_module, cls):
    """Register ns3::RandomRoomPositionAllocator constructors and virtual overrides on the binding wrapper *cls*.

    Generated binding-registration code; each call is documented by the
    ``##`` comment above it, quoting the wrapped C++ declaration.
    """
    ## building-position-allocator.h (module 'buildings'): ns3::RandomRoomPositionAllocator::RandomRoomPositionAllocator(ns3::RandomRoomPositionAllocator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::RandomRoomPositionAllocator const &', 'arg0')])
    ## building-position-allocator.h (module 'buildings'): ns3::RandomRoomPositionAllocator::RandomRoomPositionAllocator() [constructor]
    cls.add_constructor([])
    ## building-position-allocator.h (module 'buildings'): int64_t ns3::RandomRoomPositionAllocator::AssignStreams(int64_t stream) [member function]
    cls.add_method('AssignStreams',
                   'int64_t',
                   [param('int64_t', 'stream')],
                   is_virtual=True)
    ## building-position-allocator.h (module 'buildings'): ns3::Vector ns3::RandomRoomPositionAllocator::GetNext() const [member function]
    cls.add_method('GetNext',
                   'ns3::Vector',
                   [],
                   is_const=True, is_virtual=True)
    ## building-position-allocator.h (module 'buildings'): static ns3::TypeId ns3::RandomRoomPositionAllocator::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    return
def register_Ns3RandomVariableStream_methods(root_module, cls):
    """Register bindings for ns3::RandomVariableStream (random-variable-stream.h, module 'core')."""
    # static ns3::TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # default constructor
    cls.add_constructor([])
    # stream-number accessors
    cls.add_method('SetStream', 'void', [param('int64_t', 'stream')])
    cls.add_method('GetStream', 'int64_t', [], is_const=True)
    # antithetic-variate flag accessors
    cls.add_method('SetAntithetic', 'void', [param('bool', 'isAntithetic')])
    cls.add_method('IsAntithetic', 'bool', [], is_const=True)
    # pure-virtual draw methods implemented by concrete distributions
    cls.add_method('GetValue', 'double', [], is_pure_virtual=True, is_virtual=True)
    cls.add_method('GetInteger', 'uint32_t', [], is_pure_virtual=True, is_virtual=True)
    # protected access to the underlying RNG stream
    cls.add_method('Peek', 'ns3::RngStream *', [], is_const=True, visibility='protected')
def register_Ns3RangePropagationLossModel_methods(root_module, cls):
    """Register bindings for ns3::RangePropagationLossModel (propagation-loss-model.h, module 'propagation')."""
    # static ns3::TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # default constructor
    cls.add_constructor([])
    # private virtual hooks of PropagationLossModel
    cls.add_method('DoCalcRxPower', 'double',
                   [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
                   is_const=True, visibility='private', is_virtual=True)
    cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')],
                   visibility='private', is_virtual=True)
def register_Ns3SameRoomPositionAllocator_methods(root_module, cls):
    """Register bindings for ns3::SameRoomPositionAllocator (building-position-allocator.h, module 'buildings')."""
    # copy constructor, default constructor, and NodeContainer constructor
    cls.add_constructor([param('ns3::SameRoomPositionAllocator const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::NodeContainer', 'c')])
    # int64_t AssignStreams(int64_t) [virtual]
    cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'arg0')], is_virtual=True)
    # ns3::Vector GetNext() const [virtual]
    cls.add_method('GetNext', 'ns3::Vector', [], is_const=True, is_virtual=True)
    # static ns3::TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
def register_Ns3SequentialRandomVariable_methods(root_module, cls):
    """Register bindings for ns3::SequentialRandomVariable (random-variable-stream.h, module 'core')."""
    # static ns3::TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # default constructor
    cls.add_constructor([])
    # distribution parameter getters
    cls.add_method('GetMin', 'double', [], is_const=True)
    cls.add_method('GetMax', 'double', [], is_const=True)
    cls.add_method('GetIncrement', 'ns3::Ptr< ns3::RandomVariableStream >', [], is_const=True)
    cls.add_method('GetConsecutive', 'uint32_t', [], is_const=True)
    # virtual draw methods (override RandomVariableStream)
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
    """Register bindings for the SimpleRefCount<AttributeAccessor> template instantiation (simple-ref-count.h, module 'core')."""
    # default and copy constructors
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
    """Register bindings for the SimpleRefCount<AttributeChecker> template instantiation (simple-ref-count.h, module 'core')."""
    # default and copy constructors
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
    """Register bindings for the SimpleRefCount<AttributeValue> template instantiation (simple-ref-count.h, module 'core')."""
    # default and copy constructors
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
    """Register bindings for the SimpleRefCount<CallbackImplBase> template instantiation (simple-ref-count.h, module 'core')."""
    # default and copy constructors
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
def register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, cls):
    """Register bindings for the SimpleRefCount<Hash::Implementation> template instantiation (simple-ref-count.h, module 'core')."""
    # default and copy constructors
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter< ns3::Hash::Implementation > > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
    """Register bindings for the SimpleRefCount<TraceSourceAccessor> template instantiation (simple-ref-count.h, module 'core')."""
    # default and copy constructors
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
def register_Ns3ThreeLogDistancePropagationLossModel_methods(root_module, cls):
    """Register bindings for ns3::ThreeLogDistancePropagationLossModel (propagation-loss-model.h, module 'propagation')."""
    # static ns3::TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # default constructor
    cls.add_constructor([])
    # private virtual hooks of PropagationLossModel
    cls.add_method('DoCalcRxPower', 'double',
                   [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
                   is_const=True, visibility='private', is_virtual=True)
    cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')],
                   visibility='private', is_virtual=True)
def register_Ns3Time_methods(root_module, cls):
    """Register bindings for ns3::Time (nstime.h, module 'core'): operators, constructors, and members.

    Registration order of operators and constructors is preserved from the
    generator output, since pybindgen resolves overloads in insertion order.
    """
    # arithmetic operators: Time scaled by int64, Time +/- Time, Time / int64
    cls.add_binary_numeric_operator('*', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', u'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', u'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', u'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', u'right'))
    # comparison, in-place, and stream-output operators
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('>')
    cls.add_binary_comparison_operator('!=')
    cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', u'right'))
    cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', u'right'))
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('<=')
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('>=')
    # constructors: default, copy, then one per numeric type, int64x64_t, and string
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Time const &', 'o')])
    cls.add_constructor([param('double', 'v')])
    cls.add_constructor([param('int', 'v')])
    cls.add_constructor([param('long int', 'v')])
    cls.add_constructor([param('long long int', 'v')])
    cls.add_constructor([param('unsigned int', 'v')])
    cls.add_constructor([param('long unsigned int', 'v')])
    cls.add_constructor([param('long long unsigned int', 'v')])
    cls.add_constructor([param('ns3::int64x64_t const &', 'v')])
    cls.add_constructor([param('std::string const &', 's')])
    # unit-tagged view and three-way comparison
    cls.add_method('As', 'ns3::TimeWithUnit', [param('ns3::Time::Unit const', 'unit')], is_const=True)
    cls.add_method('Compare', 'int', [param('ns3::Time const &', 'o')], is_const=True)
    # static factory methods
    cls.add_method('From', 'ns3::Time', [param('ns3::int64x64_t const &', 'value')], is_static=True)
    cls.add_method('From', 'ns3::Time', [param('ns3::int64x64_t const &', 'value'), param('ns3::Time::Unit', 'unit')], is_static=True)
    cls.add_method('FromDouble', 'ns3::Time', [param('double', 'value'), param('ns3::Time::Unit', 'unit')], is_static=True)
    cls.add_method('FromInteger', 'ns3::Time', [param('uint64_t', 'value'), param('ns3::Time::Unit', 'unit')], is_static=True)
    # per-unit value getters
    cls.add_method('GetDays', 'double', [], is_const=True)
    cls.add_method('GetDouble', 'double', [], is_const=True)
    cls.add_method('GetFemtoSeconds', 'int64_t', [], is_const=True)
    cls.add_method('GetHours', 'double', [], is_const=True)
    cls.add_method('GetInteger', 'int64_t', [], is_const=True)
    cls.add_method('GetMicroSeconds', 'int64_t', [], is_const=True)
    cls.add_method('GetMilliSeconds', 'int64_t', [], is_const=True)
    cls.add_method('GetMinutes', 'double', [], is_const=True)
    cls.add_method('GetNanoSeconds', 'int64_t', [], is_const=True)
    cls.add_method('GetPicoSeconds', 'int64_t', [], is_const=True)
    cls.add_method('GetResolution', 'ns3::Time::Unit', [], is_static=True)
    cls.add_method('GetSeconds', 'double', [], is_const=True)
    cls.add_method('GetTimeStep', 'int64_t', [], is_const=True)
    cls.add_method('GetYears', 'double', [], is_const=True)
    # sign / zero predicates
    cls.add_method('IsNegative', 'bool', [], is_const=True)
    cls.add_method('IsPositive', 'bool', [], is_const=True)
    cls.add_method('IsStrictlyNegative', 'bool', [], is_const=True)
    cls.add_method('IsStrictlyPositive', 'bool', [], is_const=True)
    cls.add_method('IsZero', 'bool', [], is_const=True)
    # static extremes, resolution control, and one-time init
    cls.add_method('Max', 'ns3::Time', [], is_static=True)
    cls.add_method('Min', 'ns3::Time', [], is_static=True)
    cls.add_method('SetResolution', 'void', [param('ns3::Time::Unit', 'resolution')], is_static=True)
    cls.add_method('StaticInit', 'bool', [], is_static=True)
    # unit conversions
    cls.add_method('To', 'ns3::int64x64_t', [param('ns3::Time::Unit', 'unit')], is_const=True)
    cls.add_method('ToDouble', 'double', [param('ns3::Time::Unit', 'unit')], is_const=True)
    cls.add_method('ToInteger', 'int64_t', [param('ns3::Time::Unit', 'unit')], is_const=True)
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
    """Register bindings for ns3::TraceSourceAccessor (trace-source-accessor.h, module 'core')."""
    # copy and default constructors
    cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
    cls.add_constructor([])
    # pure-virtual connect/disconnect hooks, with and without a context string
    cls.add_method('Connect', 'bool',
                   [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('ConnectWithoutContext', 'bool',
                   [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('Disconnect', 'bool',
                   [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('DisconnectWithoutContext', 'bool',
                   [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
def register_Ns3TriangularRandomVariable_methods(root_module, cls):
    """Register bindings for ns3::TriangularRandomVariable (random-variable-stream.h, module 'core')."""
    # static ns3::TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # default constructor
    cls.add_constructor([])
    # distribution parameter getters
    cls.add_method('GetMean', 'double', [], is_const=True)
    cls.add_method('GetMin', 'double', [], is_const=True)
    cls.add_method('GetMax', 'double', [], is_const=True)
    # draws with explicit parameters
    cls.add_method('GetValue', 'double',
                   [param('double', 'mean'), param('double', 'min'), param('double', 'max')])
    cls.add_method('GetInteger', 'uint32_t',
                   [param('uint32_t', 'mean'), param('uint32_t', 'min'), param('uint32_t', 'max')])
    # virtual draws using the configured parameters (override RandomVariableStream)
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
def register_Ns3TwoRayGroundPropagationLossModel_methods(root_module, cls):
    """Register bindings for ns3::TwoRayGroundPropagationLossModel (propagation-loss-model.h, module 'propagation')."""
    # static ns3::TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # default constructor
    cls.add_constructor([])
    # model parameter setters
    cls.add_method('SetFrequency', 'void', [param('double', 'frequency')])
    cls.add_method('SetSystemLoss', 'void', [param('double', 'systemLoss')])
    cls.add_method('SetMinDistance', 'void', [param('double', 'minDistance')])
    # model parameter getters
    cls.add_method('GetMinDistance', 'double', [], is_const=True)
    cls.add_method('GetFrequency', 'double', [], is_const=True)
    cls.add_method('GetSystemLoss', 'double', [], is_const=True)
    # antenna height above the node's z coordinate
    cls.add_method('SetHeightAboveZ', 'void', [param('double', 'heightAboveZ')])
    # private virtual hooks of PropagationLossModel
    cls.add_method('DoCalcRxPower', 'double',
                   [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
                   is_const=True, visibility='private', is_virtual=True)
    cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')],
                   visibility='private', is_virtual=True)
def register_Ns3UniformDiscPositionAllocator_methods(root_module, cls):
    """Register bindings for ns3::UniformDiscPositionAllocator (position-allocator.h, module 'mobility')."""
    # copy and default constructors
    cls.add_constructor([param('ns3::UniformDiscPositionAllocator const &', 'arg0')])
    cls.add_constructor([])
    # int64_t AssignStreams(int64_t stream) [virtual]
    cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')], is_virtual=True)
    # ns3::Vector GetNext() const [virtual]
    cls.add_method('GetNext', 'ns3::Vector', [], is_const=True, is_virtual=True)
    # static ns3::TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # disc radius and center coordinates
    cls.add_method('SetRho', 'void', [param('double', 'rho')])
    cls.add_method('SetX', 'void', [param('double', 'x')])
    cls.add_method('SetY', 'void', [param('double', 'y')])
def register_Ns3UniformRandomVariable_methods(root_module, cls):
    """Register bindings for ns3::UniformRandomVariable (random-variable-stream.h, module 'core')."""
    # static ns3::TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # default constructor
    cls.add_constructor([])
    # range getters
    cls.add_method('GetMin', 'double', [], is_const=True)
    cls.add_method('GetMax', 'double', [], is_const=True)
    # draws with explicit bounds
    cls.add_method('GetValue', 'double', [param('double', 'min'), param('double', 'max')])
    cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'min'), param('uint32_t', 'max')])
    # virtual draws using the configured bounds (override RandomVariableStream)
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
def register_Ns3WeibullRandomVariable_methods(root_module, cls):
    """Register bindings for ns3::WeibullRandomVariable (random-variable-stream.h, module 'core')."""
    # static ns3::TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # default constructor
    cls.add_constructor([])
    # distribution parameter getters
    cls.add_method('GetScale', 'double', [], is_const=True)
    cls.add_method('GetShape', 'double', [], is_const=True)
    cls.add_method('GetBound', 'double', [], is_const=True)
    # draws with explicit parameters
    cls.add_method('GetValue', 'double',
                   [param('double', 'scale'), param('double', 'shape'), param('double', 'bound')])
    cls.add_method('GetInteger', 'uint32_t',
                   [param('uint32_t', 'scale'), param('uint32_t', 'shape'), param('uint32_t', 'bound')])
    # virtual draws using the configured parameters (override RandomVariableStream)
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
def register_Ns3ZetaRandomVariable_methods(root_module, cls):
    """Register ns3::ZetaRandomVariable bindings on the class wrapper ``cls``.

    Binds static GetTypeId(), the default constructor, the const
    GetAlpha() accessor, the one-argument GetValue/GetInteger overloads,
    and the virtual zero-argument GetValue()/GetInteger().
    ``root_module`` is unused in this body.
    """
    ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ZetaRandomVariable::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## random-variable-stream.h (module 'core'): ns3::ZetaRandomVariable::ZetaRandomVariable() [constructor]
    cls.add_constructor([])
    ## random-variable-stream.h (module 'core'): double ns3::ZetaRandomVariable::GetAlpha() const [member function]
    cls.add_method('GetAlpha',
                   'double',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::ZetaRandomVariable::GetValue(double alpha) [member function]
    cls.add_method('GetValue',
                   'double',
                   [param('double', 'alpha')])
    ## random-variable-stream.h (module 'core'): uint32_t ns3::ZetaRandomVariable::GetInteger(uint32_t alpha) [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [param('uint32_t', 'alpha')])
    ## random-variable-stream.h (module 'core'): double ns3::ZetaRandomVariable::GetValue() [member function]
    cls.add_method('GetValue',
                   'double',
                   [],
                   is_virtual=True)
    ## random-variable-stream.h (module 'core'): uint32_t ns3::ZetaRandomVariable::GetInteger() [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [],
                   is_virtual=True)
    return
def register_Ns3ZipfRandomVariable_methods(root_module, cls):
    """Register ns3::ZipfRandomVariable bindings on the class wrapper ``cls``.

    Binds static GetTypeId(), the default constructor, the const
    GetN()/GetAlpha() accessors, the (n, alpha) GetValue/GetInteger
    overloads, and the virtual zero-argument GetValue()/GetInteger().
    ``root_module`` is unused in this body.
    """
    ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ZipfRandomVariable::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## random-variable-stream.h (module 'core'): ns3::ZipfRandomVariable::ZipfRandomVariable() [constructor]
    cls.add_constructor([])
    ## random-variable-stream.h (module 'core'): uint32_t ns3::ZipfRandomVariable::GetN() const [member function]
    cls.add_method('GetN',
                   'uint32_t',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::ZipfRandomVariable::GetAlpha() const [member function]
    cls.add_method('GetAlpha',
                   'double',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::ZipfRandomVariable::GetValue(uint32_t n, double alpha) [member function]
    cls.add_method('GetValue',
                   'double',
                   [param('uint32_t', 'n'), param('double', 'alpha')])
    ## random-variable-stream.h (module 'core'): uint32_t ns3::ZipfRandomVariable::GetInteger(uint32_t n, uint32_t alpha) [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [param('uint32_t', 'n'), param('uint32_t', 'alpha')])
    ## random-variable-stream.h (module 'core'): double ns3::ZipfRandomVariable::GetValue() [member function]
    cls.add_method('GetValue',
                   'double',
                   [],
                   is_virtual=True)
    ## random-variable-stream.h (module 'core'): uint32_t ns3::ZipfRandomVariable::GetInteger() [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [],
                   is_virtual=True)
    return
def register_Ns3AttributeAccessor_methods(root_module, cls):
    """Register ns3::AttributeAccessor bindings on the class wrapper ``cls``.

    Binds the copy and default constructors plus the pure-virtual const
    interface: Get, HasGetter, HasSetter, and Set (whose object pointer
    is registered with transfer_ownership=False).
    ``root_module`` is unused in this body.
    """
    ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
    cls.add_method('Get',
                   'bool',
                   [param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function]
    cls.add_method('HasGetter',
                   'bool',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function]
    cls.add_method('HasSetter',
                   'bool',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
    cls.add_method('Set',
                   'bool',
                   [param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3AttributeChecker_methods(root_module, cls):
    """Register ns3::AttributeChecker bindings on the class wrapper ``cls``.

    Binds the copy and default constructors, the pure-virtual const
    interface (Check, Copy, Create, GetUnderlyingTypeInformation,
    GetValueTypeName, HasUnderlyingTypeInformation), and the concrete
    const CreateValidValue helper.  ``root_module`` is unused here.
    """
    ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
    cls.add_method('Check',
                   'bool',
                   [param('ns3::AttributeValue const &', 'value')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
    cls.add_method('Copy',
                   'bool',
                   [param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function]
    cls.add_method('Create',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function]
    cls.add_method('CreateValidValue',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [param('ns3::AttributeValue const &', 'value')],
                   is_const=True)
    ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function]
    cls.add_method('GetUnderlyingTypeInformation',
                   'std::string',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function]
    cls.add_method('GetValueTypeName',
                   'std::string',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function]
    cls.add_method('HasUnderlyingTypeInformation',
                   'bool',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3AttributeValue_methods(root_module, cls):
    """Register ns3::AttributeValue bindings on the class wrapper ``cls``.

    Binds the copy and default constructors plus the pure-virtual Copy,
    DeserializeFromString, and SerializeToString interface.
    ``root_module`` is unused in this body.
    """
    ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_pure_virtual=True, is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3BoxChecker_methods(root_module, cls):
    """Register ns3::BoxChecker bindings on ``cls``: default and copy
    constructors only.  ``root_module`` is unused in this body."""
    ## box.h (module 'mobility'): ns3::BoxChecker::BoxChecker() [constructor]
    cls.add_constructor([])
    ## box.h (module 'mobility'): ns3::BoxChecker::BoxChecker(ns3::BoxChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::BoxChecker const &', 'arg0')])
    return
def register_Ns3BoxValue_methods(root_module, cls):
    """Register ns3::BoxValue bindings on the class wrapper ``cls``.

    Binds the default/copy/from-Box constructors, the virtual const
    Copy and SerializeToString overrides, the virtual
    DeserializeFromString, and the concrete Get/Set accessors.
    ``root_module`` is unused in this body.
    """
    ## box.h (module 'mobility'): ns3::BoxValue::BoxValue() [constructor]
    cls.add_constructor([])
    ## box.h (module 'mobility'): ns3::BoxValue::BoxValue(ns3::BoxValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::BoxValue const &', 'arg0')])
    ## box.h (module 'mobility'): ns3::BoxValue::BoxValue(ns3::Box const & value) [constructor]
    cls.add_constructor([param('ns3::Box const &', 'value')])
    ## box.h (module 'mobility'): ns3::Ptr<ns3::AttributeValue> ns3::BoxValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## box.h (module 'mobility'): bool ns3::BoxValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## box.h (module 'mobility'): ns3::Box ns3::BoxValue::Get() const [member function]
    cls.add_method('Get',
                   'ns3::Box',
                   [],
                   is_const=True)
    ## box.h (module 'mobility'): std::string ns3::BoxValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## box.h (module 'mobility'): void ns3::BoxValue::Set(ns3::Box const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::Box const &', 'value')])
    return
def register_Ns3Building_methods(root_module, cls):
    """Register ns3::Building bindings on the class wrapper ``cls``.

    Binds the copy, bounding-box, and default constructors; the virtual
    DoDispose override; const geometry/topology getters (boundaries,
    building type, wall type, floor/room lookups by position, id, floor
    and room counts); static GetTypeId(); the const IsInside test; and
    the corresponding setters.  ``root_module`` is unused in this body.
    """
    ## building.h (module 'buildings'): ns3::Building::Building(ns3::Building const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Building const &', 'arg0')])
    ## building.h (module 'buildings'): ns3::Building::Building(double xMin, double xMax, double yMin, double yMax, double zMin, double zMax) [constructor]
    cls.add_constructor([param('double', 'xMin'), param('double', 'xMax'), param('double', 'yMin'), param('double', 'yMax'), param('double', 'zMin'), param('double', 'zMax')])
    ## building.h (module 'buildings'): ns3::Building::Building() [constructor]
    cls.add_constructor([])
    ## building.h (module 'buildings'): void ns3::Building::DoDispose() [member function]
    cls.add_method('DoDispose',
                   'void',
                   [],
                   is_virtual=True)
    ## building.h (module 'buildings'): ns3::Box ns3::Building::GetBoundaries() const [member function]
    cls.add_method('GetBoundaries',
                   'ns3::Box',
                   [],
                   is_const=True)
    ## building.h (module 'buildings'): ns3::Building::BuildingType_t ns3::Building::GetBuildingType() const [member function]
    cls.add_method('GetBuildingType',
                   'ns3::Building::BuildingType_t',
                   [],
                   is_const=True)
    ## building.h (module 'buildings'): ns3::Building::ExtWallsType_t ns3::Building::GetExtWallsType() const [member function]
    cls.add_method('GetExtWallsType',
                   'ns3::Building::ExtWallsType_t',
                   [],
                   is_const=True)
    ## building.h (module 'buildings'): uint16_t ns3::Building::GetFloor(ns3::Vector position) const [member function]
    cls.add_method('GetFloor',
                   'uint16_t',
                   [param('ns3::Vector', 'position')],
                   is_const=True)
    ## building.h (module 'buildings'): uint32_t ns3::Building::GetId() const [member function]
    cls.add_method('GetId',
                   'uint32_t',
                   [],
                   is_const=True)
    ## building.h (module 'buildings'): uint16_t ns3::Building::GetNFloors() const [member function]
    cls.add_method('GetNFloors',
                   'uint16_t',
                   [],
                   is_const=True)
    ## building.h (module 'buildings'): uint16_t ns3::Building::GetNRoomsX() const [member function]
    cls.add_method('GetNRoomsX',
                   'uint16_t',
                   [],
                   is_const=True)
    ## building.h (module 'buildings'): uint16_t ns3::Building::GetNRoomsY() const [member function]
    cls.add_method('GetNRoomsY',
                   'uint16_t',
                   [],
                   is_const=True)
    ## building.h (module 'buildings'): uint16_t ns3::Building::GetRoomX(ns3::Vector position) const [member function]
    cls.add_method('GetRoomX',
                   'uint16_t',
                   [param('ns3::Vector', 'position')],
                   is_const=True)
    ## building.h (module 'buildings'): uint16_t ns3::Building::GetRoomY(ns3::Vector position) const [member function]
    cls.add_method('GetRoomY',
                   'uint16_t',
                   [param('ns3::Vector', 'position')],
                   is_const=True)
    ## building.h (module 'buildings'): static ns3::TypeId ns3::Building::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## building.h (module 'buildings'): bool ns3::Building::IsInside(ns3::Vector position) const [member function]
    cls.add_method('IsInside',
                   'bool',
                   [param('ns3::Vector', 'position')],
                   is_const=True)
    ## building.h (module 'buildings'): void ns3::Building::SetBoundaries(ns3::Box box) [member function]
    cls.add_method('SetBoundaries',
                   'void',
                   [param('ns3::Box', 'box')])
    ## building.h (module 'buildings'): void ns3::Building::SetBuildingType(ns3::Building::BuildingType_t t) [member function]
    cls.add_method('SetBuildingType',
                   'void',
                   [param('ns3::Building::BuildingType_t', 't')])
    ## building.h (module 'buildings'): void ns3::Building::SetExtWallsType(ns3::Building::ExtWallsType_t t) [member function]
    cls.add_method('SetExtWallsType',
                   'void',
                   [param('ns3::Building::ExtWallsType_t', 't')])
    ## building.h (module 'buildings'): void ns3::Building::SetNFloors(uint16_t nfloors) [member function]
    cls.add_method('SetNFloors',
                   'void',
                   [param('uint16_t', 'nfloors')])
    ## building.h (module 'buildings'): void ns3::Building::SetNRoomsX(uint16_t nroomx) [member function]
    cls.add_method('SetNRoomsX',
                   'void',
                   [param('uint16_t', 'nroomx')])
    ## building.h (module 'buildings'): void ns3::Building::SetNRoomsY(uint16_t nroomy) [member function]
    cls.add_method('SetNRoomsY',
                   'void',
                   [param('uint16_t', 'nroomy')])
    return
def register_Ns3BuildingsPropagationLossModel_methods(root_module, cls):
    """Register ns3::BuildingsPropagationLossModel bindings on ``cls``.

    Binds the default constructor, the virtual const DoCalcRxPower
    override, the pure-virtual const GetLoss, static GetTypeId(), and
    the protected helpers (DoAssignStreams, EvaluateSigma,
    ExternalWallLoss, GetShadowing, HeightLoss, InternalWallsLoss),
    registered with visibility='protected'.  ``root_module`` is unused
    in this body.
    """
    ## buildings-propagation-loss-model.h (module 'buildings'): ns3::BuildingsPropagationLossModel::BuildingsPropagationLossModel() [constructor]
    cls.add_constructor([])
    ## buildings-propagation-loss-model.h (module 'buildings'): double ns3::BuildingsPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
    cls.add_method('DoCalcRxPower',
                   'double',
                   [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
                   is_const=True, is_virtual=True)
    ## buildings-propagation-loss-model.h (module 'buildings'): double ns3::BuildingsPropagationLossModel::GetLoss(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
    cls.add_method('GetLoss',
                   'double',
                   [param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## buildings-propagation-loss-model.h (module 'buildings'): static ns3::TypeId ns3::BuildingsPropagationLossModel::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## buildings-propagation-loss-model.h (module 'buildings'): int64_t ns3::BuildingsPropagationLossModel::DoAssignStreams(int64_t stream) [member function]
    cls.add_method('DoAssignStreams',
                   'int64_t',
                   [param('int64_t', 'stream')],
                   visibility='protected', is_virtual=True)
    ## buildings-propagation-loss-model.h (module 'buildings'): double ns3::BuildingsPropagationLossModel::EvaluateSigma(ns3::Ptr<ns3::MobilityBuildingInfo> a, ns3::Ptr<ns3::MobilityBuildingInfo> b) const [member function]
    cls.add_method('EvaluateSigma',
                   'double',
                   [param('ns3::Ptr< ns3::MobilityBuildingInfo >', 'a'), param('ns3::Ptr< ns3::MobilityBuildingInfo >', 'b')],
                   is_const=True, visibility='protected')
    ## buildings-propagation-loss-model.h (module 'buildings'): double ns3::BuildingsPropagationLossModel::ExternalWallLoss(ns3::Ptr<ns3::MobilityBuildingInfo> a) const [member function]
    cls.add_method('ExternalWallLoss',
                   'double',
                   [param('ns3::Ptr< ns3::MobilityBuildingInfo >', 'a')],
                   is_const=True, visibility='protected')
    ## buildings-propagation-loss-model.h (module 'buildings'): double ns3::BuildingsPropagationLossModel::GetShadowing(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
    cls.add_method('GetShadowing',
                   'double',
                   [param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
                   is_const=True, visibility='protected')
    ## buildings-propagation-loss-model.h (module 'buildings'): double ns3::BuildingsPropagationLossModel::HeightLoss(ns3::Ptr<ns3::MobilityBuildingInfo> n) const [member function]
    cls.add_method('HeightLoss',
                   'double',
                   [param('ns3::Ptr< ns3::MobilityBuildingInfo >', 'n')],
                   is_const=True, visibility='protected')
    ## buildings-propagation-loss-model.h (module 'buildings'): double ns3::BuildingsPropagationLossModel::InternalWallsLoss(ns3::Ptr<ns3::MobilityBuildingInfo> a, ns3::Ptr<ns3::MobilityBuildingInfo> b) const [member function]
    cls.add_method('InternalWallsLoss',
                   'double',
                   [param('ns3::Ptr< ns3::MobilityBuildingInfo >', 'a'), param('ns3::Ptr< ns3::MobilityBuildingInfo >', 'b')],
                   is_const=True, visibility='protected')
    return
def register_Ns3CallbackChecker_methods(root_module, cls):
    """Register ns3::CallbackChecker bindings on ``cls``: default and
    copy constructors only.  ``root_module`` is unused in this body."""
    ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
    return
def register_Ns3CallbackImplBase_methods(root_module, cls):
    """Register ns3::CallbackImplBase bindings on ``cls``: default and
    copy constructors plus the pure-virtual const IsEqual comparison.
    ``root_module`` is unused in this body."""
    ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
    ## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function]
    cls.add_method('IsEqual',
                   'bool',
                   [param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3CallbackValue_methods(root_module, cls):
    """Register ns3::CallbackValue bindings on the class wrapper ``cls``.

    Binds the copy/default/from-CallbackBase constructors, the virtual
    const Copy and SerializeToString overrides, the virtual
    DeserializeFromString, and the concrete Set mutator.
    ``root_module`` is unused in this body.
    """
    ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
    ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor]
    cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
    ## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::CallbackBase', 'base')])
    return
def register_Ns3ConstantRandomVariable_methods(root_module, cls):
    """Register ns3::ConstantRandomVariable bindings on the wrapper ``cls``.

    Binds static GetTypeId(), the default constructor, the const
    GetConstant() accessor, the one-argument GetValue/GetInteger
    overloads, and the virtual zero-argument GetValue()/GetInteger().
    ``root_module`` is unused in this body.
    """
    ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ConstantRandomVariable::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## random-variable-stream.h (module 'core'): ns3::ConstantRandomVariable::ConstantRandomVariable() [constructor]
    cls.add_constructor([])
    ## random-variable-stream.h (module 'core'): double ns3::ConstantRandomVariable::GetConstant() const [member function]
    cls.add_method('GetConstant',
                   'double',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::ConstantRandomVariable::GetValue(double constant) [member function]
    cls.add_method('GetValue',
                   'double',
                   [param('double', 'constant')])
    ## random-variable-stream.h (module 'core'): uint32_t ns3::ConstantRandomVariable::GetInteger(uint32_t constant) [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [param('uint32_t', 'constant')])
    ## random-variable-stream.h (module 'core'): double ns3::ConstantRandomVariable::GetValue() [member function]
    cls.add_method('GetValue',
                   'double',
                   [],
                   is_virtual=True)
    ## random-variable-stream.h (module 'core'): uint32_t ns3::ConstantRandomVariable::GetInteger() [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [],
                   is_virtual=True)
    return
def register_Ns3DeterministicRandomVariable_methods(root_module, cls):
    """Register ns3::DeterministicRandomVariable bindings on ``cls``.

    Binds static GetTypeId(), the default constructor, SetValueArray
    (double* values plus length), and the virtual zero-argument
    GetValue()/GetInteger().  ``root_module`` is unused in this body.
    """
    ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::DeterministicRandomVariable::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## random-variable-stream.h (module 'core'): ns3::DeterministicRandomVariable::DeterministicRandomVariable() [constructor]
    cls.add_constructor([])
    ## random-variable-stream.h (module 'core'): void ns3::DeterministicRandomVariable::SetValueArray(double * values, uint64_t length) [member function]
    cls.add_method('SetValueArray',
                   'void',
                   [param('double *', 'values'), param('uint64_t', 'length')])
    ## random-variable-stream.h (module 'core'): double ns3::DeterministicRandomVariable::GetValue() [member function]
    cls.add_method('GetValue',
                   'double',
                   [],
                   is_virtual=True)
    ## random-variable-stream.h (module 'core'): uint32_t ns3::DeterministicRandomVariable::GetInteger() [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [],
                   is_virtual=True)
    return
def register_Ns3EmpiricalRandomVariable_methods(root_module, cls):
    """Register ns3::EmpiricalRandomVariable bindings on ``cls``.

    Binds the default constructor, the CDF(v, c) point-adder, the
    virtual GetInteger()/GetValue() overrides, static GetTypeId(), and
    the private virtual Interpolate and Validate hooks (registered with
    visibility='private').  ``root_module`` is unused in this body.
    """
    ## random-variable-stream.h (module 'core'): ns3::EmpiricalRandomVariable::EmpiricalRandomVariable() [constructor]
    cls.add_constructor([])
    ## random-variable-stream.h (module 'core'): void ns3::EmpiricalRandomVariable::CDF(double v, double c) [member function]
    cls.add_method('CDF',
                   'void',
                   [param('double', 'v'), param('double', 'c')])
    ## random-variable-stream.h (module 'core'): uint32_t ns3::EmpiricalRandomVariable::GetInteger() [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [],
                   is_virtual=True)
    ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::EmpiricalRandomVariable::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## random-variable-stream.h (module 'core'): double ns3::EmpiricalRandomVariable::GetValue() [member function]
    cls.add_method('GetValue',
                   'double',
                   [],
                   is_virtual=True)
    ## random-variable-stream.h (module 'core'): double ns3::EmpiricalRandomVariable::Interpolate(double arg0, double arg1, double arg2, double arg3, double arg4) [member function]
    cls.add_method('Interpolate',
                   'double',
                   [param('double', 'arg0'), param('double', 'arg1'), param('double', 'arg2'), param('double', 'arg3'), param('double', 'arg4')],
                   visibility='private', is_virtual=True)
    ## random-variable-stream.h (module 'core'): void ns3::EmpiricalRandomVariable::Validate() [member function]
    cls.add_method('Validate',
                   'void',
                   [],
                   visibility='private', is_virtual=True)
    return
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
    """Register ns3::EmptyAttributeValue bindings on ``cls``.

    Binds the copy and default constructors plus the private virtual
    Copy, DeserializeFromString, and SerializeToString overrides (all
    registered with visibility='private').  ``root_module`` is unused
    in this body.
    """
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, visibility='private', is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   visibility='private', is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, visibility='private', is_virtual=True)
    return
def register_Ns3ErlangRandomVariable_methods(root_module, cls):
    """Register ns3::ErlangRandomVariable bindings on the wrapper ``cls``.

    Binds static GetTypeId(), the default constructor, the const
    GetK()/GetLambda() accessors, the (k, lambda) GetValue/GetInteger
    overloads, and the virtual zero-argument GetValue()/GetInteger().
    ``root_module`` is unused in this body.
    """
    ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ErlangRandomVariable::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## random-variable-stream.h (module 'core'): ns3::ErlangRandomVariable::ErlangRandomVariable() [constructor]
    cls.add_constructor([])
    ## random-variable-stream.h (module 'core'): uint32_t ns3::ErlangRandomVariable::GetK() const [member function]
    cls.add_method('GetK',
                   'uint32_t',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::ErlangRandomVariable::GetLambda() const [member function]
    cls.add_method('GetLambda',
                   'double',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::ErlangRandomVariable::GetValue(uint32_t k, double lambda) [member function]
    cls.add_method('GetValue',
                   'double',
                   [param('uint32_t', 'k'), param('double', 'lambda')])
    ## random-variable-stream.h (module 'core'): uint32_t ns3::ErlangRandomVariable::GetInteger(uint32_t k, uint32_t lambda) [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [param('uint32_t', 'k'), param('uint32_t', 'lambda')])
    ## random-variable-stream.h (module 'core'): double ns3::ErlangRandomVariable::GetValue() [member function]
    cls.add_method('GetValue',
                   'double',
                   [],
                   is_virtual=True)
    ## random-variable-stream.h (module 'core'): uint32_t ns3::ErlangRandomVariable::GetInteger() [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [],
                   is_virtual=True)
    return
def register_Ns3ExponentialRandomVariable_methods(root_module, cls):
    """Register ns3::ExponentialRandomVariable bindings on the wrapper ``cls``.

    Binds static GetTypeId(), the default constructor, the const
    GetMean()/GetBound() accessors, the (mean, bound)
    GetValue/GetInteger overloads, and the virtual zero-argument
    GetValue()/GetInteger().  ``root_module`` is unused in this body.
    """
    ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ExponentialRandomVariable::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## random-variable-stream.h (module 'core'): ns3::ExponentialRandomVariable::ExponentialRandomVariable() [constructor]
    cls.add_constructor([])
    ## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetMean() const [member function]
    cls.add_method('GetMean',
                   'double',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetBound() const [member function]
    cls.add_method('GetBound',
                   'double',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetValue(double mean, double bound) [member function]
    cls.add_method('GetValue',
                   'double',
                   [param('double', 'mean'), param('double', 'bound')])
    ## random-variable-stream.h (module 'core'): uint32_t ns3::ExponentialRandomVariable::GetInteger(uint32_t mean, uint32_t bound) [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [param('uint32_t', 'mean'), param('uint32_t', 'bound')])
    ## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetValue() [member function]
    cls.add_method('GetValue',
                   'double',
                   [],
                   is_virtual=True)
    ## random-variable-stream.h (module 'core'): uint32_t ns3::ExponentialRandomVariable::GetInteger() [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [],
                   is_virtual=True)
    return
def register_Ns3FixedRoomPositionAllocator_methods(root_module, cls):
    """Register ns3::FixedRoomPositionAllocator bindings on ``cls``.

    Binds the copy constructor, the (x, y, z, building) constructor,
    the virtual AssignStreams override, the virtual const GetNext()
    position getter, and static GetTypeId().  ``root_module`` is
    unused in this body.
    """
    ## building-position-allocator.h (module 'buildings'): ns3::FixedRoomPositionAllocator::FixedRoomPositionAllocator(ns3::FixedRoomPositionAllocator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::FixedRoomPositionAllocator const &', 'arg0')])
    ## building-position-allocator.h (module 'buildings'): ns3::FixedRoomPositionAllocator::FixedRoomPositionAllocator(uint32_t x, uint32_t y, uint32_t z, ns3::Ptr<ns3::Building> b) [constructor]
    cls.add_constructor([param('uint32_t', 'x'), param('uint32_t', 'y'), param('uint32_t', 'z'), param('ns3::Ptr< ns3::Building >', 'b')])
    ## building-position-allocator.h (module 'buildings'): int64_t ns3::FixedRoomPositionAllocator::AssignStreams(int64_t arg0) [member function]
    cls.add_method('AssignStreams',
                   'int64_t',
                   [param('int64_t', 'arg0')],
                   is_virtual=True)
    ## building-position-allocator.h (module 'buildings'): ns3::Vector ns3::FixedRoomPositionAllocator::GetNext() const [member function]
    cls.add_method('GetNext',
                   'ns3::Vector',
                   [],
                   is_const=True, is_virtual=True)
    ## building-position-allocator.h (module 'buildings'): static ns3::TypeId ns3::FixedRoomPositionAllocator::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    return
def register_Ns3FixedRssLossModel_methods(root_module, cls):
    """Register ns3::FixedRssLossModel bindings on the wrapper ``cls``.

    Binds static GetTypeId(), the default constructor, the SetRss
    mutator, and the private virtual DoCalcRxPower/DoAssignStreams
    overrides (registered with visibility='private').
    ``root_module`` is unused in this body.
    """
    ## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::FixedRssLossModel::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## propagation-loss-model.h (module 'propagation'): ns3::FixedRssLossModel::FixedRssLossModel() [constructor]
    cls.add_constructor([])
    ## propagation-loss-model.h (module 'propagation'): void ns3::FixedRssLossModel::SetRss(double rss) [member function]
    cls.add_method('SetRss',
                   'void',
                   [param('double', 'rss')])
    ## propagation-loss-model.h (module 'propagation'): double ns3::FixedRssLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
    cls.add_method('DoCalcRxPower',
                   'double',
                   [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
                   is_const=True, visibility='private', is_virtual=True)
    ## propagation-loss-model.h (module 'propagation'): int64_t ns3::FixedRssLossModel::DoAssignStreams(int64_t stream) [member function]
    cls.add_method('DoAssignStreams',
                   'int64_t',
                   [param('int64_t', 'stream')],
                   visibility='private', is_virtual=True)
    return
def register_Ns3FriisPropagationLossModel_methods(root_module, cls):
    """Register ns3::FriisPropagationLossModel bindings on ``cls``.

    Binds static GetTypeId(), the default constructor, the
    frequency/system-loss/min-loss setters and const getters, and the
    private virtual DoCalcRxPower/DoAssignStreams overrides
    (registered with visibility='private').  ``root_module`` is
    unused in this body.
    """
    ## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::FriisPropagationLossModel::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## propagation-loss-model.h (module 'propagation'): ns3::FriisPropagationLossModel::FriisPropagationLossModel() [constructor]
    cls.add_constructor([])
    ## propagation-loss-model.h (module 'propagation'): void ns3::FriisPropagationLossModel::SetFrequency(double frequency) [member function]
    cls.add_method('SetFrequency',
                   'void',
                   [param('double', 'frequency')])
    ## propagation-loss-model.h (module 'propagation'): void ns3::FriisPropagationLossModel::SetSystemLoss(double systemLoss) [member function]
    cls.add_method('SetSystemLoss',
                   'void',
                   [param('double', 'systemLoss')])
    ## propagation-loss-model.h (module 'propagation'): void ns3::FriisPropagationLossModel::SetMinLoss(double minLoss) [member function]
    cls.add_method('SetMinLoss',
                   'void',
                   [param('double', 'minLoss')])
    ## propagation-loss-model.h (module 'propagation'): double ns3::FriisPropagationLossModel::GetMinLoss() const [member function]
    cls.add_method('GetMinLoss',
                   'double',
                   [],
                   is_const=True)
    ## propagation-loss-model.h (module 'propagation'): double ns3::FriisPropagationLossModel::GetFrequency() const [member function]
    cls.add_method('GetFrequency',
                   'double',
                   [],
                   is_const=True)
    ## propagation-loss-model.h (module 'propagation'): double ns3::FriisPropagationLossModel::GetSystemLoss() const [member function]
    cls.add_method('GetSystemLoss',
                   'double',
                   [],
                   is_const=True)
    ## propagation-loss-model.h (module 'propagation'): double ns3::FriisPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
    cls.add_method('DoCalcRxPower',
                   'double',
                   [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
                   is_const=True, visibility='private', is_virtual=True)
    ## propagation-loss-model.h (module 'propagation'): int64_t ns3::FriisPropagationLossModel::DoAssignStreams(int64_t stream) [member function]
    cls.add_method('DoAssignStreams',
                   'int64_t',
                   [param('int64_t', 'stream')],
                   visibility='private', is_virtual=True)
    return
def register_Ns3GammaRandomVariable_methods(root_module, cls):
    """Bind ns3::GammaRandomVariable (random-variable-stream.h, module 'core')."""
    # static ns3::TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # GammaRandomVariable() [constructor]
    cls.add_constructor([])
    # double GetAlpha() const
    cls.add_method('GetAlpha', 'double', [], is_const=True)
    # double GetBeta() const
    cls.add_method('GetBeta', 'double', [], is_const=True)
    # double GetValue(double alpha, double beta)
    cls.add_method('GetValue', 'double', [param('double', 'alpha'), param('double', 'beta')])
    # uint32_t GetInteger(uint32_t alpha, uint32_t beta)
    cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'alpha'), param('uint32_t', 'beta')])
    # virtual double GetValue()
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    # virtual uint32_t GetInteger()
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
    return
def register_Ns3GridBuildingAllocator_methods(root_module, cls):
    """Bind ns3::GridBuildingAllocator (building-allocator.h, module 'buildings')."""
    # GridBuildingAllocator(GridBuildingAllocator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::GridBuildingAllocator const &', 'arg0')])
    # GridBuildingAllocator() [constructor]
    cls.add_constructor([])
    # ns3::BuildingContainer Create(uint32_t n) const
    cls.add_method('Create', 'ns3::BuildingContainer', [param('uint32_t', 'n')], is_const=True)
    # static ns3::TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # void SetBuildingAttribute(std::string n, ns3::AttributeValue const & v)
    cls.add_method('SetBuildingAttribute', 'void',
                   [param('std::string', 'n'), param('ns3::AttributeValue const &', 'v')])
    return
def register_Ns3GridPositionAllocator_methods(root_module, cls):
    """Bind ns3::GridPositionAllocator (position-allocator.h, module 'mobility')."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::GridPositionAllocator const &', 'arg0')])
    cls.add_constructor([])
    # virtual int64_t AssignStreams(int64_t stream)
    cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')], is_virtual=True)
    # Grid-geometry accessors (all const).
    cls.add_method('GetDeltaX', 'double', [], is_const=True)
    cls.add_method('GetDeltaY', 'double', [], is_const=True)
    cls.add_method('GetLayoutType', 'ns3::GridPositionAllocator::LayoutType', [], is_const=True)
    cls.add_method('GetMinX', 'double', [], is_const=True)
    cls.add_method('GetMinY', 'double', [], is_const=True)
    cls.add_method('GetN', 'uint32_t', [], is_const=True)
    # virtual ns3::Vector GetNext() const
    cls.add_method('GetNext', 'ns3::Vector', [], is_const=True, is_virtual=True)
    # static ns3::TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # Grid-geometry mutators.
    cls.add_method('SetDeltaX', 'void', [param('double', 'deltaX')])
    cls.add_method('SetDeltaY', 'void', [param('double', 'deltaY')])
    cls.add_method('SetLayoutType', 'void',
                   [param('ns3::GridPositionAllocator::LayoutType', 'layoutType')])
    cls.add_method('SetMinX', 'void', [param('double', 'xMin')])
    cls.add_method('SetMinY', 'void', [param('double', 'yMin')])
    cls.add_method('SetN', 'void', [param('uint32_t', 'n')])
    return
def register_Ns3HybridBuildingsPropagationLossModel_methods(root_module, cls):
    """Bind ns3::HybridBuildingsPropagationLossModel (hybrid-buildings-propagation-loss-model.h, module 'buildings')."""
    # static ns3::TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # HybridBuildingsPropagationLossModel() [constructor]
    cls.add_constructor([])
    # void SetEnvironment(ns3::EnvironmentType env)
    cls.add_method('SetEnvironment', 'void', [param('ns3::EnvironmentType', 'env')])
    # void SetCitySize(ns3::CitySize size)
    cls.add_method('SetCitySize', 'void', [param('ns3::CitySize', 'size')])
    # void SetFrequency(double freq)
    cls.add_method('SetFrequency', 'void', [param('double', 'freq')])
    # void SetRooftopHeight(double rooftopHeight)
    cls.add_method('SetRooftopHeight', 'void', [param('double', 'rooftopHeight')])
    # virtual double GetLoss(Ptr<MobilityModel> a, Ptr<MobilityModel> b) const
    cls.add_method('GetLoss', 'double',
                   [param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
                   is_const=True, is_virtual=True)
    return
def register_Ns3Ipv4AddressChecker_methods(root_module, cls):
    """Bind ns3::Ipv4AddressChecker constructors (ipv4-address.h, module 'network')."""
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor([param('ns3::Ipv4AddressChecker const &', 'arg0')])
    return
def register_Ns3Ipv4AddressValue_methods(root_module, cls):
    """Bind ns3::Ipv4AddressValue (ipv4-address.h, module 'network')."""
    # Default, copy, and value constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv4AddressValue const &', 'arg0')])
    cls.add_constructor([param('ns3::Ipv4Address const &', 'value')])
    # virtual Ptr<AttributeValue> Copy() const
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    # virtual bool DeserializeFromString(std::string value, Ptr<AttributeChecker const> checker)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    # ns3::Ipv4Address Get() const
    cls.add_method('Get', 'ns3::Ipv4Address', [], is_const=True)
    # virtual std::string SerializeToString(Ptr<AttributeChecker const> checker) const
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    # void Set(ns3::Ipv4Address const & value)
    cls.add_method('Set', 'void', [param('ns3::Ipv4Address const &', 'value')])
    return
def register_Ns3Ipv4MaskChecker_methods(root_module, cls):
    """Bind ns3::Ipv4MaskChecker constructors (ipv4-address.h, module 'network')."""
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor([param('ns3::Ipv4MaskChecker const &', 'arg0')])
    return
def register_Ns3Ipv4MaskValue_methods(root_module, cls):
    """Bind ns3::Ipv4MaskValue (ipv4-address.h, module 'network')."""
    # Default, copy, and value constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv4MaskValue const &', 'arg0')])
    cls.add_constructor([param('ns3::Ipv4Mask const &', 'value')])
    # virtual Ptr<AttributeValue> Copy() const
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    # virtual bool DeserializeFromString(std::string value, Ptr<AttributeChecker const> checker)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    # ns3::Ipv4Mask Get() const
    cls.add_method('Get', 'ns3::Ipv4Mask', [], is_const=True)
    # virtual std::string SerializeToString(Ptr<AttributeChecker const> checker) const
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    # void Set(ns3::Ipv4Mask const & value)
    cls.add_method('Set', 'void', [param('ns3::Ipv4Mask const &', 'value')])
    return
def register_Ns3Ipv6AddressChecker_methods(root_module, cls):
    """Bind ns3::Ipv6AddressChecker constructors (ipv6-address.h, module 'network')."""
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor([param('ns3::Ipv6AddressChecker const &', 'arg0')])
    return
def register_Ns3Ipv6AddressValue_methods(root_module, cls):
    """Bind ns3::Ipv6AddressValue (ipv6-address.h, module 'network')."""
    # Default, copy, and value constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv6AddressValue const &', 'arg0')])
    cls.add_constructor([param('ns3::Ipv6Address const &', 'value')])
    # virtual Ptr<AttributeValue> Copy() const
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    # virtual bool DeserializeFromString(std::string value, Ptr<AttributeChecker const> checker)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    # ns3::Ipv6Address Get() const
    cls.add_method('Get', 'ns3::Ipv6Address', [], is_const=True)
    # virtual std::string SerializeToString(Ptr<AttributeChecker const> checker) const
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    # void Set(ns3::Ipv6Address const & value)
    cls.add_method('Set', 'void', [param('ns3::Ipv6Address const &', 'value')])
    return
def register_Ns3Ipv6PrefixChecker_methods(root_module, cls):
    """Bind ns3::Ipv6PrefixChecker constructors (ipv6-address.h, module 'network')."""
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor([param('ns3::Ipv6PrefixChecker const &', 'arg0')])
    return
def register_Ns3Ipv6PrefixValue_methods(root_module, cls):
    """Bind ns3::Ipv6PrefixValue (ipv6-address.h, module 'network')."""
    # Default, copy, and value constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv6PrefixValue const &', 'arg0')])
    cls.add_constructor([param('ns3::Ipv6Prefix const &', 'value')])
    # virtual Ptr<AttributeValue> Copy() const
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    # virtual bool DeserializeFromString(std::string value, Ptr<AttributeChecker const> checker)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    # ns3::Ipv6Prefix Get() const
    cls.add_method('Get', 'ns3::Ipv6Prefix', [], is_const=True)
    # virtual std::string SerializeToString(Ptr<AttributeChecker const> checker) const
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    # void Set(ns3::Ipv6Prefix const & value)
    cls.add_method('Set', 'void', [param('ns3::Ipv6Prefix const &', 'value')])
    return
def register_Ns3ItuR1238PropagationLossModel_methods(root_module, cls):
    """Bind ns3::ItuR1238PropagationLossModel (itu-r-1238-propagation-loss-model.h, module 'buildings')."""
    # ItuR1238PropagationLossModel() [constructor]
    cls.add_constructor([])
    # static ns3::TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # double GetLoss(Ptr<MobilityModel> a, Ptr<MobilityModel> b) const
    cls.add_method('GetLoss', 'double',
                   [param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
                   is_const=True)
    # private virtual double DoCalcRxPower(double txPowerDbm, Ptr<MobilityModel> a, Ptr<MobilityModel> b) const
    cls.add_method('DoCalcRxPower', 'double',
                   [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
                   is_const=True, visibility='private', is_virtual=True)
    # private virtual int64_t DoAssignStreams(int64_t stream)
    cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')],
                   visibility='private', is_virtual=True)
    return
def register_Ns3ListPositionAllocator_methods(root_module, cls):
    """Bind ns3::ListPositionAllocator (position-allocator.h, module 'mobility')."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::ListPositionAllocator const &', 'arg0')])
    cls.add_constructor([])
    # void Add(ns3::Vector v)
    cls.add_method('Add', 'void', [param('ns3::Vector', 'v')])
    # virtual int64_t AssignStreams(int64_t stream)
    cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')], is_virtual=True)
    # virtual ns3::Vector GetNext() const
    cls.add_method('GetNext', 'ns3::Vector', [], is_const=True, is_virtual=True)
    # static ns3::TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    return
def register_Ns3LogDistancePropagationLossModel_methods(root_module, cls):
    """Bind ns3::LogDistancePropagationLossModel (propagation-loss-model.h, module 'propagation')."""
    # static ns3::TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # LogDistancePropagationLossModel() [constructor]
    cls.add_constructor([])
    # void SetPathLossExponent(double n)
    cls.add_method('SetPathLossExponent', 'void', [param('double', 'n')])
    # double GetPathLossExponent() const
    cls.add_method('GetPathLossExponent', 'double', [], is_const=True)
    # void SetReference(double referenceDistance, double referenceLoss)
    cls.add_method('SetReference', 'void',
                   [param('double', 'referenceDistance'), param('double', 'referenceLoss')])
    # private virtual double DoCalcRxPower(double txPowerDbm, Ptr<MobilityModel> a, Ptr<MobilityModel> b) const
    cls.add_method('DoCalcRxPower', 'double',
                   [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
                   is_const=True, visibility='private', is_virtual=True)
    # private virtual int64_t DoAssignStreams(int64_t stream)
    cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')],
                   visibility='private', is_virtual=True)
    return
def register_Ns3LogNormalRandomVariable_methods(root_module, cls):
    """Bind ns3::LogNormalRandomVariable (random-variable-stream.h, module 'core')."""
    # static ns3::TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # LogNormalRandomVariable() [constructor]
    cls.add_constructor([])
    # double GetMu() const
    cls.add_method('GetMu', 'double', [], is_const=True)
    # double GetSigma() const
    cls.add_method('GetSigma', 'double', [], is_const=True)
    # double GetValue(double mu, double sigma)
    cls.add_method('GetValue', 'double', [param('double', 'mu'), param('double', 'sigma')])
    # uint32_t GetInteger(uint32_t mu, uint32_t sigma)
    cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'mu'), param('uint32_t', 'sigma')])
    # virtual double GetValue()
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    # virtual uint32_t GetInteger()
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
    return
def register_Ns3MatrixPropagationLossModel_methods(root_module, cls):
    """Bind ns3::MatrixPropagationLossModel (propagation-loss-model.h, module 'propagation')."""
    # static ns3::TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # MatrixPropagationLossModel() [constructor]
    cls.add_constructor([])
    # void SetLoss(Ptr<MobilityModel> a, Ptr<MobilityModel> b, double loss, bool symmetric=true)
    cls.add_method('SetLoss', 'void',
                   [param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b'), param('double', 'loss'), param('bool', 'symmetric', default_value='true')])
    # void SetDefaultLoss(double defaultLoss)
    cls.add_method('SetDefaultLoss', 'void', [param('double', 'defaultLoss')])
    # private virtual double DoCalcRxPower(double txPowerDbm, Ptr<MobilityModel> a, Ptr<MobilityModel> b) const
    cls.add_method('DoCalcRxPower', 'double',
                   [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
                   is_const=True, visibility='private', is_virtual=True)
    # private virtual int64_t DoAssignStreams(int64_t stream)
    cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')],
                   visibility='private', is_virtual=True)
    return
def register_Ns3MobilityBuildingInfo_methods(root_module, cls):
    """Bind ns3::MobilityBuildingInfo (mobility-building-info.h, module 'buildings')."""
    # Copy, default, and Ptr<Building> constructors.
    cls.add_constructor([param('ns3::MobilityBuildingInfo const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ptr< ns3::Building >', 'building')])
    # ns3::Ptr<ns3::Building> GetBuilding()
    cls.add_method('GetBuilding', 'ns3::Ptr< ns3::Building >', [])
    # Room/floor position accessors.
    cls.add_method('GetFloorNumber', 'uint8_t', [])
    cls.add_method('GetRoomNumberX', 'uint8_t', [])
    cls.add_method('GetRoomNumberY', 'uint8_t', [])
    # static ns3::TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # Indoor/outdoor state queries.
    cls.add_method('IsIndoor', 'bool', [])
    cls.add_method('IsOutdoor', 'bool', [])
    # void SetIndoor(Ptr<Building> building, uint8_t nfloor, uint8_t nroomx, uint8_t nroomy)
    cls.add_method('SetIndoor', 'void',
                   [param('ns3::Ptr< ns3::Building >', 'building'), param('uint8_t', 'nfloor'), param('uint8_t', 'nroomx'), param('uint8_t', 'nroomy')])
    # void SetIndoor(uint8_t nfloor, uint8_t nroomx, uint8_t nroomy)
    cls.add_method('SetIndoor', 'void',
                   [param('uint8_t', 'nfloor'), param('uint8_t', 'nroomx'), param('uint8_t', 'nroomy')])
    # void SetOutdoor()
    cls.add_method('SetOutdoor', 'void', [])
    return
def register_Ns3NakagamiPropagationLossModel_methods(root_module, cls):
    """Bind ns3::NakagamiPropagationLossModel (propagation-loss-model.h, module 'propagation')."""
    # static ns3::TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # NakagamiPropagationLossModel() [constructor]
    cls.add_constructor([])
    # private virtual double DoCalcRxPower(double txPowerDbm, Ptr<MobilityModel> a, Ptr<MobilityModel> b) const
    cls.add_method('DoCalcRxPower', 'double',
                   [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
                   is_const=True, visibility='private', is_virtual=True)
    # private virtual int64_t DoAssignStreams(int64_t stream)
    cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')],
                   visibility='private', is_virtual=True)
    return
def register_Ns3NetDevice_methods(root_module, cls):
    """Bind the abstract ns3::NetDevice interface (net-device.h, module 'network').

    Most methods are pure virtual: NetDevice is the base class all
    concrete device bindings override.
    """
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::NetDevice const &', 'arg0')])
    # pure virtual void AddLinkChangeCallback(Callback<void> callback)
    cls.add_method('AddLinkChangeCallback', 'void',
                   [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')],
                   is_pure_virtual=True, is_virtual=True)
    # Pure virtual const accessors.
    cls.add_method('GetAddress', 'ns3::Address', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetBroadcast', 'ns3::Address', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetChannel', 'ns3::Ptr< ns3::Channel >', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetIfIndex', 'uint32_t', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetMtu', 'uint16_t', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    # pure virtual ns3::Address GetMulticast(...) const — IPv4 and IPv6 overloads.
    cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv4Address', 'multicastGroup')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv6Address', 'addr')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    # static ns3::TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # Pure virtual capability predicates (all const).
    cls.add_method('IsBridge', 'bool', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('IsBroadcast', 'bool', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('IsLinkUp', 'bool', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('IsMulticast', 'bool', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('IsPointToPoint', 'bool', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('NeedsArp', 'bool', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    # pure virtual bool Send(Ptr<Packet> packet, Address const & dest, uint16_t protocolNumber)
    cls.add_method('Send', 'bool',
                   [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
                   is_pure_virtual=True, is_virtual=True)
    # pure virtual bool SendFrom(Ptr<Packet> packet, Address const & source, Address const & dest, uint16_t protocolNumber)
    cls.add_method('SendFrom', 'bool',
                   [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
                   is_pure_virtual=True, is_virtual=True)
    # Pure virtual mutators.
    cls.add_method('SetAddress', 'void', [param('ns3::Address', 'address')],
                   is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetIfIndex', 'void', [param('uint32_t const', 'index')],
                   is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetMtu', 'bool', [param('uint16_t const', 'mtu')],
                   is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')],
                   is_pure_virtual=True, is_virtual=True)
    # pure virtual void SetPromiscReceiveCallback(PromiscReceiveCallback cb)
    cls.add_method('SetPromiscReceiveCallback', 'void',
                   [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, short unsigned int, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
                   is_pure_virtual=True, is_virtual=True)
    # pure virtual void SetReceiveCallback(ReceiveCallback cb)
    cls.add_method('SetReceiveCallback', 'void',
                   [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, short unsigned int, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
                   is_pure_virtual=True, is_virtual=True)
    # pure virtual bool SupportsSendFrom() const
    cls.add_method('SupportsSendFrom', 'bool', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3Node_methods(root_module, cls):
    """Register ns3::Node constructors and member functions on *cls*.

    Auto-generated pybindgen scaffolding: each add_constructor/add_method
    call mirrors a declaration in node.h (module 'network').
    """
    ## node.h (module 'network'): ns3::Node::Node(ns3::Node const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Node const &', 'arg0')])
    ## node.h (module 'network'): ns3::Node::Node() [constructor]
    cls.add_constructor([])
    ## node.h (module 'network'): ns3::Node::Node(uint32_t systemId) [constructor]
    cls.add_constructor([param('uint32_t', 'systemId')])
    ## node.h (module 'network'): uint32_t ns3::Node::AddApplication(ns3::Ptr<ns3::Application> application) [member function]
    cls.add_method('AddApplication',
                   'uint32_t',
                   [param('ns3::Ptr< ns3::Application >', 'application')])
    ## node.h (module 'network'): uint32_t ns3::Node::AddDevice(ns3::Ptr<ns3::NetDevice> device) [member function]
    cls.add_method('AddDevice',
                   'uint32_t',
                   [param('ns3::Ptr< ns3::NetDevice >', 'device')])
    ## node.h (module 'network'): static bool ns3::Node::ChecksumEnabled() [member function]
    cls.add_method('ChecksumEnabled',
                   'bool',
                   [],
                   is_static=True)
    ## node.h (module 'network'): ns3::Ptr<ns3::Application> ns3::Node::GetApplication(uint32_t index) const [member function]
    cls.add_method('GetApplication',
                   'ns3::Ptr< ns3::Application >',
                   [param('uint32_t', 'index')],
                   is_const=True)
    ## node.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Node::GetDevice(uint32_t index) const [member function]
    cls.add_method('GetDevice',
                   'ns3::Ptr< ns3::NetDevice >',
                   [param('uint32_t', 'index')],
                   is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetId() const [member function]
    cls.add_method('GetId',
                   'uint32_t',
                   [],
                   is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetNApplications() const [member function]
    cls.add_method('GetNApplications',
                   'uint32_t',
                   [],
                   is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetNDevices() const [member function]
    cls.add_method('GetNDevices',
                   'uint32_t',
                   [],
                   is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetSystemId() const [member function]
    cls.add_method('GetSystemId',
                   'uint32_t',
                   [],
                   is_const=True)
    ## node.h (module 'network'): static ns3::TypeId ns3::Node::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## node.h (module 'network'): void ns3::Node::RegisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function]
    cls.add_method('RegisterDeviceAdditionListener',
                   'void',
                   [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
    ## node.h (module 'network'): void ns3::Node::RegisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler, uint16_t protocolType, ns3::Ptr<ns3::NetDevice> device, bool promiscuous=false) [member function]
    cls.add_method('RegisterProtocolHandler',
                   'void',
                   [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler'), param('uint16_t', 'protocolType'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'promiscuous', default_value='false')])
    ## node.h (module 'network'): void ns3::Node::UnregisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function]
    cls.add_method('UnregisterDeviceAdditionListener',
                   'void',
                   [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
    ## node.h (module 'network'): void ns3::Node::UnregisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler) [member function]
    cls.add_method('UnregisterProtocolHandler',
                   'void',
                   [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler')])
    ## node.h (module 'network'): void ns3::Node::DoDispose() [member function]
    cls.add_method('DoDispose',
                   'void',
                   [],
                   visibility='protected', is_virtual=True)
    ## node.h (module 'network'): void ns3::Node::DoInitialize() [member function]
    cls.add_method('DoInitialize',
                   'void',
                   [],
                   visibility='protected', is_virtual=True)
    return
def register_Ns3NormalRandomVariable_methods(root_module, cls):
    """Register ns3::NormalRandomVariable bindings (random-variable-stream.h, module 'core').

    Auto-generated pybindgen scaffolding; registers the INFINITE_VALUE
    static attribute, the constructor, accessors, and both the
    parameterized and parameterless GetValue/GetInteger overloads.
    """
    ## random-variable-stream.h (module 'core'): ns3::NormalRandomVariable::INFINITE_VALUE [variable]
    cls.add_static_attribute('INFINITE_VALUE', 'double const', is_const=True)
    ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::NormalRandomVariable::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## random-variable-stream.h (module 'core'): ns3::NormalRandomVariable::NormalRandomVariable() [constructor]
    cls.add_constructor([])
    ## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetMean() const [member function]
    cls.add_method('GetMean',
                   'double',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetVariance() const [member function]
    cls.add_method('GetVariance',
                   'double',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetBound() const [member function]
    cls.add_method('GetBound',
                   'double',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetValue(double mean, double variance, double bound=ns3::NormalRandomVariable::INFINITE_VALUE) [member function]
    cls.add_method('GetValue',
                   'double',
                   [param('double', 'mean'), param('double', 'variance'), param('double', 'bound', default_value='ns3::NormalRandomVariable::INFINITE_VALUE')])
    ## random-variable-stream.h (module 'core'): uint32_t ns3::NormalRandomVariable::GetInteger(uint32_t mean, uint32_t variance, uint32_t bound) [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [param('uint32_t', 'mean'), param('uint32_t', 'variance'), param('uint32_t', 'bound')])
    ## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetValue() [member function]
    cls.add_method('GetValue',
                   'double',
                   [],
                   is_virtual=True)
    ## random-variable-stream.h (module 'core'): uint32_t ns3::NormalRandomVariable::GetInteger() [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [],
                   is_virtual=True)
    return
def register_Ns3ObjectFactoryChecker_methods(root_module, cls):
    """Register ns3::ObjectFactoryChecker constructors (object-factory.h, module 'core')."""
    ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker() [constructor]
    cls.add_constructor([])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker(ns3::ObjectFactoryChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectFactoryChecker const &', 'arg0')])
    return
def register_Ns3ObjectFactoryValue_methods(root_module, cls):
    """Register ns3::ObjectFactoryValue bindings (object-factory.h, module 'core').

    Standard ns-3 AttributeValue surface: constructors, Copy,
    Deserialize/Serialize, and Get/Set for the wrapped ObjectFactory.
    """
    ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue() [constructor]
    cls.add_constructor([])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactoryValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectFactoryValue const &', 'arg0')])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactory const & value) [constructor]
    cls.add_constructor([param('ns3::ObjectFactory const &', 'value')])
    ## object-factory.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::ObjectFactoryValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## object-factory.h (module 'core'): bool ns3::ObjectFactoryValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## object-factory.h (module 'core'): ns3::ObjectFactory ns3::ObjectFactoryValue::Get() const [member function]
    cls.add_method('Get',
                   'ns3::ObjectFactory',
                   [],
                   is_const=True)
    ## object-factory.h (module 'core'): std::string ns3::ObjectFactoryValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## object-factory.h (module 'core'): void ns3::ObjectFactoryValue::Set(ns3::ObjectFactory const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::ObjectFactory const &', 'value')])
    return
def register_Ns3OhBuildingsPropagationLossModel_methods(root_module, cls):
    """Register ns3::OhBuildingsPropagationLossModel bindings (oh-buildings-propagation-loss-model.h, module 'buildings')."""
    ## oh-buildings-propagation-loss-model.h (module 'buildings'): static ns3::TypeId ns3::OhBuildingsPropagationLossModel::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## oh-buildings-propagation-loss-model.h (module 'buildings'): ns3::OhBuildingsPropagationLossModel::OhBuildingsPropagationLossModel() [constructor]
    cls.add_constructor([])
    ## oh-buildings-propagation-loss-model.h (module 'buildings'): double ns3::OhBuildingsPropagationLossModel::GetLoss(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
    cls.add_method('GetLoss',
                   'double',
                   [param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
                   is_const=True, is_virtual=True)
    return
def register_Ns3ParetoRandomVariable_methods(root_module, cls):
    """Register ns3::ParetoRandomVariable bindings (random-variable-stream.h, module 'core').

    Auto-generated pybindgen scaffolding; registers the constructor,
    accessors, and both the parameterized and parameterless
    GetValue/GetInteger overloads.
    """
    ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ParetoRandomVariable::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## random-variable-stream.h (module 'core'): ns3::ParetoRandomVariable::ParetoRandomVariable() [constructor]
    cls.add_constructor([])
    ## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetMean() const [member function]
    cls.add_method('GetMean',
                   'double',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetShape() const [member function]
    cls.add_method('GetShape',
                   'double',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetBound() const [member function]
    cls.add_method('GetBound',
                   'double',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetValue(double mean, double shape, double bound) [member function]
    cls.add_method('GetValue',
                   'double',
                   [param('double', 'mean'), param('double', 'shape'), param('double', 'bound')])
    ## random-variable-stream.h (module 'core'): uint32_t ns3::ParetoRandomVariable::GetInteger(uint32_t mean, uint32_t shape, uint32_t bound) [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [param('uint32_t', 'mean'), param('uint32_t', 'shape'), param('uint32_t', 'bound')])
    ## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetValue() [member function]
    cls.add_method('GetValue',
                   'double',
                   [],
                   is_virtual=True)
    ## random-variable-stream.h (module 'core'): uint32_t ns3::ParetoRandomVariable::GetInteger() [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [],
                   is_virtual=True)
    return
def register_Ns3TimeValue_methods(root_module, cls):
    """Register ns3::TimeValue bindings (nstime.h, module 'core').

    Standard ns-3 AttributeValue surface: constructors, Copy,
    Deserialize/Serialize, and Get/Set for the wrapped Time.
    """
    ## nstime.h (module 'core'): ns3::TimeValue::TimeValue() [constructor]
    cls.add_constructor([])
    ## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::TimeValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TimeValue const &', 'arg0')])
    ## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::Time const & value) [constructor]
    cls.add_constructor([param('ns3::Time const &', 'value')])
    ## nstime.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TimeValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## nstime.h (module 'core'): bool ns3::TimeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## nstime.h (module 'core'): ns3::Time ns3::TimeValue::Get() const [member function]
    cls.add_method('Get',
                   'ns3::Time',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): std::string ns3::TimeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## nstime.h (module 'core'): void ns3::TimeValue::Set(ns3::Time const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::Time const &', 'value')])
    return
def register_Ns3TypeIdChecker_methods(root_module, cls):
    """Register ns3::TypeIdChecker constructors (type-id.h, module 'core')."""
    ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')])
    return
def register_Ns3TypeIdValue_methods(root_module, cls):
    """Register ns3::TypeIdValue bindings (type-id.h, module 'core').

    Standard ns-3 AttributeValue surface: constructors, Copy,
    Deserialize/Serialize, and Get/Set for the wrapped TypeId.
    """
    ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
    ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor]
    cls.add_constructor([param('ns3::TypeId const &', 'value')])
    ## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function]
    cls.add_method('Get',
                   'ns3::TypeId',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::TypeId const &', 'value')])
    return
def register_Ns3Vector2DChecker_methods(root_module, cls):
    """Register ns3::Vector2DChecker constructors (vector.h, module 'core')."""
    ## vector.h (module 'core'): ns3::Vector2DChecker::Vector2DChecker() [constructor]
    cls.add_constructor([])
    ## vector.h (module 'core'): ns3::Vector2DChecker::Vector2DChecker(ns3::Vector2DChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Vector2DChecker const &', 'arg0')])
    return
def register_Ns3Vector2DValue_methods(root_module, cls):
    """Register ns3::Vector2DValue bindings (vector.h, module 'core').

    Standard ns-3 AttributeValue surface: constructors, Copy,
    Deserialize/Serialize, and Get/Set for the wrapped Vector2D.
    """
    ## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue() [constructor]
    cls.add_constructor([])
    ## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue(ns3::Vector2DValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Vector2DValue const &', 'arg0')])
    ## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue(ns3::Vector2D const & value) [constructor]
    cls.add_constructor([param('ns3::Vector2D const &', 'value')])
    ## vector.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::Vector2DValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## vector.h (module 'core'): bool ns3::Vector2DValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## vector.h (module 'core'): ns3::Vector2D ns3::Vector2DValue::Get() const [member function]
    cls.add_method('Get',
                   'ns3::Vector2D',
                   [],
                   is_const=True)
    ## vector.h (module 'core'): std::string ns3::Vector2DValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## vector.h (module 'core'): void ns3::Vector2DValue::Set(ns3::Vector2D const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::Vector2D const &', 'value')])
    return
def register_Ns3Vector3DChecker_methods(root_module, cls):
    """Register ns3::Vector3DChecker constructors (vector.h, module 'core')."""
    ## vector.h (module 'core'): ns3::Vector3DChecker::Vector3DChecker() [constructor]
    cls.add_constructor([])
    ## vector.h (module 'core'): ns3::Vector3DChecker::Vector3DChecker(ns3::Vector3DChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Vector3DChecker const &', 'arg0')])
    return
def register_Ns3Vector3DValue_methods(root_module, cls):
    """Register ns3::Vector3DValue bindings (vector.h, module 'core').

    Standard ns-3 AttributeValue surface: constructors, Copy,
    Deserialize/Serialize, and Get/Set for the wrapped Vector3D.
    """
    ## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue() [constructor]
    cls.add_constructor([])
    ## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue(ns3::Vector3DValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Vector3DValue const &', 'arg0')])
    ## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue(ns3::Vector3D const & value) [constructor]
    cls.add_constructor([param('ns3::Vector3D const &', 'value')])
    ## vector.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::Vector3DValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## vector.h (module 'core'): bool ns3::Vector3DValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## vector.h (module 'core'): ns3::Vector3D ns3::Vector3DValue::Get() const [member function]
    cls.add_method('Get',
                   'ns3::Vector3D',
                   [],
                   is_const=True)
    ## vector.h (module 'core'): std::string ns3::Vector3DValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## vector.h (module 'core'): void ns3::Vector3DValue::Set(ns3::Vector3D const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::Vector3D const &', 'value')])
    return
def register_Ns3AddressChecker_methods(root_module, cls):
    """Register ns3::AddressChecker constructors (address.h, module 'network')."""
    ## address.h (module 'network'): ns3::AddressChecker::AddressChecker() [constructor]
    cls.add_constructor([])
    ## address.h (module 'network'): ns3::AddressChecker::AddressChecker(ns3::AddressChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AddressChecker const &', 'arg0')])
    return
def register_Ns3AddressValue_methods(root_module, cls):
    """Register ns3::AddressValue bindings (address.h, module 'network').

    Standard ns-3 AttributeValue surface: constructors, Copy,
    Deserialize/Serialize, and Get/Set for the wrapped Address.
    """
    ## address.h (module 'network'): ns3::AddressValue::AddressValue() [constructor]
    cls.add_constructor([])
    ## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::AddressValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AddressValue const &', 'arg0')])
    ## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::Address const & value) [constructor]
    cls.add_constructor([param('ns3::Address const &', 'value')])
    ## address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::AddressValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## address.h (module 'network'): bool ns3::AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## address.h (module 'network'): ns3::Address ns3::AddressValue::Get() const [member function]
    cls.add_method('Get',
                   'ns3::Address',
                   [],
                   is_const=True)
    ## address.h (module 'network'): std::string ns3::AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## address.h (module 'network'): void ns3::AddressValue::Set(ns3::Address const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::Address const &', 'value')])
    return
def register_Ns3HashImplementation_methods(root_module, cls):
    """Register the abstract ns3::Hash::Implementation interface (hash-function.h, module 'core').

    GetHash32 and clear are pure virtual; GetHash64 has a default
    implementation and is registered as plain virtual.
    """
    ## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation(ns3::Hash::Implementation const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Hash::Implementation const &', 'arg0')])
    ## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation() [constructor]
    cls.add_constructor([])
    ## hash-function.h (module 'core'): uint32_t ns3::Hash::Implementation::GetHash32(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash32',
                   'uint32_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')],
                   is_pure_virtual=True, is_virtual=True)
    ## hash-function.h (module 'core'): uint64_t ns3::Hash::Implementation::GetHash64(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash64',
                   'uint64_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')],
                   is_virtual=True)
    ## hash-function.h (module 'core'): void ns3::Hash::Implementation::clear() [member function]
    cls.add_method('clear',
                   'void',
                   [],
                   is_pure_virtual=True, is_virtual=True)
    return
def register_Ns3HashFunctionFnv1a_methods(root_module, cls):
    """Register ns3::Hash::Function::Fnv1a bindings (hash-fnv.h, module 'core')."""
    ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a(ns3::Hash::Function::Fnv1a const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Hash::Function::Fnv1a const &', 'arg0')])
    ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a() [constructor]
    cls.add_constructor([])
    ## hash-fnv.h (module 'core'): uint32_t ns3::Hash::Function::Fnv1a::GetHash32(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash32',
                   'uint32_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')],
                   is_virtual=True)
    ## hash-fnv.h (module 'core'): uint64_t ns3::Hash::Function::Fnv1a::GetHash64(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash64',
                   'uint64_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')],
                   is_virtual=True)
    ## hash-fnv.h (module 'core'): void ns3::Hash::Function::Fnv1a::clear() [member function]
    cls.add_method('clear',
                   'void',
                   [],
                   is_virtual=True)
    return
def register_Ns3HashFunctionHash32_methods(root_module, cls):
    """Register ns3::Hash::Function::Hash32 bindings (hash-function.h, module 'core').

    Wraps a Hash32Function_ptr callback; only GetHash32 and clear are
    registered (no 64-bit variant for this adaptor).
    """
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Function::Hash32 const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Hash::Function::Hash32 const &', 'arg0')])
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Hash32Function_ptr hp) [constructor]
    cls.add_constructor([param('ns3::Hash::Hash32Function_ptr', 'hp')])
    ## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash32::GetHash32(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash32',
                   'uint32_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')],
                   is_virtual=True)
    ## hash-function.h (module 'core'): void ns3::Hash::Function::Hash32::clear() [member function]
    cls.add_method('clear',
                   'void',
                   [],
                   is_virtual=True)
    return
def register_Ns3HashFunctionHash64_methods(root_module, cls):
    """Register ns3::Hash::Function::Hash64 bindings (hash-function.h, module 'core')."""
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Function::Hash64 const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Hash::Function::Hash64 const &', 'arg0')])
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Hash64Function_ptr hp) [constructor]
    cls.add_constructor([param('ns3::Hash::Hash64Function_ptr', 'hp')])
    ## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash64::GetHash32(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash32',
                   'uint32_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')],
                   is_virtual=True)
    ## hash-function.h (module 'core'): uint64_t ns3::Hash::Function::Hash64::GetHash64(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash64',
                   'uint64_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')],
                   is_virtual=True)
    ## hash-function.h (module 'core'): void ns3::Hash::Function::Hash64::clear() [member function]
    cls.add_method('clear',
                   'void',
                   [],
                   is_virtual=True)
    return
def register_Ns3HashFunctionMurmur3_methods(root_module, cls):
    """Register ns3::Hash::Function::Murmur3 bindings (hash-murmur3.h, module 'core')."""
    ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3(ns3::Hash::Function::Murmur3 const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Hash::Function::Murmur3 const &', 'arg0')])
    ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3() [constructor]
    cls.add_constructor([])
    ## hash-murmur3.h (module 'core'): uint32_t ns3::Hash::Function::Murmur3::GetHash32(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash32',
                   'uint32_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')],
                   is_virtual=True)
    ## hash-murmur3.h (module 'core'): uint64_t ns3::Hash::Function::Murmur3::GetHash64(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash64',
                   'uint64_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')],
                   is_virtual=True)
    ## hash-murmur3.h (module 'core'): void ns3::Hash::Function::Murmur3::clear() [member function]
    cls.add_method('clear',
                   'void',
                   [],
                   is_virtual=True)
    return
def register_functions(root_module):
    """Register free functions for each ns-3 submodule of *root_module*."""
    module = root_module
    register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module)
    register_functions_ns3_Hash(module.get_submodule('Hash'), root_module)
    return
def register_functions_ns3_FatalImpl(module, root_module):
    """No free functions to register for the ns3::FatalImpl submodule."""
    return
def register_functions_ns3_Hash(module, root_module):
    """Register free functions of the ns3::Hash submodule (delegates to Hash::Function)."""
    register_functions_ns3_Hash_Function(module.get_submodule('Function'), root_module)
    return
def register_functions_ns3_Hash_Function(module, root_module):
    """No free functions to register for the ns3::Hash::Function submodule."""
    return
def main():
    """Build the module description and emit the generated C++ bindings to stdout.

    Registration order matters: types must exist before methods, and
    methods before free functions.
    """
    out = FileCodeSink(sys.stdout)
    root_module = module_init()
    register_types(root_module)
    register_methods(root_module)
    register_functions(root_module)
    root_module.generate(out)

if __name__ == '__main__':
    main()
| gpl-2.0 |
NavyaJayaram/MyRepository | SoundCloudUsingAJS/lib/python2.7/site-packages/pip/_vendor/requests/utils.py | 222 | 19653 | # -*- coding: utf-8 -*-
"""
requests.utils
~~~~~~~~~~~~~~
This module provides utility functions that are used within Requests
that are also useful for external consumption.
"""
import cgi
import codecs
import collections
import io
import os
import platform
import re
import sys
import socket
import struct
from . import __version__
from . import certs
from .compat import parse_http_list as _parse_list_header
from .compat import (quote, urlparse, bytes, str, OrderedDict, unquote, is_py2,
builtin_str, getproxies, proxy_bypass)
from .cookies import RequestsCookieJar, cookiejar_from_dict
from .structures import CaseInsensitiveDict
from .exceptions import MissingSchema, InvalidURL
# Referencing RequestsCookieJar keeps pyflakes from flagging the import
# above as unused (the name is imported for re-export).
_hush_pyflakes = (RequestsCookieJar,)

# Candidate netrc file names probed (in order) in the user's home
# directory by get_netrc_auth(); '_netrc' is the alternate spelling
# used on some platforms.
NETRC_FILES = ('.netrc', '_netrc')

# Path to the bundled CA certificate file reported by the vendored certs module.
DEFAULT_CA_BUNDLE_PATH = certs.where()
def dict_to_sequence(d):
    """Returns an internal sequence dictionary update.

    Mapping-like objects (anything exposing ``items``) are converted to
    their key/value pair sequence; everything else passes through untouched.
    """
    return d.items() if hasattr(d, 'items') else d
def super_len(o):
    """Best-effort length of *o*.

    Tries, in order: ``len(o)``, an explicit ``o.len`` attribute, the size
    reported by ``os.fstat`` on ``o.fileno()``, and finally
    ``len(o.getvalue())`` for in-memory file objects.  Returns ``None``
    when no strategy applies.
    """
    if hasattr(o, '__len__'):
        return len(o)

    if hasattr(o, 'len'):
        return o.len

    if hasattr(o, 'fileno'):
        try:
            descriptor = o.fileno()
        except io.UnsupportedOperation:
            # In-memory streams define fileno() but refuse to provide one.
            pass
        else:
            return os.fstat(descriptor).st_size

    if hasattr(o, 'getvalue'):
        # e.g. BytesIO, cStringIO.StringI
        return len(o.getvalue())
def get_netrc_auth(url):
    """Returns the Requests tuple auth for a given url from netrc.

    Looks for a ~/.netrc or ~/_netrc file, matches the URL's host against
    its entries, and returns a ``(login, password)`` tuple, or ``None``
    when no file/entry exists or anything goes wrong (lookup is strictly
    best-effort and never raises).
    """
    try:
        from netrc import netrc, NetrcParseError

        netrc_path = None

        # Probe the candidate file names; first existing one wins.
        for f in NETRC_FILES:
            try:
                loc = os.path.expanduser('~/{0}'.format(f))
            except KeyError:
                # os.path.expanduser can fail when $HOME is undefined and
                # getpwuid fails. See http://bugs.python.org/issue20164 &
                # https://github.com/kennethreitz/requests/issues/1846
                return

            if os.path.exists(loc):
                netrc_path = loc
                break

        # Abort early if there isn't one.
        if netrc_path is None:
            return

        ri = urlparse(url)

        # Strip port numbers from netloc
        host = ri.netloc.split(':')[0]

        try:
            _netrc = netrc(netrc_path).authenticators(host)
            if _netrc:
                # Return with login / password; netrc entries with an empty
                # login fall back to the account field (index 1).
                login_i = (0 if _netrc[0] else 1)
                return (_netrc[login_i], _netrc[2])
        except (NetrcParseError, IOError):
            # If there was a parsing error or a permissions issue reading the file,
            # we'll just skip netrc auth
            pass

    # AppEngine hackiness.
    except (ImportError, AttributeError):
        pass
def guess_filename(obj):
    """Tries to guess the filename of the given object.

    Uses the object's ``name`` attribute when it looks like a real path
    (i.e. not a pseudo-name such as ``'<stdin>'``); returns ``None`` otherwise.
    """
    candidate = getattr(obj, 'name', None)
    if not candidate:
        return None
    if candidate[0] == '<' or candidate[-1] == '>':
        return None
    return os.path.basename(candidate)
def from_key_val_list(value):
    """Convert *value* into an :class:`OrderedDict` if possible.

    ``None`` passes through unchanged; scalar inputs are rejected because
    they cannot be interpreted as a sequence of 2-tuples.

    ::

        >>> from_key_val_list([('key', 'val')])
        OrderedDict([('key', 'val')])
        >>> from_key_val_list('string')
        ValueError: need more than 1 value to unpack
        >>> from_key_val_list({'key': 'val'})
        OrderedDict([('key', 'val')])
    """
    if value is None:
        return None

    scalar_types = (str, bytes, bool, int)
    if isinstance(value, scalar_types):
        raise ValueError('cannot encode objects that are not 2-tuples')

    return OrderedDict(value)
def to_key_val_list(value):
    """Take an object and test to see if it can be represented as a
    dictionary. If it can be, return a list of tuples, e.g.,

    ::

        >>> to_key_val_list([('key', 'val')])
        [('key', 'val')]
        >>> to_key_val_list({'key': 'val'})
        [('key', 'val')]
        >>> to_key_val_list('string')
        ValueError: cannot encode objects that are not 2-tuples.

    :param value: mapping or iterable of 2-tuples, or ``None``.
    :return: list of 2-tuples, or ``None`` when *value* is ``None``.
    :raises ValueError: when *value* is a scalar (str, bytes, bool, int).
    """
    if value is None:
        return None

    if isinstance(value, (str, bytes, bool, int)):
        raise ValueError('cannot encode objects that are not 2-tuples')

    # collections.Mapping was removed in Python 3.10 (it raised an
    # AttributeError here for any mapping input); use collections.abc
    # with a fallback for interpreters that predate it (Python 2).
    try:
        from collections.abc import Mapping
    except ImportError:  # Python 2
        from collections import Mapping

    if isinstance(value, Mapping):
        value = value.items()

    return list(value)
# From mitsuhiko/werkzeug (used with permission).
def parse_list_header(value):
    """Parse lists as described by RFC 2068 Section 2.

    Comma-separated list elements may be quoted-strings (which may
    themselves contain commas); quotes are stripped after parsing.
    Unlike :func:`parse_set_header`, duplicates and case are preserved.

    The return value is a standard :class:`list`:

    >>> parse_list_header('token, "quoted value"')
    ['token', 'quoted value']

    Use :func:`dump_header` to build a header back from the list.

    :param value: a string with a list header.
    :return: :class:`list`
    """
    return [
        unquote_header_value(item[1:-1]) if item[:1] == item[-1:] == '"' else item
        for item in _parse_list_header(value)
    ]
# From mitsuhiko/werkzeug (used with permission).
def parse_dict_header(value):
    """Parse lists of key, value pairs as described by RFC 2068 Section 2 and
    convert them into a python dict:

    >>> d = parse_dict_header('foo="is a fish", bar="as well"')
    >>> type(d) is dict
    True
    >>> sorted(d.items())
    [('bar', 'as well'), ('foo', 'is a fish')]

    If there is no value for a key it will be `None`:

    >>> parse_dict_header('key_without_value')
    {'key_without_value': None}

    :param value: a string with a dict header.
    :return: :class:`dict`
    """
    pairs = {}
    for item in _parse_list_header(value):
        if '=' not in item:
            # Bare token with no value at all.
            pairs[item] = None
        else:
            key, val = item.split('=', 1)
            if val[:1] == val[-1:] == '"':
                val = unquote_header_value(val[1:-1])
            pairs[key] = val
    return pairs
# From mitsuhiko/werkzeug (used with permission).
def unquote_header_value(value, is_filename=False):
    r"""Unquotes a header value. (Reversal of :func:`quote_header_value`).

    This mimics what browsers actually do rather than strict RFC
    unquoting.

    :param value: the header value to unquote.
    :param is_filename: when True, leave UNC paths (``\\host\share``)
        untouched so the leading double backslash survives.
    """
    if not value or not (value[0] == value[-1] == '"'):
        # Unquoted (or empty) values pass through unchanged.
        return value
    # Strip the surrounding quotes. Doing full RFC unquoting here would
    # break Internet Explorer uploads, which send filenames such as
    # "C:\foo\bar.txt" unescaped.
    inner = value[1:-1]
    if is_filename and inner[:2] == '\\\\':
        # Looks like a UNC path; collapsing the leading double backslash
        # below would corrupt it (see werkzeug #458).
        return inner
    return inner.replace('\\\\', '\\').replace('\\"', '"')
def dict_from_cookiejar(cj):
    """Returns a key/value dictionary from a CookieJar.

    :param cj: CookieJar object to extract cookies from.
    """
    # Later cookies with the same name overwrite earlier ones, matching
    # plain-dict assignment order.
    return {cookie.name: cookie.value for cookie in cj}
def add_dict_to_cookiejar(cj, cookie_dict):
    """Returns a CookieJar from a key/value dictionary.

    :param cj: CookieJar to insert cookies into.
    :param cookie_dict: Dict of key/values to insert into CookieJar.
    """
    # Convert the plain dict into a jar first, then merge it in.
    cj.update(cookiejar_from_dict(cookie_dict))
    return cj
def get_encodings_from_content(content):
    """Returns encodings from given content string.

    Collects charset declarations from HTML meta tags (both the HTML5
    ``charset`` attribute and the legacy http-equiv pragma) and from an
    XML declaration, in that order.

    :param content: bytestring to extract encodings from.
    """
    finders = (
        re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I),
        re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I),
        re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]'),
    )
    found = []
    for pattern in finders:
        found.extend(pattern.findall(content))
    return found
def get_encoding_from_headers(headers):
    """Returns encodings from given HTTP Header Dict.

    :param headers: dictionary to extract encoding from.
    :return: the ``charset`` parameter of the Content-Type header if
        present, the HTTP default of ``ISO-8859-1`` for ``text/*``
        types, otherwise None.
    """
    content_type = headers.get('content-type')
    if not content_type:
        return None
    # Parse "type/subtype; key=value; ..." by hand. The `cgi` module that
    # previously did this was deprecated and removed from the standard
    # library in Python 3.13 (PEP 594).
    content_type, _, param_str = content_type.partition(';')
    content_type = content_type.strip()
    params = {}
    for segment in param_str.split(';'):
        key, _, val = segment.strip().partition('=')
        if key:
            params[key.lower()] = val
    if 'charset' in params:
        # Strip surrounding quotes from e.g. charset="utf-8".
        return params['charset'].strip("'\"")
    if 'text' in content_type:
        # RFC 2616 declares ISO-8859-1 the default charset for text types.
        return 'ISO-8859-1'
def stream_decode_response_unicode(iterator, r):
    """Stream decodes a iterator.

    Yields text decoded incrementally with ``r.encoding``; when no
    encoding is known the chunks are passed through unchanged.
    """
    encoding = r.encoding
    if encoding is None:
        # Nothing to decode with: forward the raw chunks.
        for chunk in iterator:
            yield chunk
        return
    decoder = codecs.getincrementaldecoder(encoding)(errors='replace')
    for chunk in iterator:
        decoded = decoder.decode(chunk)
        if decoded:
            yield decoded
    # Flush any bytes the incremental decoder is still buffering.
    tail = decoder.decode(b'', final=True)
    if tail:
        yield tail
def iter_slices(string, slice_length):
    """Iterate over slices of a string.

    Yields consecutive substrings of at most ``slice_length`` characters;
    the final slice may be shorter.
    """
    cursor = 0
    total = len(string)
    while cursor < total:
        yield string[cursor:cursor + slice_length]
        cursor += slice_length
def get_unicode_from_response(r):
    """Returns the requested content back in unicode.

    :param r: Response object to get unicode content from.

    Tried:
    1. strict decode with the charset from the content-type header
    2. fall back and replace all unicode characters
    """
    attempted = []
    charset = get_encoding_from_headers(r.headers)
    if charset:
        try:
            return str(r.content, charset)
        except UnicodeError:
            attempted.append(charset)
    # Lenient fallback. When no charset was found at all, str() raises
    # TypeError (encoding is None) and the raw content is returned.
    try:
        return str(r.content, charset, errors='replace')
    except TypeError:
        return r.content
# The unreserved URI characters (RFC 3986)
# Section 2.3: these characters never need percent-encoding in a URI.
UNRESERVED_SET = frozenset(
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
    + "0123456789-._~")
def unquote_unreserved(uri):
    """Un-escape any percent-escape sequences in a URI that are unreserved
    characters. This leaves all reserved, illegal and non-ASCII bytes encoded.

    :raises InvalidURL: when a %xx escape has a malformed hex pair.
    """
    # Splitting on '%' puts the two hex digits (if any) at the start of
    # each segment after the first.
    segments = uri.split('%')
    for idx in range(1, len(segments)):
        segment = segments[idx]
        hex_pair = segment[0:2]
        if len(hex_pair) == 2 and hex_pair.isalnum():
            try:
                char = chr(int(hex_pair, 16))
            except ValueError:
                raise InvalidURL("Invalid percent-escape sequence: '%s'" % hex_pair)
            if char in UNRESERVED_SET:
                # Safe to decode: replace the escape with the literal char.
                segments[idx] = char + segment[2:]
            else:
                segments[idx] = '%' + segment
        else:
            segments[idx] = '%' + segment
    return ''.join(segments)
def requote_uri(uri):
    """Re-quote the given URI.

    This function passes the given URI through an unquote/quote cycle to
    ensure that it is fully and consistently quoted.
    """
    # Unquote only the unreserved characters, then quote only illegal
    # characters (do not quote reserved, unreserved, or '%').
    safe_chars = "!#$%&'()*+,/:;=?@[]~"
    return quote(unquote_unreserved(uri), safe=safe_chars)
def address_in_network(ip, net):
    """
    This function allows you to check if on IP belongs to a network subnet

    Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24
             returns False if ip = 192.168.1.1 and net = 192.168.100.0/24
    """
    host_addr = struct.unpack('=L', socket.inet_aton(ip))[0]
    net_base, prefix = net.split('/')
    mask = struct.unpack('=L', socket.inet_aton(dotted_netmask(int(prefix))))[0]
    network = struct.unpack('=L', socket.inet_aton(net_base))[0]
    # Both sides masked to the network prefix; equal iff same subnet.
    return (host_addr & mask) == (network & mask)
def dotted_netmask(mask):
    """
    Converts mask from /xx format to xxx.xxx.xxx.xxx

    Example: if mask is 24 function returns 255.255.255.0
    """
    # Keep the top `mask` bits set, clear the remaining host bits.
    host_bits = 32 - mask
    numeric = (0xffffffff >> host_bits) << host_bits if host_bits else 0xffffffff
    return socket.inet_ntoa(struct.pack('>I', numeric))
def is_ipv4_address(string_ip):
    """Return True when ``string_ip`` parses as an IPv4 address."""
    try:
        socket.inet_aton(string_ip)
        return True
    except socket.error:
        return False
def is_valid_cidr(string_network):
    """Very simple check of the cidr format in no_proxy variable"""
    if string_network.count('/') != 1:
        return False
    addr_part, mask_part = string_network.split('/')
    try:
        prefix = int(mask_part)
    except ValueError:
        return False
    # Only prefixes /1 through /32 are accepted.
    if not 1 <= prefix <= 32:
        return False
    try:
        socket.inet_aton(addr_part)
    except socket.error:
        return False
    return True
def get_environ_proxies(url):
    """Return a dict of environment proxies."""
    def lookup(name):
        # Honour both lower- and upper-case environment variables.
        return os.environ.get(name) or os.environ.get(name.upper())

    # First check whether no_proxy is defined. If it is, check that the URL
    # we're getting isn't in the no_proxy list.
    no_proxy = lookup('no_proxy')
    netloc = urlparse(url).netloc
    if no_proxy:
        entries = no_proxy.replace(' ', '').split(',')
        host = netloc.split(':')[0]
        if is_ipv4_address(host):
            # IP literal: match against any CIDR entries.
            for entry in entries:
                if is_valid_cidr(entry) and address_in_network(host, entry):
                    return {}
        else:
            # Hostname: match as a suffix, with and without the port.
            for entry in entries:
                if netloc.endswith(entry) or host.endswith(entry):
                    # The URL does match something in no_proxy, so we don't
                    # want to apply the proxies on this URL.
                    return {}
    # The proxy_bypass function is incredibly buggy on OS X in early
    # versions of Python 2.6, so allow this call to fail. Only catch the
    # specific exceptions we've seen, though: this call failing in other
    # ways can reveal legitimate problems.
    try:
        bypass = proxy_bypass(netloc)
    except (TypeError, socket.gaierror):
        bypass = False
    if bypass:
        return {}
    # No bypass applies: hand back whatever the system settings define.
    return getproxies()
def default_user_agent(name="python-requests"):
    """Return a string representing the default user agent.

    Format: ``<name>/<version> <implementation>/<impl-version> <system>/<release>``.
    """
    impl = platform.python_implementation()
    if impl == 'CPython':
        impl_version = platform.python_version()
    elif impl == 'PyPy':
        info = sys.pypy_version_info
        impl_version = '%s.%s.%s' % (info.major, info.minor, info.micro)
        if info.releaselevel != 'final':
            # Append e.g. "beta" for pre-release PyPy builds.
            impl_version += info.releaselevel
    elif impl in ('Jython', 'IronPython'):
        impl_version = platform.python_version()  # Complete Guess
    else:
        impl_version = 'Unknown'
    try:
        p_system = platform.system()
        p_release = platform.release()
    except IOError:
        p_system = 'Unknown'
        p_release = 'Unknown'
    return ' '.join([
        '%s/%s' % (name, __version__),
        '%s/%s' % (impl, impl_version),
        '%s/%s' % (p_system, p_release),
    ])
def default_headers():
    """Return the CaseInsensitiveDict of headers sent by default."""
    accept_encoding = ', '.join(('gzip', 'deflate', 'compress'))
    return CaseInsensitiveDict({
        'Accept': '*/*',
        'Accept-Encoding': accept_encoding,
        'User-Agent': default_user_agent(),
    })
def parse_header_links(value):
    """Return a dict of parsed link headers proxies.

    i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"
    """
    parsed = []
    strip_set = " '\""
    for chunk in value.split(","):
        if ";" in chunk:
            raw_url, raw_params = chunk.split(";", 1)
        else:
            raw_url, raw_params = chunk, ''
        entry = {"url": raw_url.strip("<> '\"")}
        for fragment in raw_params.split(";"):
            # A fragment that is not exactly key=value ends the params
            # for this link (matches the original unpack-and-break).
            try:
                key, val = fragment.split("=")
            except ValueError:
                break
            entry[key.strip(strip_set)] = val.strip(strip_set)
        parsed.append(entry)
    return parsed
# Null bytes; no need to recreate these on each call to guess_json_utf
_null = '\x00'.encode('ascii')  # encoding to ASCII for Python 3
_null2 = _null * 2
_null3 = _null * 3


def guess_json_utf(data):
    """Guess the UTF flavour of a JSON byte string.

    JSON always starts with two ASCII characters, so the encoding can be
    determined from the count and position of null bytes in the first four
    bytes, plus any BOM that may be present.

    :param data: bytes holding the start of a JSON document.
    :return: a codec name usable with ``bytes.decode``, or None when no
        supported UTF flavour was detected.
    """
    sample = data[:4]
    # BUG FIX: the original compared against ``codecs.BOM32_BE``, a
    # deprecated alias for the 2-byte *UTF-16* big-endian BOM, so the
    # 4-byte sample never matched and UTF-32-BE input with a BOM fell
    # through to return None. ``codecs.BOM_UTF32_BE`` is correct.
    if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
        return 'utf-32'     # BOM included
    if sample[:3] == codecs.BOM_UTF8:
        return 'utf-8-sig'  # BOM included, MS style (discouraged)
    if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
        return 'utf-16'     # BOM included
    nullcount = sample.count(_null)
    if nullcount == 0:
        return 'utf-8'
    if nullcount == 2:
        if sample[::2] == _null2:   # 1st and 3rd are null
            return 'utf-16-be'
        if sample[1::2] == _null2:  # 2nd and 4th are null
            return 'utf-16-le'
        # Did not detect 2 valid UTF-16 ascii-range characters
    if nullcount == 3:
        if sample[:3] == _null3:
            return 'utf-32-be'
        if sample[1:] == _null3:
            return 'utf-32-le'
        # Did not detect a valid UTF-32 ascii-range character
    return None
def except_on_missing_scheme(url):
    """Given a URL, raise a MissingSchema exception if the scheme is missing.
    """
    if not urlparse(url).scheme:
        raise MissingSchema('Proxy URLs must have explicit schemes.')
def get_auth_from_url(url):
    """Given a url with authentication components, extract them into a tuple of
    username,password.

    Returns ('', '') when the URL carries no (or malformed) credentials.
    """
    parsed = urlparse(url)
    try:
        return (unquote(parsed.username), unquote(parsed.password))
    except (AttributeError, TypeError):
        # username/password missing (None) or parse result lacks them.
        return ('', '')
def to_native_string(string, encoding='ascii'):
    """
    Given a string object, regardless of type, returns a representation of that
    string in the native string type, encoding and decoding where necessary.
    This assumes ASCII unless told otherwise.
    """
    if isinstance(string, builtin_str):
        return string
    # Native type is bytes on Python 2 and str on Python 3, so the
    # conversion direction depends on the interpreter.
    if is_py2:
        return string.encode(encoding)
    return string.decode(encoding)
| mit |
mancoast/CPythonPyc_test | fail/331_test_runpy.py | 29 | 23795 | # Test the runpy module
import unittest
import os
import os.path
import sys
import re
import tempfile
import importlib
import py_compile
from test.support import (
forget, make_legacy_pyc, run_unittest, unload, verbose, no_tracing,
create_empty_file)
from test.script_helper import (
make_pkg, make_script, make_zip_pkg, make_zip_script, temp_dir)
import runpy
from runpy import _run_code, _run_module_code, run_module, run_path
# Note: This module can't safely test _run_module_as_main as it
# runs its tests in the current process, which would mess with the
# real __main__ module (usually test.regrtest)
# See test_cmd_line_script for a test that executes that code path
# Set up the test code and expected results
example_source = """\
# Check basic code execution
result = ['Top level assignment']
def f():
result.append('Lower level reference')
f()
del f
# Check the sys module
import sys
run_argv0 = sys.argv[0]
run_name_in_sys_modules = __name__ in sys.modules
module_in_sys_modules = (run_name_in_sys_modules and
globals() is sys.modules[__name__].__dict__)
# Check nested operation
import runpy
nested = runpy._run_module_code('x=1\\n', mod_name='<run>')
"""
implicit_namespace = {
"__name__": None,
"__file__": None,
"__cached__": None,
"__package__": None,
"__doc__": None,
}
example_namespace = {
"sys": sys,
"runpy": runpy,
"result": ["Top level assignment", "Lower level reference"],
"run_argv0": sys.argv[0],
"run_name_in_sys_modules": False,
"module_in_sys_modules": False,
"nested": dict(implicit_namespace,
x=1, __name__="<run>", __loader__=None),
}
example_namespace.update(implicit_namespace)
class CodeExecutionMixin:
    # Issue #15230 (run_path not handling run_name correctly) highlighted a
    # problem with the way arguments were being passed from higher level APIs
    # down to lower level code. This mixin makes it easier to ensure full
    # testing occurs at those upper layers as well, not just at the utility
    # layer

    def assertNamespaceMatches(self, result_ns, expected_ns):
        """Check two namespaces match.

        Ignores any unspecified interpreter created names
        """
        # Implementations may add extra dunder names; drop any that the
        # expected namespace does not explicitly list.
        for key in list(result_ns):
            if key.startswith("__") and key.endswith("__"):
                if key not in expected_ns:
                    result_ns.pop(key)
                if key not in expected_ns["nested"]:
                    result_ns["nested"].pop(key)
        # Compare key sets first, then each entry individually - the diff
        # from a direct dict comparison is too hard to debug.
        self.assertEqual(set(result_ns), set(expected_ns))
        for key in result_ns:
            self.assertEqual((key, result_ns[key]), (key, expected_ns[key]))

    def check_code_execution(self, create_namespace, expected_namespace):
        """Check that an interface runs the example code correctly

        First argument is a callable accepting the initial globals and
        using them to create the actual namespace
        Second argument is the expected result
        """
        sentinel = object()
        expected_ns = expected_namespace.copy()
        run_name = expected_ns["__name__"]
        saved_argv0 = sys.argv[0]
        saved_mod = sys.modules.get(run_name, sentinel)
        # First pass: no initial globals supplied; sys state must be restored.
        result_ns = create_namespace(None)
        self.assertNamespaceMatches(result_ns, expected_ns)
        self.assertIs(sys.argv[0], saved_argv0)
        self.assertIs(sys.modules.get(run_name, sentinel), saved_mod)
        # Second pass: initial globals must be copied, not mutated in place.
        initial_ns = {"sentinel": sentinel}
        expected_ns["sentinel"] = sentinel
        result_ns = create_namespace(initial_ns)
        self.assertIsNot(result_ns, initial_ns)
        self.assertNamespaceMatches(result_ns, expected_ns)
        self.assertIs(sys.argv[0], saved_argv0)
        self.assertIs(sys.modules.get(run_name, sentinel), saved_mod)
class ExecutionLayerTestCase(unittest.TestCase, CodeExecutionMixin):
    """Unit tests for runpy._run_code and runpy._run_module_code"""

    def test_run_code(self):
        # _run_code leaves __loader__ unset, hence None in the expectation.
        expected_ns = example_namespace.copy()
        expected_ns["__loader__"] = None

        def create_ns(init_globals):
            return _run_code(example_source, {}, init_globals)

        self.check_code_execution(create_ns, expected_ns)

    def test_run_module_code(self):
        # Deliberately nonsensical metadata: _run_module_code must pass it
        # through verbatim into the executed namespace.
        mod_name = "<Nonsense>"
        mod_fname = "Some other nonsense"
        mod_loader = "Now you're just being silly"
        mod_package = ''  # Treat as a top level module
        expected_ns = example_namespace.copy()
        expected_ns.update({
            "__name__": mod_name,
            "__file__": mod_fname,
            "__loader__": mod_loader,
            "__package__": mod_package,
            "run_argv0": mod_fname,
            "run_name_in_sys_modules": True,
            "module_in_sys_modules": True,
        })

        def create_ns(init_globals):
            return _run_module_code(example_source, init_globals, mod_name,
                                    mod_fname, mod_loader, mod_package)

        self.check_code_execution(create_ns, expected_ns)
# TODO: Use self.addCleanup to get rid of a lot of try-finally blocks
class RunModuleTestCase(unittest.TestCase, CodeExecutionMixin):
    """Unit tests for runpy.run_module"""

    def expect_import_error(self, mod_name):
        # Helper: assert that run_module on this name raises ImportError.
        try:
            run_module(mod_name)
        except ImportError:
            pass
        else:
            self.fail("Expected import error for " + mod_name)

    def test_invalid_names(self):
        # Builtin module
        self.expect_import_error("sys")
        # Non-existent modules
        self.expect_import_error("sys.imp.eric")
        self.expect_import_error("os.path.half")
        self.expect_import_error("a.bee")
        self.expect_import_error(".howard")
        self.expect_import_error("..eaten")
        # Package without __main__.py
        self.expect_import_error("multiprocessing")

    def test_library_module(self):
        self.assertEqual(run_module("runpy")["__name__"], "runpy")

    def _add_pkg_dir(self, pkg_dir):
        # Create a package directory containing an empty __init__.py.
        os.mkdir(pkg_dir)
        pkg_fname = os.path.join(pkg_dir, "__init__.py")
        create_empty_file(pkg_fname)
        return pkg_fname

    def _make_pkg(self, source, depth, mod_base="runpy_test"):
        # Build a package tree `depth` levels deep under a fresh temp dir,
        # writing `source` into the innermost module. Also prepends the
        # temp dir to sys.path (undone by _del_pkg).
        pkg_name = "__runpy_pkg__"
        test_fname = mod_base+os.extsep+"py"
        pkg_dir = sub_dir = os.path.realpath(tempfile.mkdtemp())
        if verbose > 1: print(" Package tree in:", sub_dir)
        sys.path.insert(0, pkg_dir)
        if verbose > 1: print(" Updated sys.path:", sys.path[0])
        for i in range(depth):
            sub_dir = os.path.join(sub_dir, pkg_name)
            pkg_fname = self._add_pkg_dir(sub_dir)
            if verbose > 1: print(" Next level in:", sub_dir)
            if verbose > 1: print(" Created:", pkg_fname)
        mod_fname = os.path.join(sub_dir, test_fname)
        mod_file = open(mod_fname, "w")
        mod_file.write(source)
        mod_file.close()
        if verbose > 1: print(" Created:", mod_fname)
        mod_name = (pkg_name+".")*depth + mod_base
        return pkg_dir, mod_fname, mod_name

    def _del_pkg(self, top, depth, mod_name):
        # Tear down everything _make_pkg set up: sys.modules entries, the
        # sys.path insertion, and the on-disk tree (best effort).
        for entry in list(sys.modules):
            if entry.startswith("__runpy_pkg__"):
                del sys.modules[entry]
        if verbose > 1: print(" Removed sys.modules entries")
        del sys.path[0]
        if verbose > 1: print(" Removed sys.path entry")
        for root, dirs, files in os.walk(top, topdown=False):
            for name in files:
                try:
                    os.remove(os.path.join(root, name))
                except OSError as ex:
                    if verbose > 1: print(ex) # Persist with cleaning up
            for name in dirs:
                fullname = os.path.join(root, name)
                try:
                    os.rmdir(fullname)
                except OSError as ex:
                    if verbose > 1: print(ex) # Persist with cleaning up
        try:
            os.rmdir(top)
            if verbose > 1: print(" Removed package tree")
        except OSError as ex:
            if verbose > 1: print(ex) # Persist with cleaning up

    def _fix_ns_for_legacy_pyc(self, ns, alter_sys):
        # Running from a legacy .pyc/.pyo changes the expected file names
        # by one trailing character.
        char_to_add = "c" if __debug__ else "o"
        ns["__file__"] += char_to_add
        if alter_sys:
            ns["run_argv0"] += char_to_add

    def _check_module(self, depth, alter_sys=False):
        # Run the example module from source, then again from a legacy
        # compiled file, checking the resulting namespace both times.
        pkg_dir, mod_fname, mod_name = (
               self._make_pkg(example_source, depth))
        forget(mod_name)
        expected_ns = example_namespace.copy()
        expected_ns.update({
            "__name__": mod_name,
            "__file__": mod_fname,
            "__package__": mod_name.rpartition(".")[0],
        })
        if alter_sys:
            expected_ns.update({
                "run_argv0": mod_fname,
                "run_name_in_sys_modules": True,
                "module_in_sys_modules": True,
            })
        def create_ns(init_globals):
            return run_module(mod_name, init_globals, alter_sys=alter_sys)
        try:
            if verbose > 1: print("Running from source:", mod_name)
            self.check_code_execution(create_ns, expected_ns)
            importlib.invalidate_caches()
            __import__(mod_name)
            os.remove(mod_fname)
            if not sys.dont_write_bytecode:
                make_legacy_pyc(mod_fname)
                unload(mod_name)  # In case loader caches paths
                importlib.invalidate_caches()
                if verbose > 1: print("Running from compiled:", mod_name)
                self._fix_ns_for_legacy_pyc(expected_ns, alter_sys)
                self.check_code_execution(create_ns, expected_ns)
        finally:
            self._del_pkg(pkg_dir, depth, mod_name)
        if verbose > 1: print("Module executed successfully")

    def _check_package(self, depth, alter_sys=False):
        # Same as _check_module, but executes the package's __main__.
        pkg_dir, mod_fname, mod_name = (
               self._make_pkg(example_source, depth, "__main__"))
        pkg_name = mod_name.rpartition(".")[0]
        forget(mod_name)
        expected_ns = example_namespace.copy()
        expected_ns.update({
            "__name__": mod_name,
            "__file__": mod_fname,
            "__package__": pkg_name,
        })
        if alter_sys:
            expected_ns.update({
                "run_argv0": mod_fname,
                "run_name_in_sys_modules": True,
                "module_in_sys_modules": True,
            })
        def create_ns(init_globals):
            return run_module(pkg_name, init_globals, alter_sys=alter_sys)
        try:
            if verbose > 1: print("Running from source:", pkg_name)
            self.check_code_execution(create_ns, expected_ns)
            importlib.invalidate_caches()
            __import__(mod_name)
            os.remove(mod_fname)
            if not sys.dont_write_bytecode:
                make_legacy_pyc(mod_fname)
                unload(mod_name)  # In case loader caches paths
                if verbose > 1: print("Running from compiled:", pkg_name)
                importlib.invalidate_caches()
                self._fix_ns_for_legacy_pyc(expected_ns, alter_sys)
                self.check_code_execution(create_ns, expected_ns)
        finally:
            self._del_pkg(pkg_dir, depth, pkg_name)
        if verbose > 1: print("Package executed successfully")

    def _add_relative_modules(self, base_dir, source, depth):
        # Add sibling/uncle/cousin/nephew modules next to the innermost
        # package so relative imports have targets to resolve.
        if depth <= 1:
            raise ValueError("Relative module test needs depth > 1")
        pkg_name = "__runpy_pkg__"
        module_dir = base_dir
        for i in range(depth):
            parent_dir = module_dir
            module_dir = os.path.join(module_dir, pkg_name)
        # Add sibling module
        sibling_fname = os.path.join(module_dir, "sibling.py")
        create_empty_file(sibling_fname)
        if verbose > 1: print(" Added sibling module:", sibling_fname)
        # Add nephew module
        uncle_dir = os.path.join(parent_dir, "uncle")
        self._add_pkg_dir(uncle_dir)
        if verbose > 1: print(" Added uncle package:", uncle_dir)
        cousin_dir = os.path.join(uncle_dir, "cousin")
        self._add_pkg_dir(cousin_dir)
        if verbose > 1: print(" Added cousin package:", cousin_dir)
        nephew_fname = os.path.join(cousin_dir, "nephew.py")
        create_empty_file(nephew_fname)
        if verbose > 1: print(" Added nephew module:", nephew_fname)

    def _check_relative_imports(self, depth, run_name=None):
        # Execute a module whose source performs explicit relative imports,
        # from source and then from a legacy compiled file.
        contents = r"""\
from __future__ import absolute_import
from . import sibling
from ..uncle.cousin import nephew
"""
        pkg_dir, mod_fname, mod_name = (
               self._make_pkg(contents, depth))
        if run_name is None:
            expected_name = mod_name
        else:
            expected_name = run_name
        try:
            self._add_relative_modules(pkg_dir, contents, depth)
            pkg_name = mod_name.rpartition('.')[0]
            if verbose > 1: print("Running from source:", mod_name)
            d1 = run_module(mod_name, run_name=run_name) # Read from source
            self.assertEqual(d1["__name__"], expected_name)
            self.assertEqual(d1["__package__"], pkg_name)
            self.assertIn("sibling", d1)
            self.assertIn("nephew", d1)
            del d1 # Ensure __loader__ entry doesn't keep file open
            importlib.invalidate_caches()
            __import__(mod_name)
            os.remove(mod_fname)
            if not sys.dont_write_bytecode:
                make_legacy_pyc(mod_fname)
                unload(mod_name)  # In case the loader caches paths
                if verbose > 1: print("Running from compiled:", mod_name)
                importlib.invalidate_caches()
                d2 = run_module(mod_name, run_name=run_name) # Read from bytecode
                self.assertEqual(d2["__name__"], expected_name)
                self.assertEqual(d2["__package__"], pkg_name)
                self.assertIn("sibling", d2)
                self.assertIn("nephew", d2)
                del d2 # Ensure __loader__ entry doesn't keep file open
        finally:
            self._del_pkg(pkg_dir, depth, mod_name)
        if verbose > 1: print("Module executed successfully")

    def test_run_module(self):
        for depth in range(4):
            if verbose > 1: print("Testing package depth:", depth)
            self._check_module(depth)

    def test_run_package(self):
        for depth in range(1, 4):
            if verbose > 1: print("Testing package depth:", depth)
            self._check_package(depth)

    def test_run_module_alter_sys(self):
        for depth in range(4):
            if verbose > 1: print("Testing package depth:", depth)
            self._check_module(depth, alter_sys=True)

    def test_run_package_alter_sys(self):
        for depth in range(1, 4):
            if verbose > 1: print("Testing package depth:", depth)
            self._check_package(depth, alter_sys=True)

    def test_explicit_relative_import(self):
        for depth in range(2, 5):
            if verbose > 1: print("Testing relative imports at depth:", depth)
            self._check_relative_imports(depth)

    def test_main_relative_import(self):
        for depth in range(2, 5):
            if verbose > 1: print("Testing main relative imports at depth:", depth)
            self._check_relative_imports(depth, "__main__")

    def test_run_name(self):
        # run_name overrides __name__ in the executed namespace.
        depth = 1
        run_name = "And now for something completely different"
        pkg_dir, mod_fname, mod_name = (
               self._make_pkg(example_source, depth))
        forget(mod_name)
        expected_ns = example_namespace.copy()
        expected_ns.update({
            "__name__": run_name,
            "__file__": mod_fname,
            "__package__": mod_name.rpartition(".")[0],
        })
        def create_ns(init_globals):
            return run_module(mod_name, init_globals, run_name)
        try:
            self.check_code_execution(create_ns, expected_ns)
        finally:
            self._del_pkg(pkg_dir, depth, mod_name)

    def test_pkgutil_walk_packages(self):
        # This is a dodgy hack to use the test_runpy infrastructure to test
        # issue #15343. Issue #15348 declares this is indeed a dodgy hack ;)
        import pkgutil
        max_depth = 4
        base_name = "__runpy_pkg__"
        package_suffixes = ["uncle", "uncle.cousin"]
        module_suffixes = ["uncle.cousin.nephew", base_name + ".sibling"]
        expected_packages = set()
        expected_modules = set()
        for depth in range(1, max_depth):
            pkg_name = ".".join([base_name] * depth)
            expected_packages.add(pkg_name)
            for name in package_suffixes:
                expected_packages.add(pkg_name + "." + name)
            for name in module_suffixes:
                expected_modules.add(pkg_name + "." + name)
        pkg_name = ".".join([base_name] * max_depth)
        expected_packages.add(pkg_name)
        expected_modules.add(pkg_name + ".runpy_test")
        pkg_dir, mod_fname, mod_name = (
               self._make_pkg("", max_depth))
        self.addCleanup(self._del_pkg, pkg_dir, max_depth, mod_name)
        for depth in range(2, max_depth+1):
            self._add_relative_modules(pkg_dir, "", depth)
        # Walking the tree must discover exactly the expected names.
        for finder, mod_name, ispkg in pkgutil.walk_packages([pkg_dir]):
            self.assertIsInstance(finder,
                                  importlib.machinery.FileFinder)
            if ispkg:
                expected_packages.remove(mod_name)
            else:
                expected_modules.remove(mod_name)
        self.assertEqual(len(expected_packages), 0, expected_packages)
        self.assertEqual(len(expected_modules), 0, expected_modules)
class RunPathTestCase(unittest.TestCase, CodeExecutionMixin):
    """Unit tests for runpy.run_path"""

    def _make_test_script(self, script_dir, script_basename, source=None):
        # Write the example (or supplied) source as a standalone script.
        if source is None:
            source = example_source
        return make_script(script_dir, script_basename, source)

    def _check_script(self, script_name, expected_name, expected_file,
                      expected_argv0):
        # First check is without run_name
        def create_ns(init_globals):
            return run_path(script_name, init_globals)
        expected_ns = example_namespace.copy()
        expected_ns.update({
            "__name__": expected_name,
            "__file__": expected_file,
            "__package__": "",
            "run_argv0": expected_argv0,
            "run_name_in_sys_modules": True,
            "module_in_sys_modules": True,
        })
        self.check_code_execution(create_ns, expected_ns)
        # Second check makes sure run_name works in all cases
        run_name = "prove.issue15230.is.fixed"
        def create_ns(init_globals):
            return run_path(script_name, init_globals, run_name)
        expected_ns["__name__"] = run_name
        expected_ns["__package__"] = run_name.rpartition(".")[0]
        self.check_code_execution(create_ns, expected_ns)

    def _check_import_error(self, script_name, msg):
        # run_path must fail with an ImportError matching the message.
        msg = re.escape(msg)
        self.assertRaisesRegex(ImportError, msg, run_path, script_name)

    def test_basic_script(self):
        with temp_dir() as script_dir:
            mod_name = 'script'
            script_name = self._make_test_script(script_dir, mod_name)
            self._check_script(script_name, "<run_path>", script_name,
                               script_name)

    def test_script_compiled(self):
        with temp_dir() as script_dir:
            mod_name = 'script'
            script_name = self._make_test_script(script_dir, mod_name)
            compiled_name = py_compile.compile(script_name, doraise=True)
            os.remove(script_name)
            self._check_script(compiled_name, "<run_path>", compiled_name,
                               compiled_name)

    def test_directory(self):
        # A directory containing __main__.py is runnable via run_path.
        with temp_dir() as script_dir:
            mod_name = '__main__'
            script_name = self._make_test_script(script_dir, mod_name)
            self._check_script(script_dir, "<run_path>", script_name,
                               script_dir)

    def test_directory_compiled(self):
        with temp_dir() as script_dir:
            mod_name = '__main__'
            script_name = self._make_test_script(script_dir, mod_name)
            compiled_name = py_compile.compile(script_name, doraise=True)
            os.remove(script_name)
            if not sys.dont_write_bytecode:
                legacy_pyc = make_legacy_pyc(script_name)
                self._check_script(script_dir, "<run_path>", legacy_pyc,
                                   script_dir)

    def test_directory_error(self):
        # Directory without __main__.py must fail with a clear message.
        with temp_dir() as script_dir:
            mod_name = 'not_main'
            script_name = self._make_test_script(script_dir, mod_name)
            msg = "can't find '__main__' module in %r" % script_dir
            self._check_import_error(script_dir, msg)

    def test_zipfile(self):
        with temp_dir() as script_dir:
            mod_name = '__main__'
            script_name = self._make_test_script(script_dir, mod_name)
            zip_name, fname = make_zip_script(script_dir, 'test_zip', script_name)
            self._check_script(zip_name, "<run_path>", fname, zip_name)

    def test_zipfile_compiled(self):
        with temp_dir() as script_dir:
            mod_name = '__main__'
            script_name = self._make_test_script(script_dir, mod_name)
            compiled_name = py_compile.compile(script_name, doraise=True)
            zip_name, fname = make_zip_script(script_dir, 'test_zip',
                                              compiled_name)
            self._check_script(zip_name, "<run_path>", fname, zip_name)

    def test_zipfile_error(self):
        with temp_dir() as script_dir:
            mod_name = 'not_main'
            script_name = self._make_test_script(script_dir, mod_name)
            zip_name, fname = make_zip_script(script_dir, 'test_zip', script_name)
            msg = "can't find '__main__' module in %r" % zip_name
            self._check_import_error(zip_name, msg)

    @no_tracing
    def test_main_recursion_error(self):
        # A zipped __main__ that run_paths a directory with no __main__
        # recurses until the interpreter reports a RuntimeError.
        with temp_dir() as script_dir, temp_dir() as dummy_dir:
            mod_name = '__main__'
            source = ("import runpy\n"
                      "runpy.run_path(%r)\n") % dummy_dir
            script_name = self._make_test_script(script_dir, mod_name, source)
            zip_name, fname = make_zip_script(script_dir, 'test_zip', script_name)
            msg = "recursion depth exceeded"
            self.assertRaisesRegex(RuntimeError, msg, run_path, zip_name)

    def test_encoding(self):
        # The coding cookie in the script must be honoured by run_path.
        with temp_dir() as script_dir:
            filename = os.path.join(script_dir, 'script.py')
            with open(filename, 'w', encoding='latin1') as f:
                f.write("""
#coding:latin1
s = "non-ASCII: h\xe9"
""")
            result = run_path(filename)
            self.assertEqual(result['s'], "non-ASCII: h\xe9")
def test_main():
    """Entry point used by regrtest: run all three test case classes."""
    cases = (ExecutionLayerTestCase, RunModuleTestCase, RunPathTestCase)
    run_unittest(*cases)
# Allow running this test file directly: python test_runpy.py
if __name__ == "__main__":
    test_main()
| gpl-3.0 |
AndrewGrossman/django | django/contrib/gis/geos/prototypes/io.py | 309 | 9480 | import threading
from ctypes import POINTER, Structure, byref, c_char, c_char_p, c_int, c_size_t
from django.contrib.gis.geos.base import GEOSBase
from django.contrib.gis.geos.libgeos import GEOM_PTR, GEOSFuncFactory
from django.contrib.gis.geos.prototypes.errcheck import (
check_geom, check_sized_string, check_string,
)
from django.contrib.gis.geos.prototypes.geom import c_uchar_p, geos_char_p
from django.utils import six
from django.utils.encoding import force_bytes
# ### The WKB/WKT Reader/Writer structures and pointers ###
class WKTReader_st(Structure):
    # Opaque stand-in for the C-level GEOSWKTReader struct; only ever
    # handled through a ctypes POINTER, so no _fields_ are declared.
    pass
class WKTWriter_st(Structure):
    # Opaque stand-in for the C-level GEOSWKTWriter struct.
    pass
class WKBReader_st(Structure):
    # Opaque stand-in for the C-level GEOSWKBReader struct.
    pass
class WKBWriter_st(Structure):
    # Opaque stand-in for the C-level GEOSWKBWriter struct.
    pass
# ctypes pointer types for the opaque reader/writer structs above.
WKT_READ_PTR = POINTER(WKTReader_st)
WKT_WRITE_PTR = POINTER(WKTWriter_st)
WKB_READ_PTR = POINTER(WKBReader_st)
# BUG FIX: this previously pointed at WKBReader_st. Harmless at runtime
# (the structs are empty placeholders, so the pointer types are
# interchangeable) but semantically wrong for the writer functions.
WKB_WRITE_PTR = POINTER(WKBWriter_st)

# WKTReader routines
wkt_reader_create = GEOSFuncFactory('GEOSWKTReader_create', restype=WKT_READ_PTR)
wkt_reader_destroy = GEOSFuncFactory('GEOSWKTReader_destroy', argtypes=[WKT_READ_PTR])
wkt_reader_read = GEOSFuncFactory(
    'GEOSWKTReader_read', argtypes=[WKT_READ_PTR, c_char_p], restype=GEOM_PTR, errcheck=check_geom
)

# WKTWriter routines
wkt_writer_create = GEOSFuncFactory('GEOSWKTWriter_create', restype=WKT_WRITE_PTR)
wkt_writer_destroy = GEOSFuncFactory('GEOSWKTWriter_destroy', argtypes=[WKT_WRITE_PTR])
wkt_writer_write = GEOSFuncFactory(
    'GEOSWKTWriter_write', argtypes=[WKT_WRITE_PTR, GEOM_PTR], restype=geos_char_p, errcheck=check_string
)
class WKTOutputDim(GEOSFuncFactory):
    """Factory for the WKT writer output-dimension getter/setter that
    degrades gracefully when the underlying GEOS library is too old to
    provide those functions.
    """
    def get_func(self, *args, **kwargs):
        try:
            return super(WKTOutputDim, self).get_func(*args, **kwargs)
        except AttributeError:
            # GEOSWKTWriter_get/setOutputDimension has been introduced in GEOS 3.3.0
            # Always return 2 if not available
            return {
                'GEOSWKTWriter_getOutputDimension': lambda ptr: 2,
                'GEOSWKTWriter_setOutputDimension': lambda ptr, dim: None,
            }.get(self.func_name)

wkt_writer_get_outdim = WKTOutputDim(
    'GEOSWKTWriter_getOutputDimension', argtypes=[WKT_WRITE_PTR], restype=c_int
)
wkt_writer_set_outdim = WKTOutputDim(
    'GEOSWKTWriter_setOutputDimension', argtypes=[WKT_WRITE_PTR, c_int]
)
# WKBReader routines
wkb_reader_create = GEOSFuncFactory('GEOSWKBReader_create', restype=WKB_READ_PTR)
wkb_reader_destroy = GEOSFuncFactory('GEOSWKBReader_destroy', argtypes=[WKB_READ_PTR])


class WKBReadFunc(GEOSFuncFactory):
    """Shared prototype for the binary and hex WKB parsing functions."""
    # Although the function definitions take `const unsigned char *`
    # as their parameter, we use c_char_p here so the function may
    # take Python strings directly as parameters.  Inside Python there
    # is not a difference between signed and unsigned characters, so
    # it is not a problem.
    argtypes = [WKB_READ_PTR, c_char_p, c_size_t]
    restype = GEOM_PTR
    errcheck = staticmethod(check_geom)

# Binary and hex-string WKB parsing entry points.
wkb_reader_read = WKBReadFunc('GEOSWKBReader_read')
wkb_reader_read_hex = WKBReadFunc('GEOSWKBReader_readHEX')
# WKBWriter routines
wkb_writer_create = GEOSFuncFactory('GEOSWKBWriter_create', restype=WKB_WRITE_PTR)
wkb_writer_destroy = GEOSFuncFactory('GEOSWKBWriter_destroy', argtypes=[WKB_WRITE_PTR])


# WKB Writing prototypes.
class WKBWriteFunc(GEOSFuncFactory):
    """Shared prototype for writers that return a sized byte buffer; the
    buffer length comes back through the c_size_t out-parameter and the
    result is validated by the check_sized_string errcheck helper."""
    argtypes = [WKB_WRITE_PTR, GEOM_PTR, POINTER(c_size_t)]
    restype = c_uchar_p
    errcheck = staticmethod(check_sized_string)

wkb_writer_write = WKBWriteFunc('GEOSWKBWriter_write')
wkb_writer_write_hex = WKBWriteFunc('GEOSWKBWriter_writeHEX')


# WKBWriter property getter/setter prototypes.
class WKBWriterGet(GEOSFuncFactory):
    """Prototype for integer-valued WKB writer property getters."""
    argtypes = [WKB_WRITE_PTR]
    restype = c_int


class WKBWriterSet(GEOSFuncFactory):
    """Prototype for integer-valued WKB writer property setters."""
    argtypes = [WKB_WRITE_PTR, c_int]

wkb_writer_get_byteorder = WKBWriterGet('GEOSWKBWriter_getByteOrder')
wkb_writer_set_byteorder = WKBWriterSet('GEOSWKBWriter_setByteOrder')
wkb_writer_get_outdim = WKBWriterGet('GEOSWKBWriter_getOutputDimension')
wkb_writer_set_outdim = WKBWriterSet('GEOSWKBWriter_setOutputDimension')
# The IncludeSRID flag travels as a single byte (c_char), not an int.
wkb_writer_get_include_srid = WKBWriterGet('GEOSWKBWriter_getIncludeSRID', restype=c_char)
wkb_writer_set_include_srid = WKBWriterSet('GEOSWKBWriter_setIncludeSRID', argtypes=[WKB_WRITE_PTR, c_char])
# ### Base I/O Class ###
class IOBase(GEOSBase):
    "Base class for GEOS I/O objects."
    def __init__(self):
        # Getting the pointer with the constructor.
        self.ptr = self._constructor()
        # Loading the real destructor function at this point as doing it in
        # __del__ is too late (import error).
        self._destructor.func = self._destructor.get_func(
            *self._destructor.args, **self._destructor.kwargs
        )

    def __del__(self):
        # Cleaning up with the appropriate destructor.
        # NOTE(review): reads the raw _ptr attribute rather than the ptr
        # property, presumably to avoid property-side checks during
        # interpreter teardown -- confirm against GEOSBase.
        if self._ptr:
            self._destructor(self._ptr)
# ### Base WKB/WKT Reading and Writing objects ###

# Non-public WKB/WKT reader classes for internal use because
# their `read` methods return _pointers_ instead of GEOSGeometry
# objects.
class _WKTReader(IOBase):
    _constructor = wkt_reader_create
    _destructor = wkt_reader_destroy
    ptr_type = WKT_READ_PTR

    def read(self, wkt):
        """Return a C GEOS geometry pointer parsed from WKT text/bytes."""
        if not isinstance(wkt, (bytes, six.string_types)):
            raise TypeError
        return wkt_reader_read(self.ptr, force_bytes(wkt))
class _WKBReader(IOBase):
    _constructor = wkb_reader_create
    _destructor = wkb_reader_destroy
    ptr_type = WKB_READ_PTR

    def read(self, wkb):
        "Returns a _pointer_ to C GEOS Geometry object from the given WKB."
        if isinstance(wkb, six.memoryview):
            # Binary WKB buffer: materialize it and pass the exact length.
            wkb_s = bytes(wkb)
            return wkb_reader_read(self.ptr, wkb_s, len(wkb_s))
        elif isinstance(wkb, (bytes, six.string_types)):
            # Hex-encoded WKB string.
            return wkb_reader_read_hex(self.ptr, wkb, len(wkb))
        else:
            raise TypeError
# ### WKB/WKT Writer Classes ###
class WKTWriter(IOBase):
    """Serializes GEOS geometries to WKT text."""
    _constructor = wkt_writer_create
    _destructor = wkt_writer_destroy
    ptr_type = WKT_WRITE_PTR

    def write(self, geom):
        "Returns the WKT representation of the given geometry."
        return wkt_writer_write(self.ptr, geom.ptr)

    @property
    def outdim(self):
        # Output dimensionality (2 or 3) used when serializing.
        return wkt_writer_get_outdim(self.ptr)

    @outdim.setter
    def outdim(self, new_dim):
        if new_dim not in (2, 3):
            raise ValueError('WKT output dimension must be 2 or 3')
        wkt_writer_set_outdim(self.ptr, new_dim)
class WKBWriter(IOBase):
    """Serializes GEOS geometries to (HEX)WKB, with configurable byte
    order, output dimension, and SRID embedding."""
    _constructor = wkb_writer_create
    _destructor = wkb_writer_destroy
    ptr_type = WKB_WRITE_PTR

    def write(self, geom):
        "Returns the WKB representation of the given geometry."
        return six.memoryview(wkb_writer_write(self.ptr, geom.ptr, byref(c_size_t())))

    def write_hex(self, geom):
        "Returns the HEXEWKB representation of the given geometry."
        return wkb_writer_write_hex(self.ptr, geom.ptr, byref(c_size_t()))

    # ### WKBWriter Properties ###

    # Property for getting/setting the byteorder.
    def _get_byteorder(self):
        return wkb_writer_get_byteorder(self.ptr)

    def _set_byteorder(self, order):
        if order not in (0, 1):
            raise ValueError('Byte order parameter must be 0 (Big Endian) or 1 (Little Endian).')
        wkb_writer_set_byteorder(self.ptr, order)

    byteorder = property(_get_byteorder, _set_byteorder)

    # Property for getting/setting the output dimension.
    def _get_outdim(self):
        return wkb_writer_get_outdim(self.ptr)

    def _set_outdim(self, new_dim):
        if new_dim not in (2, 3):
            raise ValueError('WKB output dimension must be 2 or 3')
        wkb_writer_set_outdim(self.ptr, new_dim)

    outdim = property(_get_outdim, _set_outdim)

    # Property for getting/setting the include srid flag.
    def _get_include_srid(self):
        return bool(ord(wkb_writer_get_include_srid(self.ptr)))

    def _set_include_srid(self, include):
        # The C setter takes a single byte (c_char), not a Python bool.
        if include:
            flag = b'\x01'
        else:
            flag = b'\x00'
        wkb_writer_set_include_srid(self.ptr, flag)

    srid = property(_get_include_srid, _set_include_srid)
# `ThreadLocalIO` object holds instances of the WKT and WKB reader/writer
# objects that are local to the thread.  The `GEOSGeometry` internals
# access these instances by calling the module-level functions, defined
# below.
class ThreadLocalIO(threading.local):
    # All slots start out empty; the accessor functions below create each
    # reader/writer lazily, once per thread.
    wkt_r = None
    wkt_w = None
    wkb_r = None
    wkb_w = None
    ewkb_w = None

thread_context = ThreadLocalIO()
# These module-level routines return the I/O object that is local to the
# thread. If the I/O object does not exist yet it will be initialized.
def wkt_r():
    """Return this thread's WKT reader, creating it on first use."""
    reader = thread_context.wkt_r
    if reader is None:
        reader = thread_context.wkt_r = _WKTReader()
    return reader


def wkt_w(dim=2):
    """Return this thread's WKT writer, configured for `dim` dimensions."""
    writer = thread_context.wkt_w
    if writer is None:
        writer = thread_context.wkt_w = WKTWriter()
    # The output dimension is (re)applied on every access so callers can
    # request a different dimensionality from the cached writer.
    writer.outdim = dim
    return writer


def wkb_r():
    """Return this thread's WKB reader, creating it on first use."""
    reader = thread_context.wkb_r
    if reader is None:
        reader = thread_context.wkb_r = _WKBReader()
    return reader


def wkb_w(dim=2):
    """Return this thread's WKB writer, configured for `dim` dimensions."""
    writer = thread_context.wkb_w
    if writer is None:
        writer = thread_context.wkb_w = WKBWriter()
    writer.outdim = dim
    return writer


def ewkb_w(dim=2):
    """Return this thread's EWKB writer (WKB with the SRID embedded)."""
    writer = thread_context.ewkb_w
    if writer is None:
        writer = thread_context.ewkb_w = WKBWriter()
        writer.srid = True
    writer.outdim = dim
    return writer
| bsd-3-clause |
wengpingbo/linux | scripts/gdb/linux/symbols.py | 68 | 6310 | #
# gdb helper commands and functions for Linux kernel debugging
#
# load kernel and module symbols
#
# Copyright (c) Siemens AG, 2011-2013
#
# Authors:
# Jan Kiszka <jan.kiszka@siemens.com>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import gdb
import os
import re
from linux import modules
if hasattr(gdb, 'Breakpoint'):
    class LoadModuleBreakpoint(gdb.Breakpoint):
        """Internal breakpoint that fires when the kernel finishes loading a
        module, so symbols of the new module can be loaded automatically."""
        def __init__(self, spec, gdb_command):
            super(LoadModuleBreakpoint, self).__init__(spec, internal=True)
            self.silent = True
            # Back-reference to the LxSymbols command that owns this
            # breakpoint; used to trigger the actual symbol loading.
            self.gdb_command = gdb_command

        def stop(self):
            """Load/refresh symbols for the module in `mod`; never halts."""
            module = gdb.parse_and_eval("mod")
            module_name = module['name'].string()
            cmd = self.gdb_command

            # enforce update if object file is not found
            cmd.module_files_updated = False

            # Disable pagination while reporting symbol (re-)loading.
            # The console input is blocked in this context so that we would
            # get stuck waiting for the user to acknowledge paged output.
            show_pagination = gdb.execute("show pagination", to_string=True)
            pagination = show_pagination.endswith("on.\n")
            gdb.execute("set pagination off")

            if module_name in cmd.loaded_modules:
                # Re-insertion of a known module: a full reload is needed to
                # drop the stale addresses.
                gdb.write("refreshing all symbols to reload module "
                          "'{0}'\n".format(module_name))
                cmd.load_all_symbols()
            else:
                cmd.load_module_symbols(module)

            # restore pagination state
            gdb.execute("set pagination %s" % ("on" if pagination else "off"))

            # Returning False tells gdb to continue execution.
            return False
class LxSymbols(gdb.Command):
    """(Re-)load symbols of Linux kernel and currently loaded modules.

The kernel (vmlinux) is taken from the current working directly. Modules (.ko)
are scanned recursively, starting in the same directory. Optionally, the module
search path can be extended by a space separated list of paths passed to the
lx-symbols command."""

    module_paths = []
    module_files = []
    module_files_updated = False
    loaded_modules = []
    breakpoint = None

    def __init__(self):
        super(LxSymbols, self).__init__("lx-symbols", gdb.COMMAND_FILES,
                                        gdb.COMPLETE_FILENAME)

    def _update_module_files(self):
        """Rescan all search paths and rebuild the list of candidate .ko files."""
        self.module_files = []
        for path in self.module_paths:
            gdb.write("scanning for modules in {0}\n".format(path))
            for root, dirs, files in os.walk(path):
                for name in files:
                    if name.endswith(".ko"):
                        self.module_files.append(root + "/" + name)
        self.module_files_updated = True

    def _get_module_file(self, module_name):
        """Return the path of the .ko file for module_name, or None."""
        # Modules may be named with '-' on disk but '_' inside the kernel,
        # so match either character.  (Raw string: '\.' is an invalid escape
        # in a plain string literal on modern Python.)
        module_pattern = r".*/{0}\.ko$".format(
            module_name.replace("_", r"[_\-]"))
        for name in self.module_files:
            if re.match(module_pattern, name) and os.path.exists(name):
                return name
        return None

    def _section_arguments(self, module):
        """Build the '-s <section> <addr>' arguments for add-symbol-file."""
        try:
            sect_attrs = module['sect_attrs'].dereference()
        except gdb.error:
            return ""
        attrs = sect_attrs['attrs']
        section_name_to_address = {
            attrs[n]['name'].string(): attrs[n]['address']
            for n in range(int(sect_attrs['nsections']))}
        args = []
        for section_name in [".data", ".data..read_mostly", ".rodata", ".bss"]:
            address = section_name_to_address.get(section_name)
            if address:
                args.append(" -s {name} {addr}".format(
                    name=section_name, addr=str(address)))
        return "".join(args)

    def load_module_symbols(self, module):
        """Load the symbol file of one module at its runtime load address."""
        module_name = module['name'].string()
        module_addr = str(module['core_layout']['base']).split()[0]

        module_file = self._get_module_file(module_name)
        if not module_file and not self.module_files_updated:
            # The file list may be stale; rescan once before giving up.
            self._update_module_files()
            module_file = self._get_module_file(module_name)

        if module_file:
            # Bug fix: the '{filename}' placeholders had been clobbered with
            # the literal '(unknown)' although filename= was still passed,
            # producing a broken message and an invalid add-symbol-file
            # command.
            gdb.write("loading @{addr}: {filename}\n".format(
                addr=module_addr, filename=module_file))
            cmdline = "add-symbol-file {filename} {addr}{sections}".format(
                filename=module_file,
                addr=module_addr,
                sections=self._section_arguments(module))
            gdb.execute(cmdline, to_string=True)
            if module_name not in self.loaded_modules:
                self.loaded_modules.append(module_name)
        else:
            gdb.write("no module object found for '{0}'\n".format(module_name))

    def load_all_symbols(self):
        """Reload vmlinux symbols plus those of every loaded module."""
        gdb.write("loading vmlinux\n")

        # Dropping symbols will disable all breakpoints. So save their states
        # and restore them afterward.
        saved_states = []
        if hasattr(gdb, 'breakpoints') and not gdb.breakpoints() is None:
            for bp in gdb.breakpoints():
                saved_states.append({'breakpoint': bp, 'enabled': bp.enabled})

        # drop all current symbols and reload vmlinux
        gdb.execute("symbol-file", to_string=True)
        gdb.execute("symbol-file vmlinux")

        self.loaded_modules = []
        module_list = modules.module_list()
        if not module_list:
            gdb.write("no modules found\n")
        else:
            # Plain loop instead of a side-effect list comprehension.
            for module in module_list:
                self.load_module_symbols(module)

        for saved_state in saved_states:
            saved_state['breakpoint'].enabled = saved_state['enabled']

    def invoke(self, arg, from_tty):
        """Entry point of the 'lx-symbols' command; arg is the extra search
        path list."""
        self.module_paths = arg.split()
        self.module_paths.append(os.getcwd())

        # enforce update
        self.module_files = []
        self.module_files_updated = False

        self.load_all_symbols()

        if hasattr(gdb, 'Breakpoint'):
            if self.breakpoint is not None:
                self.breakpoint.delete()
                self.breakpoint = None
            self.breakpoint = LoadModuleBreakpoint(
                "kernel/module.c:do_init_module", self)
        else:
            gdb.write("Note: symbol update on module loading not supported "
                      "with this gdb version\n")


LxSymbols()
| gpl-2.0 |
fubecka/f5-dashboard | flask/lib/python2.6/site-packages/jinja2/visitor.py | 1401 | 3316 | # -*- coding: utf-8 -*-
"""
jinja2.visitor
~~~~~~~~~~~~~~
This module implements a visitor for the nodes.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
from jinja2.nodes import Node
class NodeVisitor(object):
    """Walks the abstract syntax tree and call visitor functions for every
    node found.  The visitor functions may return values which will be
    forwarded by the `visit` method.

    Per default the visitor functions for the nodes are ``'visit_'`` +
    class name of the node.  So a `TryFinally` node visit function would
    be `visit_TryFinally`.  This behavior can be changed by overriding
    the `get_visitor` function.  If no visitor function exists for a node
    (return value `None`) the `generic_visit` visitor is used instead.
    """

    def get_visitor(self, node):
        """Return the visitor function for this node or `None` if no visitor
        exists for this node.  In that case the generic visit function is
        used instead.
        """
        return getattr(self, 'visit_%s' % type(node).__name__, None)

    def visit(self, node, *args, **kwargs):
        """Visit a node."""
        handler = self.get_visitor(node)
        if handler is None:
            return self.generic_visit(node, *args, **kwargs)
        return handler(node, *args, **kwargs)

    def generic_visit(self, node, *args, **kwargs):
        """Called if no explicit visitor function exists for a node."""
        for child in node.iter_child_nodes():
            self.visit(child, *args, **kwargs)
class NodeTransformer(NodeVisitor):
    """Walks the abstract syntax tree and allows modifications of nodes.

    The `NodeTransformer` will walk the AST and use the return value of the
    visitor functions to replace or remove the old node.  If the return
    value of the visitor function is `None` the node will be removed
    from the previous location otherwise it's replaced with the return
    value.  The return value may be the original node in which case no
    replacement takes place.
    """

    def generic_visit(self, node, *args, **kwargs):
        """Visit all children in place; return the (mutated) node."""
        for field, old_value in node.iter_fields():
            if isinstance(old_value, list):
                new_values = []
                for value in old_value:
                    if isinstance(value, Node):
                        value = self.visit(value, *args, **kwargs)
                        if value is None:
                            # None drops the child from the list entirely.
                            continue
                        elif not isinstance(value, Node):
                            # A non-Node return is treated as a sequence of
                            # replacement nodes and spliced in.
                            new_values.extend(value)
                            continue
                    new_values.append(value)
                # Mutate the original list in place so the node keeps
                # referencing the same list object.
                old_value[:] = new_values
            elif isinstance(old_value, Node):
                new_node = self.visit(old_value, *args, **kwargs)
                if new_node is None:
                    # None removes the attribute from the node.
                    delattr(node, field)
                else:
                    setattr(node, field, new_node)
        return node

    def visit_list(self, node, *args, **kwargs):
        """As transformers may return lists in some places this method
        can be used to enforce a list as return value.
        """
        rv = self.visit(node, *args, **kwargs)
        if not isinstance(rv, list):
            rv = [rv]
        return rv
| apache-2.0 |
ganshun666/micropython | tests/basics/gen_yield_from_ducktype.py | 107 | 1034 | class MyGen:
def __init__(self):
self.v = 0
def __iter__(self):
return self
def __next__(self):
self.v += 1
if self.v > 5:
raise StopIteration
return self.v
def gen():
    # Delegate to the duck-typed iterator; yield from only requires the
    # iterator protocol, not a real generator.
    yield from MyGen()

def gen2():
    # Double delegation: generator -> generator -> duck-typed iterator.
    yield from gen()

print(list(gen()))
print(list(gen2()))
class Incrementer:
    """Duck-typed coroutine: an infinite iterator that also accepts send()."""

    def __iter__(self):
        return self

    def __next__(self):
        # Plain iteration behaves exactly like send(None).
        return self.send(None)

    def send(self, val):
        if val is not None:
            return val + 1
        return "Incrementer initialized"
def gen3():
    # Delegate to an object implementing send(); yield from forwards
    # values sent into gen3 to it.
    yield from Incrementer()

g = gen3()
print(next(g))      # first next() is translated to send(None)
print(g.send(5))    # forwarded to Incrementer.send -> 6
print(g.send(100))  # forwarded to Incrementer.send -> 101
#
# Test proper handling of StopIteration vs other exceptions
#
class MyIter:
    """Iterator that terminates immediately with StopIteration(42)."""

    def __iter__(self):
        return self

    def __next__(self):
        # The StopIteration argument becomes the result of `yield from`.
        raise StopIteration(42)
def gen4():
    global ret
    # MyIter raises StopIteration(42) immediately, so the delegation yields
    # nothing and the expression's value (hence `ret`) becomes 42.
    ret = yield from MyIter()
    # Deliberately raise a different exception afterwards to prove that the
    # StopIteration above ended only the delegation, not gen4 itself.
    1//0

ret = None
try:
    print(list(gen4()))
except ZeroDivisionError:
    print("ZeroDivisionError")
print(ret)
| mit |
srcLurker/home-assistant | homeassistant/components/sensor/homematic.py | 9 | 2567 | """
The homematic sensor platform.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.homematic/
Important: For this platform to work the homematic component has to be
properly configured.
"""
import logging
from homeassistant.const import STATE_UNKNOWN
import homeassistant.components.homematic as homematic
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['homematic']
# Map Homematic device class name -> {raw integer state: readable string}.
# Devices not listed here report their raw state value unchanged.
HM_STATE_HA_CAST = {
    "RotaryHandleSensor": {0: "closed", 1: "tilted", 2: "open"},
    "WaterSensor": {0: "dry", 1: "wet", 2: "water"},
    "CO2Sensor": {0: "normal", 1: "added", 2: "strong"},
}

# Unit of measurement per Homematic metric (state) name.
HM_UNIT_HA_CAST = {
    "HUMIDITY": "%",
    "TEMPERATURE": "°C",
    "BRIGHTNESS": "#",
    "POWER": "W",
    "CURRENT": "mA",
    "VOLTAGE": "V",
    "ENERGY_COUNTER": "Wh",
    "GAS_POWER": "m3",
    "GAS_ENERGY_COUNTER": "m3",
    "LUX": "lux",
    "RAIN_COUNTER": "mm",
    "WIND_SPEED": "km/h",
    "WIND_DIRECTION": "°",
    "WIND_DIRECTION_RANGE": "°",
    "SUNSHINEDURATION": "#",
    "AIR_PRESSURE": "hPa",
    "FREQUENCY": "Hz",
}
def setup_platform(hass, config, add_callback_devices, discovery_info=None):
    """Setup the platform."""
    # This platform is discovery-only: without discovery data from the
    # homematic component there is nothing to set up.
    if discovery_info is None:
        return

    # Delegate entity creation to the shared homematic discovery helper,
    # which instantiates HMSensor for each discovered device.
    return homematic.setup_hmdevice_discovery_helper(
        HMSensor,
        discovery_info,
        add_callback_devices
    )
class HMSensor(homematic.HMDevice):
    """Represents various Homematic sensors in Home Assistant."""

    @property
    def state(self):
        """Return the state of the sensor."""
        if not self.available:
            return STATE_UNKNOWN

        # Does a cast exist for this class?
        name = self._hmdevice.__class__.__name__
        if name in HM_STATE_HA_CAST:
            return HM_STATE_HA_CAST[name].get(self._hm_get_state(), None)

        # No cast, return original value
        return self._hm_get_state()

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity, if any."""
        if not self.available:
            return None
        return HM_UNIT_HA_CAST.get(self._state, None)

    def _init_data_struct(self):
        """Generate a data dict (self._data) from hm metadata."""
        # Add state to data dict
        if self._state:
            _LOGGER.debug("%s init datadict with main node '%s'", self._name,
                          self._state)
            self._data.update({self._state: STATE_UNKNOWN})
        else:
            # Without a main state node the sensor cannot report anything.
            _LOGGER.critical("Can't correctly init sensor %s.", self._name)
| mit |
Factr/newspaper | tests/unit_tests.py | 1 | 24005 | # -*- coding: utf-8 -*-
"""
All unit tests for the newspaper library should be contained in this file.
"""
import concurrent.futures
import functools
import os
import sys
import time
import traceback
import unittest
from collections import defaultdict, OrderedDict
TEST_DIR = os.path.abspath(os.path.dirname(__file__))
PARENT_DIR = os.path.join(TEST_DIR, '..')
# newspaper's unit tests are in their own separate module, so
# insert the parent directory manually to gain scope of the
# core module
sys.path.insert(0, PARENT_DIR)
TEXT_FN = os.path.join(TEST_DIR, 'data', 'text')
HTML_FN = os.path.join(TEST_DIR, 'data', 'html')
URLS_FILE = os.path.join(TEST_DIR, 'data', 'fulltext_url_list.txt')
import newspaper
from newspaper import Article, fulltext, Source, ArticleException, news_pool
from newspaper.configuration import Configuration
from newspaper.urls import get_domain
def print_test(method):
    """
    Utility method for print verbalizing test suite, prints out
    time taken for test and functions name, and status

    The wrapper propagates the wrapped method's return value (the previous
    version silently discarded it) and preserves its metadata via
    functools.wraps so test runners see the real test name.
    """
    @functools.wraps(method)
    def run(*args, **kw):
        ts = time.time()
        print('\ttesting function %r' % method.__name__)
        result = method(*args, **kw)
        te = time.time()
        print('\t[OK] in %r %2.2f sec' % (method.__name__, te - ts))
        return result
    return run
def mock_resource_with(filename, resource_type):
    """
    Mocks an HTTP request by pulling text from a pre-downloaded file

    :param str filename: fixture base name (without extension)
    :param str resource_type: either ``'html'`` or ``'txt'``
    :return: the fixture file's contents as text
    :raises Exception: if ``resource_type`` is not a known kind
    """
    VALID_RESOURCES = ['html', 'txt']
    if resource_type not in VALID_RESOURCES:
        raise Exception('Mocked resource must be one of: %s' %
                        ', '.join(VALID_RESOURCES))
    subfolder = 'text' if resource_type == 'txt' else 'html'
    resource_path = os.path.join(TEST_DIR, "data/%s/%s.%s" %
                                 (subfolder, filename, resource_type))
    # Pin UTF-8: the fixtures contain non-ASCII characters, and relying on
    # the platform's locale default encoding breaks e.g. on Windows.
    with open(resource_path, 'r', encoding='utf-8') as f:
        return f.read()
def get_base_domain(url):
    """
    For example, the base url of uk.reuters.com => reuters.com
    """
    labels = get_domain(url).split('.')
    # Two-level public suffixes need one extra label to identify the site.
    suffix = '.'.join(labels[-2:])
    keep = 3 if suffix in ('co.uk', 'com.au', 'au.com') else 2
    return '.'.join(labels[-keep:])
def check_url(*args, **kwargs):
    # Module-level shim around the staticmethod so it can be submitted to
    # the ProcessPoolExecutor in test_exhaustive (picklable by name).
    return ExhaustiveFullTextCase.check_url(*args, **kwargs)
@unittest.skipIf('fulltext' not in sys.argv, 'Skipping fulltext tests')
class ExhaustiveFullTextCase(unittest.TestCase):
    """Bulk regression test: parse every pre-downloaded article fixture and
    count publish-date and full-text extraction failures."""

    @staticmethod
    def check_url(args):
        """
        :param (basestr, basestr) url, res_filename:
        :return: (pubdate_failed, fulltext_failed)
        """
        url, res_filename = args
        pubdate_failed, fulltext_failed = False, False
        html = mock_resource_with(res_filename, 'html')
        try:
            a = Article(url)
            a.download(html)
            a.parse()
            if a.publish_date is None:
                pubdate_failed = True
        except Exception:
            print('<< URL: %s parse ERROR >>' % url)
            traceback.print_exc()
            pubdate_failed, fulltext_failed = True, True
        else:
            correct_text = mock_resource_with(res_filename, 'txt')
            if not (a.text == correct_text):
                # print('Diff: ', simplediff.diff(correct_text, a.text))
                # `correct_text` holds the reason of failure if failure
                print('%s -- %s -- %s' %
                      ('Fulltext failed',
                       res_filename, correct_text.strip()))
                fulltext_failed = True
                # TODO: assert statements are commented out for full-text
                # extraction tests because we are constantly tweaking the
                # algorithm and improving
                # assert a.text == correct_text
        return pubdate_failed, fulltext_failed

    @print_test
    def test_exhaustive(self):
        """Run check_url over every fixture URL in a process pool and assert
        the failure counts stay within the accepted thresholds."""
        with open(URLS_FILE, 'r') as f:
            urls = [d.strip() for d in f.readlines() if d.strip()]

        domain_counters = {}

        def get_filename(url):
            # Fixture files are named <base_domain><per-domain counter>.
            domain = get_base_domain(url)
            domain_counters[domain] = domain_counters.get(domain, 0) + 1
            return '{}{}'.format(domain, domain_counters[domain])

        filenames = map(get_filename, urls)

        with concurrent.futures.ProcessPoolExecutor() as executor:
            test_results = list(executor.map(check_url, zip(urls, filenames)))

        total_pubdates_failed, total_fulltext_failed = \
            list(map(sum, zip(*test_results)))

        print('%s fulltext extractions failed out of %s' %
              (total_fulltext_failed, len(urls)))
        print('%s pubdate extractions failed out of %s' %
              (total_pubdates_failed, len(urls)))

        self.assertGreaterEqual(47, total_pubdates_failed)
        self.assertGreaterEqual(20, total_fulltext_failed)
class ArticleTestCase(unittest.TestCase):
    """End-to-end tests of the Article pipeline (download -> parse -> nlp)
    against a fixed CNN article fixture."""

    def setup_stage(self, stage_name):
        """Run every pipeline stage strictly *before* stage_name, so each
        test starts from a well-defined article state."""
        stages = OrderedDict([
            ('initial', lambda: None),
            ('download', lambda: self.article.download(
                mock_resource_with('cnn_article', 'html'))),
            ('parse', lambda: self.article.parse()),
            ('meta', lambda: None),  # Alias for nlp
            ('nlp', lambda: self.article.nlp())
        ])
        assert stage_name in stages
        for name, action in stages.items():
            if name == stage_name:
                break
            action()

    def setUp(self):
        """Called before the first test case of this unit begins
        """
        self.article = Article(
            url='http://www.cnn.com/2013/11/27/travel/weather-'
                'thanksgiving/index.html?iref=allsearch')

    @print_test
    def test_url(self):
        self.assertEqual(
            'http://www.cnn.com/2013/11/27/travel/weather-'
            'thanksgiving/index.html?iref=allsearch',
            self.article.url)

    @print_test
    def test_download_html(self):
        self.setup_stage('download')
        html = mock_resource_with('cnn_article', 'html')
        self.article.download(html)
        self.assertEqual(75406, len(self.article.html))

    @print_test
    def test_meta_refresh_redirect(self):
        # TODO: We actually hit example.com in this unit test ... which is bad
        # Figure out how to mock an actual redirect
        config = Configuration()
        config.follow_meta_refresh = True
        article = Article(
            '', config=config)
        html = mock_resource_with('google_meta_refresh', 'html')
        article.download(input_html=html)
        article.parse()
        self.assertEqual(article.title, 'Example Domain')

    @print_test
    def test_meta_refresh_no_url_redirect(self):
        # A meta-refresh tag without a target URL must not redirect at all.
        config = Configuration()
        config.follow_meta_refresh = True
        article = Article(
            '', config=config)
        html = mock_resource_with('ap_meta_refresh', 'html')
        article.download(input_html=html)
        article.parse()
        self.assertEqual(article.title, 'News from The Associated Press')

    @print_test
    def test_pre_download_parse(self):
        """Calling `parse()` before `download()` should yield an error
        """
        article = Article(self.article.url)
        self.assertRaises(ArticleException, article.parse)

    @print_test
    def test_parse_html(self):
        """Full parse of the CNN fixture: text, authors, title, images,
        language and publish date."""
        self.setup_stage('parse')

        AUTHORS = ['Chien-Ming Wang', 'Dana A. Ford', 'James S.A. Corey',
                   'Tom Watkins']
        TITLE = 'After storm, forecasters see smooth sailing for Thanksgiving'
        LEN_IMGS = 46
        META_LANG = 'en'

        self.article.parse()
        self.article.nlp()

        text = mock_resource_with('cnn', 'txt')
        self.assertEqual(text, self.article.text)
        self.assertEqual(text, fulltext(self.article.html))

        # NOTE: top_img extraction requires an internet connection
        # unlike the rest of this test file
        TOP_IMG = ('http://i2.cdn.turner.com/cnn/dam/assets/131129200805-'
                   '01-weather-1128-story-top.jpg')
        self.assertEqual(TOP_IMG, self.article.top_img)

        self.assertCountEqual(AUTHORS, self.article.authors)
        self.assertEqual(TITLE, self.article.title)
        self.assertEqual(LEN_IMGS, len(self.article.imgs))
        self.assertEqual(META_LANG, self.article.meta_lang)
        self.assertEqual('2013-11-27 00:00:00', str(self.article.publish_date))

    @print_test
    def test_meta_type_extraction(self):
        self.setup_stage('meta')
        meta_type = self.article.extractor.get_meta_type(
            self.article.clean_doc)
        self.assertEqual('article', meta_type)

    @print_test
    def test_meta_extraction(self):
        """The extracted meta data must match the fixture's tags exactly."""
        self.setup_stage('meta')
        meta = self.article.extractor.get_meta_data(self.article.clean_doc)
        META_DATA = defaultdict(dict, {
            'medium': 'news',
            'googlebot': 'noarchive',
            'pubdate': '2013-11-27T08:36:32Z',
            'title': 'After storm, forecasters see smooth sailing for Thanksgiving - CNN.com',
            'og': {'site_name': 'CNN',
                   'description': 'A strong storm struck much of the eastern United States on Wednesday, complicating holiday plans for many of the 43 million Americans expected to travel.',
                   'title': 'After storm, forecasters see smooth sailing for Thanksgiving',
                   'url': 'http://www.cnn.com/2013/11/27/travel/weather-thanksgiving/index.html',
                   'image': 'http://i2.cdn.turner.com/cnn/dam/assets/131129200805-01-weather-1128-story-top.jpg',
                   'type': 'article'},
            'section': 'travel',
            'author': 'Dana A. Ford, James S.A. Corey, Chien-Ming Wang, and Tom Watkins, CNN',
            'robots': 'index,follow',
            'vr': {
                'canonical': 'http://edition.cnn.com/2013/11/27/travel/weather-thanksgiving/index.html'},
            'source': 'CNN',
            'fb': {'page_id': 18793419640, 'app_id': 80401312489},
            'keywords': 'winter storm,holiday travel,Thanksgiving storm,Thanksgiving winter storm',
            'article': {
                'publisher': 'https://www.facebook.com/cnninternational'},
            'lastmod': '2013-11-28T02:03:23Z',
            'twitter': {'site': {'identifier': '@CNNI', 'id': 2097571},
                        'card': 'summary',
                        'creator': {'identifier': '@cnntravel',
                                    'id': 174377718}},
            'viewport': 'width=1024',
            'news_keywords': 'winter storm,holiday travel,Thanksgiving storm,Thanksgiving winter storm'
        })
        self.assertDictEqual(META_DATA, meta)

        # if the value for a meta key is another dict, that dict ought to be
        # filled with keys and values
        dict_values = [v for v in list(meta.values()) if isinstance(v, dict)]
        self.assertTrue(all([len(d) > 0 for d in dict_values]))

        # there are exactly 5 top-level "og:type" type keys
        is_dict = lambda v: isinstance(v, dict)
        self.assertEqual(5, len([i for i in meta.values() if is_dict(i)]))

        # there are exactly 12 top-level "pubdate" type keys
        is_string = lambda v: isinstance(v, str)
        self.assertEqual(12, len([i for i in meta.values() if is_string(i)]))

    @print_test
    def test_pre_download_nlp(self):
        """Test running NLP algos before even downloading the article
        """
        self.setup_stage('initial')
        new_article = Article(self.article.url)
        self.assertRaises(ArticleException, new_article.nlp)

    @print_test
    def test_pre_parse_nlp(self):
        """Test running NLP algos before parsing the article
        """
        self.setup_stage('parse')
        self.assertRaises(ArticleException, self.article.nlp)

    @print_test
    def test_nlp_body(self):
        self.setup_stage('nlp')
        self.article.nlp()
        KEYWORDS = ['balloons', 'delays', 'flight', 'forecasters',
                    'good', 'sailing', 'smooth', 'storm', 'thanksgiving',
                    'travel', 'weather', 'winds', 'york']
        SUMMARY = mock_resource_with('cnn_summary', 'txt')
        self.assertEqual(SUMMARY, self.article.summary)
        self.assertCountEqual(KEYWORDS, self.article.keywords)
class ContentExtractorTestCase(unittest.TestCase):
    """Test specific element extraction cases"""

    def setUp(self):
        self.extractor = newspaper.extractors.ContentExtractor(Configuration())
        self.parser = newspaper.parsers.Parser

    def _get_title(self, html):
        # Helper: parse raw HTML and run title extraction on it.
        doc = self.parser.fromstring(html)
        return self.extractor.get_title(doc)

    def test_get_title_basic(self):
        html = '<title>Test title</title>'
        self.assertEqual(self._get_title(html), 'Test title')

    def test_get_title_split(self):
        html = '<title>Test page » Test title</title>'
        self.assertEqual(self._get_title(html), 'Test title')

    def test_get_title_split_escaped(self):
        html = '<title>Test page » Test title</title>'
        self.assertEqual(self._get_title(html), 'Test title')

    def test_get_title_quotes(self):
        title = 'Test page and «something in quotes»'
        html = '<title>{}</title>'.format(title)
        self.assertEqual(self._get_title(html), title)

    def _get_canonical_link(self, article_url, html):
        # Helper: parse raw HTML and resolve its canonical link relative to
        # article_url.
        doc = self.parser.fromstring(html)
        return self.extractor.get_canonical_link(article_url, doc)

    def test_get_canonical_link_rel_canonical(self):
        url = 'http://www.example.com/article.html'
        html = '<link rel="canonical" href="{}">'.format(url)
        self.assertEqual(self._get_canonical_link('', html), url)

    def test_get_canonical_link_rel_canonical_absolute_url(self):
        url = 'http://www.example.com/article.html'
        html = '<link rel="canonical" href="article.html">'
        article_url = 'http://www.example.com/article?foo=bar'
        self.assertEqual(self._get_canonical_link(article_url, html), url)

    def test_get_canonical_link_og_url_absolute_url(self):
        url = 'http://www.example.com/article.html'
        html = '<meta property="og:url" content="article.html">'
        article_url = 'http://www.example.com/article?foo=bar'
        self.assertEqual(self._get_canonical_link(article_url, html), url)

    def test_get_canonical_link_hostname_og_url_absolute_url(self):
        url = 'http://www.example.com/article.html'
        html = '<meta property="og:url" content="www.example.com/article.html">'
        article_url = 'http://www.example.com/article?foo=bar'
        self.assertEqual(self._get_canonical_link(article_url, html), url)
class SourceTestCase(unittest.TestCase):
@print_test
def test_source_url_input_none(self):
with self.assertRaises(Exception):
Source(url=None)
@unittest.skip("Need to mock download")
@print_test
def test_source_build(self):
"""
builds a source object, validates it has no errors, prints out
all valid categories and feed urls
"""
DESC = ('CNN.com International delivers breaking news from across '
'the globe and information on the latest top stories, '
'business, sports and entertainment headlines. Follow the '
'news as it happens through: special reports, videos, '
'audio, photo galleries plus interactive maps and timelines.')
CATEGORY_URLS = [
'http://cnn.com/ASIA', 'http://connecttheworld.blogs.cnn.com',
'http://cnn.com/HLN', 'http://cnn.com/MIDDLEEAST',
'http://cnn.com', 'http://ireport.cnn.com',
'http://cnn.com/video', 'http://transcripts.cnn.com',
'http://cnn.com/espanol',
'http://partners.cnn.com', 'http://www.cnn.com',
'http://cnn.com/US', 'http://cnn.com/EUROPE',
'http://cnn.com/TRAVEL', 'http://cnn.com/cnni',
'http://cnn.com/SPORT', 'http://cnn.com/mostpopular',
'http://arabic.cnn.com', 'http://cnn.com/WORLD',
'http://cnn.com/LATINAMERICA', 'http://us.cnn.com',
'http://travel.cnn.com', 'http://mexico.cnn.com',
'http://cnn.com/SHOWBIZ', 'http://edition.cnn.com',
'http://amanpour.blogs.cnn.com', 'http://money.cnn.com',
'http://cnn.com/tools/index.html', 'http://cnnespanol.cnn.com',
'http://cnn.com/CNNI', 'http://business.blogs.cnn.com',
'http://cnn.com/AFRICA', 'http://cnn.com/TECH',
'http://cnn.com/BUSINESS']
FEEDS = ['http://rss.cnn.com/rss/edition.rss']
BRAND = 'cnn'
s = Source('http://cnn.com', verbose=False, memoize_articles=False)
# html = mock_resource_with('http://cnn.com', 'cnn_main_site')
s.clean_memo_cache()
s.build()
# TODO: The rest of the source extraction features will be fully tested
# after I figure out a way to sensibly mock the HTTP requests for all
# of the category and feeed URLs
# assert s.brand == BRAND
# assert s.description == DESC
# assert s.size() == 266
# assert s.category_urls() == CATEGORY_URLS
# TODO: A lot of the feed extraction is NOT being tested because feeds
# are primarly extracted from the HTML of category URLs. We lose this
# effect by just mocking CNN's main page HTML. Warning: tedious fix.
# assert s.feed_urls() == FEEDS
    @unittest.skip("Need to mock download")
    @print_test
    def test_cache_categories(self):
        """Builds two same source objects in a row examines speeds of both
        """
        url = 'http://uk.yahoo.com'
        html = mock_resource_with('yahoo_main_site', 'html')
        s = Source(url)
        s.download()
        s.parse()
        s.set_categories()
        saved_urls = s.category_urls()
        # Re-deriving the categories after clearing them should yield the
        # same URL set (order-insensitive), exercising the category cache.
        s.categories = []
        s.set_categories()
        self.assertCountEqual(saved_urls, s.category_urls())
class UrlTestCase(unittest.TestCase):
    """Tests for the URL validation / normalization heuristics."""
    @print_test
    def test_valid_urls(self):
        """Prints out a list of urls with our heuristic guess if it is a
        valid news url purely based on the url
        """
        from newspaper.urls import valid_url
        with open(os.path.join(TEST_DIR, 'data/test_urls.txt'), 'r') as f:
            lines = f.readlines()
            test_tuples = [tuple(l.strip().split(' ')) for l in lines]
            # tuples are ('1', 'url_goes_here') form, '1' means valid,
            # '0' otherwise
            for lst, url in test_tuples:
                truth_val = bool(int(lst))
                try:
                    self.assertEqual(truth_val, valid_url(url, test=True))
                except AssertionError:
                    # Print the offending URL before re-raising so a batch
                    # failure is diagnosable from the test output.
                    print('\t\turl: %s is supposed to be %s' % (url, truth_val))
                    raise
    @unittest.skip("Need to write an actual test")
    @print_test
    def test_prepare_url(self):
        """Normalizes a url, removes arguments, hashtags. If a relative url, it
        merges it with the source domain to make an abs url, etc
        """
        pass
class APITestCase(unittest.TestCase):
    """Smoke tests for the top-level newspaper convenience API."""
    @print_test
    def test_hot_trending(self):
        """Grab google trending, just make sure this runs
        """
        # Smoke test only: hits the network, no assertion on content.
        newspaper.hot()
    @print_test
    def test_popular_urls(self):
        """Just make sure this method runs
        """
        newspaper.popular_urls()
@unittest.skip("Need to mock download")
class MThreadingTestCase(unittest.TestCase):
    """Exercises multi-threaded downloading via news_pool (network-bound)."""
    @print_test
    def test_download_works(self):
        config = Configuration()
        config.memoize_articles = False
        slate_paper = newspaper.build('http://slate.com', config=config)
        tc_paper = newspaper.build('http://techcrunch.com', config=config)
        espn_paper = newspaper.build('http://espn.com', config=config)
        print(('Slate has %d articles TC has %d articles ESPN has %d articles'
               % (slate_paper.size(), tc_paper.size(), espn_paper.size())))
        # Download all three papers concurrently (2 threads per source),
        # then verify articles actually received HTML.
        papers = [slate_paper, tc_paper, espn_paper]
        news_pool.set(papers, threads_per_source=2)
        news_pool.join()
        print('Downloaded Slate mthread len',
              len(slate_paper.articles[0].html))
        print('Downloaded ESPN mthread len',
              len(espn_paper.articles[-1].html))
        print('Downloaded TC mthread len',
              len(tc_paper.articles[1].html))
class ConfigBuildTestCase(unittest.TestCase):
    """Test if our **kwargs to config building setup actually works.
    NOTE: No need to mock responses as we are just initializing the
    objects, not actually calling download(..)
    """
    @print_test
    def test_article_default_params(self):
        # Defaults: English, memoization on, language auto-detected from meta.
        a = Article(url='http://www.cnn.com/2013/11/27/'
                        'travel/weather-thanksgiving/index.html')
        self.assertEqual('en', a.config.language)
        self.assertTrue(a.config.memoize_articles)
        self.assertTrue(a.config.use_meta_language)
    @print_test
    def test_article_custom_params(self):
        # Explicit language should disable meta-language detection.
        a = Article(url='http://www.cnn.com/2013/11/27/travel/'
                        'weather-thanksgiving/index.html',
                    language='zh', memoize_articles=False)
        self.assertEqual('zh', a.config.language)
        self.assertFalse(a.config.memoize_articles)
        self.assertFalse(a.config.use_meta_language)
    @print_test
    def test_source_default_params(self):
        s = Source(url='http://cnn.com')
        self.assertEqual('en', s.config.language)
        self.assertEqual(20000, s.config.MAX_FILE_MEMO)
        self.assertTrue(s.config.memoize_articles)
        self.assertTrue(s.config.use_meta_language)
    @print_test
    def test_source_custom_params(self):
        s = Source(url="http://cnn.com", memoize_articles=False,
                   MAX_FILE_MEMO=10000, language='en')
        self.assertFalse(s.config.memoize_articles)
        self.assertEqual(10000, s.config.MAX_FILE_MEMO)
        self.assertEqual('en', s.config.language)
        self.assertFalse(s.config.use_meta_language)
class MultiLanguageTestCase(unittest.TestCase):
    """Full-text extraction against fixture HTML/text in several languages."""
    @print_test
    def test_chinese_fulltext_extract(self):
        url = 'http://news.sohu.com/20050601/n225789219.shtml'
        article = Article(url=url, language='zh')
        html = mock_resource_with('chinese_article', 'html')
        article.download(html)
        article.parse()
        # Extracted text must match the fixture both via Article.parse()
        # and the standalone fulltext() helper.
        text = mock_resource_with('chinese', 'txt')
        self.assertEqual(text, article.text)
        self.assertEqual(text, fulltext(article.html, 'zh'))
    @print_test
    def test_arabic_fulltext_extract(self):
        url = 'http://arabic.cnn.com/2013/middle_east/8/3/syria.clashes/' \
              'index.html'
        # No explicit language: meta-language detection should pick 'ar'.
        article = Article(url=url)
        html = mock_resource_with('arabic_article', 'html')
        article.download(html)
        article.parse()
        self.assertEqual('ar', article.meta_lang)
        text = mock_resource_with('arabic', 'txt')
        self.assertEqual(text, article.text)
        self.assertEqual(text, fulltext(article.html, 'ar'))
    @print_test
    def test_spanish_fulltext_extract(self):
        url = 'http://ultimahora.es/mallorca/noticia/noticias/local/fiscal' \
              'ia-anticorrupcion-estudia-recurre-imputacion-infanta.html'
        article = Article(url=url, language='es')
        html = mock_resource_with('spanish_article', 'html')
        article.download(html)
        article.parse()
        text = mock_resource_with('spanish', 'txt')
        self.assertEqual(text, article.text)
        self.assertEqual(text, fulltext(article.html, 'es'))
if __name__ == '__main__':
    # Strip our custom 'fulltext' flag before handing argv to unittest,
    # which would otherwise treat it as a test name.
    argv = list(sys.argv)
    if 'fulltext' in argv:
        argv.remove('fulltext')  # remove it here, so it doesn't pass to unittest
    unittest.main(verbosity=0, argv=argv)
| mit |
andreparames/odoo | openerp/tools/amount_to_text_en.py | 441 | 5103 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from translate import _
_logger = logging.getLogger(__name__)
#-------------------------------------------------------------
#ENGLISH
#-------------------------------------------------------------
# Word tables used by the conversion helpers below:
# - to_19: words for 0..19, indexed directly by value
# - tens: words for 20, 30, ... 90 (index = value // 10 - 2)
# - denom: thousand-group names; denom[i] names 1000**i
to_19 = ( 'Zero', 'One', 'Two', 'Three', 'Four', 'Five', 'Six',
'Seven', 'Eight', 'Nine', 'Ten', 'Eleven', 'Twelve', 'Thirteen',
'Fourteen', 'Fifteen', 'Sixteen', 'Seventeen', 'Eighteen', 'Nineteen' )
tens = ( 'Twenty', 'Thirty', 'Forty', 'Fifty', 'Sixty', 'Seventy', 'Eighty', 'Ninety')
denom = ( '',
'Thousand', 'Million', 'Billion', 'Trillion', 'Quadrillion',
'Quintillion', 'Sextillion', 'Septillion', 'Octillion', 'Nonillion',
'Decillion', 'Undecillion', 'Duodecillion', 'Tredecillion', 'Quattuordecillion',
'Sexdecillion', 'Septendecillion', 'Octodecillion', 'Novemdecillion', 'Vigintillion' )
def _convert_nn(val):
    """Spell out an integer below 100 in English words (e.g. 'Forty-Two')."""
    if val < 20:
        return to_19[val]
    # Walk the tens words; `upper` is the first multiple of ten above the
    # decade that `word` names, so the first match picks the right decade.
    for idx, word in enumerate(tens):
        upper = 30 + 10 * idx
        if upper > val:
            units = val % 10
            return word + '-' + to_19[units] if units else word
def _convert_nnn(val):
    """Spell out an integer below 1000 in English words.

    Special cased because it is the level that kicks off the < 100 special
    case; calling it directly also yields forms like 'Five Hundred'.
    """
    hundreds, remainder = divmod(val, 100)
    parts = []
    if hundreds > 0:
        parts.append(to_19[hundreds] + ' Hundred')
    if remainder > 0:
        parts.append(_convert_nn(remainder))
    return ' '.join(parts)
def english_number(val):
    """Spell out a non-negative integer in English words."""
    # Values below a thousand are handled by the helpers directly.
    if val < 100:
        return _convert_nn(val)
    if val < 1000:
        return _convert_nnn(val)
    # Find the largest denomination (Thousand, Million, ...) not exceeding
    # val; spell the leading group, then recurse on the remainder.
    for (didx, dval) in ((v - 1, 1000 ** v) for v in range(len(denom))):
        if dval > val:
            mod = 1000 ** didx
            l = val // mod
            r = val - (l * mod)
            ret = _convert_nnn(l) + ' ' + denom[didx]
            if r > 0:
                ret = ret + ', ' + english_number(r)
            return ret
def amount_to_text(number, currency):
    """Spell out a monetary amount in English.

    e.g. amount_to_text(12.05, 'euro') -> 'Twelve euro and Five Cents'.
    """
    units_part, cents_part = ('%.2f' % number).split('.')
    start_word = english_number(int(units_part))
    cents_number = int(cents_part)
    end_word = english_number(cents_number)
    cents_name = 'Cents' if cents_number > 1 else 'Cent'
    # 'and' is only emitted when both halves of the phrase are present.
    joiner = (start_word or currency) and (end_word or cents_name) and 'and'
    pieces = [start_word, currency, joiner, end_word, cents_name]
    return ' '.join(filter(None, pieces))
#-------------------------------------------------------------
# Generic functions
#-------------------------------------------------------------
# Dispatch table: language code -> amount-to-text conversion function.
_translate_funcs = {'en' : amount_to_text}
#TODO: we should use the country AND language (ex: septante VS soixante dix)
#TODO: we should use en by default, but the translation func is yet to be implemented
def amount_to_text(nbr, lang='en', currency='euro'):
    """Convert an integer to its textual representation.

    Dispatches on *lang* through ``_translate_funcs``; when no conversion
    function is registered for the language, logs a warning and falls back
    to English.

    :param nbr: number to convert (its absolute value is used)
    :param lang: ISO language code of the desired output
    :param currency: currency name inserted between units and cents
    """
    # dict.has_key() is deprecated (and gone in Python 3); use `in`.
    if lang not in _translate_funcs:
        _logger.warning(_("no translation function found for lang: '%s'"), lang)
        #TODO: (default should be en) same as above
        lang = 'en'
    return _translate_funcs[lang](abs(nbr), currency)
if __name__=='__main__':
    from sys import argv
    lang = 'nl'
    # Manual smoke test: spell out either the number given on the command
    # line or a sampling of values. Note: 'nl' has no registered conversion
    # function, so amount_to_text() warns and falls back to English.
    # Fixed: the original called int_to_text(), which is not defined
    # anywhere in this module (NameError); amount_to_text() is the entry
    # point. print() with a single pre-formatted string works identically
    # under Python 2 and 3.
    if len(argv) < 2:
        for i in range(1, 200):
            print("%s >> %s" % (i, amount_to_text(i, lang)))
        for i in range(200, 999999, 139):
            print("%s >> %s" % (i, amount_to_text(i, lang)))
    else:
        print(amount_to_text(int(argv[1]), lang))
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Southpaw-TACTIC/TACTIC | 3rd_party/python2/site-packages/dateutil/zoneinfo/__init__.py | 265 | 2575 | """
Copyright (c) 2003-2005 Gustavo Niemeyer <gustavo@niemeyer.net>
This module offers extensions to the standard python 2.3+
datetime module.
"""
from dateutil.tz import tzfile
from tarfile import TarFile
import os
__author__ = "Gustavo Niemeyer <gustavo@niemeyer.net>"
__license__ = "PSF License"
__all__ = ["setcachesize", "gettz", "rebuild"]
# MRU cache of (zone_name, tzinfo) pairs, newest first; trimmed to CACHESIZE.
CACHE = []
CACHESIZE = 10
class tzfile(tzfile):
    # Shadow the imported tzfile only to make instances picklable:
    # unpickling re-resolves the zone by name through gettz().
    def __reduce__(self):
        return (gettz, (self._filename,))
def getzoneinfofile():
    """Return the path of the bundled zoneinfo tarball, or None if absent.

    When several ``zoneinfo*.tar.*`` files are present, the lexicographically
    greatest name (i.e. the newest tag) wins.
    """
    directory = os.path.dirname(__file__)
    for entry in sorted(os.listdir(os.path.join(directory)), reverse=True):
        if entry.startswith("zoneinfo") and ".tar." in entry:
            return os.path.join(directory, entry)
    return None
# Resolve the tarball path once at import time, then drop the helper.
ZONEINFOFILE = getzoneinfofile()
del getzoneinfofile
def setcachesize(size):
    """Set the maximum number of cached timezones, trimming excess entries."""
    global CACHESIZE, CACHE
    CACHESIZE = size
    # Discard the least-recently-used entries beyond the new limit.
    CACHE[size:] = []
def gettz(name):
    """Return a tzinfo for zone *name* from the bundled zoneinfo tarball.

    Results are kept in a small MRU cache (CACHE). Returns None when the
    zone name is unknown or when no zoneinfo tarball is bundled.
    """
    tzinfo = None
    if ZONEINFOFILE:
        for cachedname, tzinfo in CACHE:
            if cachedname == name:
                break
        else:
            tf = TarFile.open(ZONEINFOFILE)
            # Close the tarball on every path: the original leaked the open
            # TarFile when the zone name was missing (KeyError branch).
            try:
                try:
                    zonefile = tf.extractfile(name)
                except KeyError:
                    tzinfo = None
                else:
                    # tzfile reads the member stream during construction,
                    # so closing tf afterwards is safe.
                    tzinfo = tzfile(zonefile)
            finally:
                tf.close()
            CACHE.insert(0, (name, tzinfo))
            del CACHE[CACHESIZE:]
    return tzinfo
def rebuild(filename, tag=None, format="gz"):
    """Rebuild the bundled zoneinfo tarball from an Olson database tarball.

    Extracts zone source files from *filename*, compiles them with the
    system ``zic`` into a temporary directory, removes any previously
    bundled ``zoneinfo*.tar.*`` next to this module, and writes the result
    as ``zoneinfo[-tag].tar.<format>``.

    :param filename: path to the Olson tzdata tarball
    :param tag: optional version tag embedded in the target file name
    :param format: compression suffix understood by tarfile ("gz", "bz2", ...)
    """
    import tempfile, shutil
    tmpdir = tempfile.mkdtemp()
    zonedir = os.path.join(tmpdir, "zoneinfo")
    moduledir = os.path.dirname(__file__)
    # Fixed: with tag=None the original interpolated the literal string
    # "None" into the file name ("zoneinfoNone.tar.gz").
    if tag:
        tag = "-" + tag
    else:
        tag = ""
    targetname = "zoneinfo%s.tar.%s" % (tag, format)
    try:
        tf = TarFile.open(filename)
        for name in tf.getnames():
            if not (name.endswith(".sh") or
                    name.endswith(".tab") or
                    name == "leapseconds"):
                tf.extract(name, tmpdir)
                filepath = os.path.join(tmpdir, name)
                # NOTE(review): shell command built by string interpolation.
                # Paths come from mkdtemp and tarball member names, but a
                # subprocess argument list would be safer than os.system.
                os.system("zic -d %s %s" % (zonedir, filepath))
        tf.close()
        target = os.path.join(moduledir, targetname)
        # Drop every previously bundled tarball so getzoneinfofile() cannot
        # pick up a stale one.
        for entry in os.listdir(moduledir):
            if entry.startswith("zoneinfo") and ".tar." in entry:
                os.unlink(os.path.join(moduledir, entry))
        tf = TarFile.open(target, "w:%s" % format)
        for entry in os.listdir(zonedir):
            entrypath = os.path.join(zonedir, entry)
            tf.add(entrypath, entry)
        tf.close()
    finally:
        shutil.rmtree(tmpdir)
| epl-1.0 |
zhangxq5012/sky_engine | mojo/tools/mopy/mojo_python_tests_runner.py | 9 | 1578 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import os
import sys
import unittest
import mopy.paths
class MojoPythonTestRunner(object):
  """Helper class to run python tests on the bots."""
  def __init__(self, test_dir):
    # Directory, relative to the src root, holding the tests to run.
    self._test_dir = test_dir
  def run(self):
    """Discover and run the tests; return 0 on success, 1 on failure."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-v', '--verbose', action='count', default=0)
    parser.add_argument('tests', nargs='*')
    self.add_custom_commandline_options(parser)
    args = parser.parse_args()
    self.apply_customization(args)
    loader = unittest.loader.TestLoader()
    print "Running Python unit tests under %s..." % self._test_dir
    src_root = mopy.paths.Paths().src_root
    pylib_dir = os.path.abspath(os.path.join(src_root, self._test_dir))
    if args.tests:
      # Named tests are loaded by dotted path, so the test dir must be
      # importable.
      if pylib_dir not in sys.path:
        sys.path.append(pylib_dir)
      suite = unittest.TestSuite()
      for test_name in args.tests:
        suite.addTests(loader.loadTestsFromName(test_name))
    else:
      suite = loader.discover(pylib_dir, pattern='*_unittest.py')
    runner = unittest.runner.TextTestRunner(verbosity=(args.verbose + 1))
    result = runner.run(suite)
    return 0 if result.wasSuccessful() else 1
  def add_custom_commandline_options(self, parser):
    """Allow to add custom option to the runner script."""
    pass
  def apply_customization(self, args):
    """Allow to apply any customization to the runner."""
    pass
| bsd-3-clause |
apache/incubator-airflow | dev/provider_packages/refactor_provider_packages.py | 3 | 33388 | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import sys
import token
from os.path import dirname
from shutil import copyfile, copytree, rmtree
from typing import List
from bowler import LN, TOKEN, Capture, Filename, Query
from fissix.fixer_util import Comma, KeywordArg, Name
from fissix.pytree import Leaf
from dev.provider_packages.prepare_provider_packages import (
get_source_airflow_folder,
get_source_providers_folder,
get_target_providers_folder,
get_target_providers_package_folder,
)
def copy_provider_sources() -> None:
    """
    Copies provider sources to directory where they will be refactored.
    """
    def rm_build_dir() -> None:
        """
        Removes build directory.
        """
        build_dir = os.path.join(dirname(__file__), "build")
        if os.path.isdir(build_dir):
            rmtree(build_dir)
    def ignore_google_auth_backend(src: str, names: List[str]) -> List[str]:
        # copytree ignore-callback: skip google/common/auth_backend entirely.
        del names
        if src.endswith("google" + os.path.sep + "common"):
            return ["auth_backend"]
        return []
    def ignore_some_files(src: str, names: List[str]) -> List[str]:
        # Aggregates all per-provider ignore rules for copytree.
        ignored_list = []
        ignored_list.extend(ignore_google_auth_backend(src=src, names=names))
        return ignored_list
    # Start from a clean slate: drop the build dir and any previous copy.
    rm_build_dir()
    package_providers_dir = get_target_providers_folder()
    if os.path.isdir(package_providers_dir):
        rmtree(package_providers_dir)
    copytree(get_source_providers_folder(), get_target_providers_folder(), ignore=ignore_some_files)
def copy_helper_py_file(target_file_path: str) -> None:
    """
    Copy airflow/utils/helpers.py into a provider package, rewriting imports.

    The helper has two methods (chain, cross_downstream) that were moved from
    the original helper to 'airflow.models.baseoperator', so in 1.10 they
    should re-import the original 'airflow.utils.helpers' methods. Those
    deprecated methods use import with import_string("<IMPORT>") so it is
    easier to replace them as strings rather than with Bowler.

    :param target_file_path: target path name for the helpers.py
    """
    source_path = os.path.join(get_source_airflow_folder(), "airflow", "utils", "helpers.py")
    with open(source_path) as source_file, open(target_file_path, "wt") as target_file:
        for source_line in source_file:
            target_file.write(
                source_line.replace('airflow.models.baseoperator', 'airflow.utils.helpers')
            )
class RefactorBackportPackages:
"""
Refactors the code of providers, so that it works in 1.10.
"""
    def __init__(self):
        # Single Bowler query accumulating all refactoring steps; they are
        # executed together when the query is run.
        self.qry = Query()
    def remove_class(self, class_name) -> None:
        """
        Removes class altogether. Example diff generated:
        .. code-block:: diff
            --- ./airflow/providers/qubole/example_dags/example_qubole.py
            +++ ./airflow/providers/qubole/example_dags/example_qubole.py
            @@ -22,7 +22,7 @@
             from airflow import DAG
             from airflow.operators.dummy_operator import DummyOperator
            -from airflow.operators.python import BranchPythonOperator, PythonOperator
            +from airflow.operators.python_operator import BranchPythonOperator, PythonOperator
             from airflow.providers.qubole.operators.qubole import QuboleOperator
             from airflow.providers.qubole.sensors.qubole import QuboleFileSensor, QubolePartitionSensor
             from airflow.utils.dates import days_ago
        :param class_name: name to remove
        """
        def _remover(node: LN, capture: Capture, filename: Filename) -> None:
            # Drop the whole matched class node from the tree.
            node.remove()
        self.qry.select_class(class_name).modify(_remover)
    def rename_deprecated_modules(self) -> None:
        """
        Renames back to deprecated modules imported. Example diff generated:
        .. code-block:: diff
            --- ./airflow/providers/dingding/operators/dingding.py
            +++ ./airflow/providers/dingding/operators/dingding.py
            @@ -16,7 +16,7 @@
             # specific language governing permissions and limitations
             # under the License.
            -from airflow.operators.baseoperator import BaseOperator
            +from airflow.operators.bash_operator import BaseOperator
             from airflow.providers.dingding.hooks.dingding import DingdingHook
             from airflow.utils.decorators import apply_defaults
        """
        # (new 2.0 module path, old 1.10 module path) pairs.
        changes = [
            ("airflow.hooks.base", "airflow.hooks.base_hook"),
            ("airflow.hooks.dbapi", "airflow.hooks.dbapi_hook"),
            ("airflow.operators.bash", "airflow.operators.bash_operator"),
            ("airflow.operators.branch", "airflow.operators.branch_operator"),
            ("airflow.operators.dummy", "airflow.operators.dummy_operator"),
            ("airflow.operators.python", "airflow.operators.python_operator"),
            ("airflow.operators.trigger_dagrun", "airflow.operators.dagrun_operator"),
            ("airflow.sensors.base", "airflow.sensors.base_sensor_operator"),
            ("airflow.sensors.date_time", "airflow.sensors.date_time_sensor"),
            ("airflow.sensors.external_task", "airflow.sensors.external_task_sensor"),
            ("airflow.sensors.sql", "airflow.sensors.sql_sensor"),
            ("airflow.sensors.time_delta", "airflow.sensors.time_delta_sensor"),
            ("airflow.sensors.weekday", "airflow.contrib.sensors.weekday_sensor"),
            ("airflow.utils.session", "airflow.utils.db"),
        ]
        for new, old in changes:
            self.qry.select_module(new).rename(old)
        def is_not_k8spodop(node: LN, capture: Capture, filename: Filename) -> bool:
            # kubernetes_pod.py keeps its backcompat imports untouched.
            return not filename.endswith("/kubernetes_pod.py")
        self.qry.select_module("airflow.providers.cncf.kubernetes.backcompat").filter(
            callback=is_not_k8spodop
        ).rename("airflow.kubernetes")
        self.qry.select_module("airflow.providers.cncf.kubernetes.backcompat.pod_runtime_info_env").rename(
            "airflow.kubernetes.pod_runtime_info_env"
        )
        backcompat_target_folder = os.path.join(
            get_target_providers_package_folder("cncf.kubernetes"), "backcompat"
        )
        # Remove backcompat classes that are imported from "airflow.kubernetes"
        for file in ['pod.py', 'pod_runtime_info_env.py', 'volume.py', 'volume_mount.py']:
            os.remove(os.path.join(backcompat_target_folder, file))
    def add_provide_context_to_python_operators(self) -> None:
        """
        Adds provide context to usages of Python/BranchPython Operators - mostly in example_dags.
        Note that those changes apply to example DAGs not to the operators/hooks erc.
        We package the example DAGs together with the provider classes and they should serve as
        examples independently on the version of Airflow it will be installed in.
        Provide_context feature in Python operators was feature added 2.0.0 and we are still
        using the "Core" operators from the Airflow version that the provider packages are installed
        in - the "Core" operators do not have (for now) their own provider package.
        The core operators are:
        * Python
        * BranchPython
        * Bash
        * Branch
        * Dummy
        * LatestOnly
        * ShortCircuit
        * PythonVirtualEnv
        Example diff generated:
        .. code-block:: diff
            --- ./airflow/providers/amazon/aws/example_dags/example_google_api_to_s3_transfer_advanced.py
            +++ ./airflow/providers/amazon/aws/example_dags/example_google_api_to_s3_transfer_advanced.py
            @@ -105,7 +105,8 @@
                        task_video_ids_to_s3.google_api_response_via_xcom,
                        task_video_ids_to_s3.task_id
                    ],
            -        task_id='check_and_transform_video_ids'
            +        task_id='check_and_transform_video_ids',
            +        provide_context=True
                )
        """
        def add_provide_context_to_python_operator(node: LN, capture: Capture, filename: Filename) -> None:
            fn_args = capture['function_arguments'][0]
            # Ensure a trailing comma before appending the new keyword arg.
            if len(fn_args.children) > 0 and (
                not isinstance(fn_args.children[-1], Leaf) or fn_args.children[-1].type != token.COMMA
            ):
                fn_args.append_child(Comma())
            provide_context_arg = KeywordArg(Name('provide_context'), Name('True'))
            # Reuse the first argument's prefix (whitespace) so formatting matches.
            provide_context_arg.prefix = fn_args.children[0].prefix
            fn_args.append_child(provide_context_arg)
        (self.qry.select_function("PythonOperator").is_call().modify(add_provide_context_to_python_operator))
        (
            self.qry.select_function("BranchPythonOperator")
            .is_call()
            .modify(add_provide_context_to_python_operator)
        )
    def remove_super_init_call(self):
        r"""
        Removes super().__init__() call from Hooks.
        In airflow 1.10 almost none of the Hooks call super().init(). It was always broken in Airflow 1.10 -
        the BaseHook() has it's own __init__() which is wrongly implemented and requires source
        parameter to be passed::
        .. code-block:: python
            def __init__(self, source):
                pass
        We fixed it in 2.0, but for the entire 1.10 line calling super().init() is not a good idea -
        and it basically does nothing even if you do. And it's bad because it does not initialize
        LoggingMixin (BaseHook derives from LoggingMixin). And it is the main reason why Hook
        logs are not working as they are supposed to sometimes:
        .. code-block:: python
            class LoggingMixin(object):
                \"\"\"
                Convenience super-class to have a logger configured with the class name
                \"\"\"
                def __init__(self, context=None):
                    self._set_context(context)
        There are two Hooks in 1.10 that call super.__init__ :
        .. code-block:: python
               super(CloudSqlDatabaseHook, self).__init__(source=None)
               super(MongoHook, self).__init__(source='mongo')
        Not that it helps with anything because init in BaseHook does nothing. So we remove
        the super().init() in Hooks when backporting to 1.10.
        Example diff generated:
        .. code-block:: diff
            --- ./airflow/providers/apache/druid/hooks/druid.py
            +++ ./airflow/providers/apache/druid/hooks/druid.py
            @@ -49,7 +49,7 @@
                          timeout=1,
                          max_ingestion_time=None):
            -        super().__init__()
            +
                     self.druid_ingest_conn_id = druid_ingest_conn_id
                     self.timeout = timeout
                     self.max_ingestion_time = max_ingestion_time
        """
        def remove_super_init_call_modifier(node: LN, capture: Capture, filename: Filename) -> None:
            # Walk the whole subclass body looking for `super` leaves and
            # drop the statement (the leaf's parent) that contains them.
            for ch in node.post_order():
                if isinstance(ch, Leaf) and ch.value == "super":
                    if any(c.value for c in ch.parent.post_order() if isinstance(c, Leaf)):
                        ch.parent.remove()
        self.qry.select_subclass("BaseHook").modify(remove_super_init_call_modifier)
    def remove_tags(self):
        """
        Removes tags from execution of the operators (in example_dags). Note that those changes
        apply to example DAGs not to the operators/hooks erc. We package the example DAGs together
        with the provider classes and they should serve as examples independently on the version
        of Airflow it will be installed in. The tags are feature added in 1.10.10 and occasionally
        we will want to run example DAGs as system tests in pre-1.10.10 version so we want to
        remove the tags here.
        Example diff generated:
        .. code-block:: diff
            -- ./airflow/providers/amazon/aws/example_dags/example_datasync_2.py
            +++ ./airflow/providers/amazon/aws/example_dags/example_datasync_2.py
            @@ -83,8 +83,7 @@
             with models.DAG(
                 "example_datasync_2",
                 default_args=default_args,
            -    schedule_interval=None,  # Override to match your needs
            -    tags=['example'],
            +    schedule_interval=None,
             ) as dag:
        """
        def remove_tags_modifier(_: LN, capture: Capture, filename: Filename) -> None:
            # Find the `tags` keyword argument in the DAG(...) call and remove
            # it, along with the comma that followed it (if any).
            for node in capture['function_arguments'][0].post_order():
                if isinstance(node, Leaf) and node.value == "tags" and node.type == TOKEN.NAME:
                    if node.parent.next_sibling and node.parent.next_sibling.value == ",":
                        node.parent.next_sibling.remove()
                    node.parent.remove()
        # Remove tags
        self.qry.select_method("DAG").is_call().modify(remove_tags_modifier)
    def remove_poke_mode_only_decorator(self):
        r"""
        Removes @poke_mode_only decorator. The decorator is only available in Airflow 2.0.
        Example diff generated:
        .. code-block:: diff
            --- ./airflow/providers/google/cloud/sensors/gcs.py
            +++ ./airflow/providers/google/cloud/sensors/gcs.py
            @@ -189,7 +189,6 @@
                 return datetime.now()
            -@poke_mode_only
             class GCSUploadSessionCompleteSensor(BaseSensorOperator):
                 \"\"\"
                 Checks for changes in the number of objects at prefix in Google Cloud Storage
        """
        def find_and_remove_poke_mode_only_import(node: LN):
            # Recursively look for the `poke_mode_only` NAME leaf inside an
            # import statement and remove it, keeping sibling imports intact.
            for child in node.children:
                if isinstance(child, Leaf) and child.type == 1 and child.value == 'poke_mode_only':
                    import_node = child.parent
                    # remove the import by default
                    skip_import_remove = False
                    if isinstance(child.prev_sibling, Leaf) and child.prev_sibling.value == ",":
                        # remove coma before the whole import
                        child.prev_sibling.remove()
                        # do not remove if there are other imports
                        skip_import_remove = True
                    # NOTE(review): this condition checks isinstance of
                    # next_sibling but reads prev_sibling.value — looks like it
                    # was meant to be child.next_sibling.value == ",". Confirm
                    # against imports of the form `import poke_mode_only, x`.
                    if isinstance(child.next_sibling, Leaf) and child.prev_sibling.value == ",":
                        # but keep the one after and do not remove the whole import
                        skip_import_remove = True
                    # remove the import
                    child.remove()
                    if not skip_import_remove:
                        # remove import of there were no sibling
                        import_node.remove()
                else:
                    find_and_remove_poke_mode_only_import(child)
        def find_root_remove_import(node: LN):
            # Climb to the module root, then scrub the import from the file.
            current_node = node
            while current_node.parent:
                current_node = current_node.parent
            find_and_remove_poke_mode_only_import(current_node)
        def is_poke_mode_only_decorator(node: LN) -> bool:
            # A decorator node shaped like `@ poke_mode_only ...`.
            return (
                node.children
                and len(node.children) >= 2
                and isinstance(node.children[0], Leaf)
                and node.children[0].value == '@'
                and isinstance(node.children[1], Leaf)
                and node.children[1].value == 'poke_mode_only'
            )
        def remove_poke_mode_only_modifier(node: LN, capture: Capture, filename: Filename) -> None:
            # Drop the decorator from any BaseSensorOperator subclass and the
            # now-unused import of poke_mode_only.
            for child in capture['node'].parent.children:
                if is_poke_mode_only_decorator(child):
                    find_root_remove_import(child)
                    child.remove()
        self.qry.select_subclass("BaseSensorOperator").modify(remove_poke_mode_only_modifier)
    def refactor_amazon_package(self):
        """
        Fixes to "amazon" providers package.
        Copies some of the classes used from core Airflow to "common.utils" package of
        the provider and renames imports to use them from there.
        We copy typing_compat.py and change import as in example diff:
        .. code-block:: diff
            --- ./airflow/providers/amazon/aws/operators/ecs.py
            +++ ./airflow/providers/amazon/aws/operators/ecs.py
            @@ -24,7 +24,7 @@
             from airflow.models import BaseOperator
             from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
             from airflow.providers.amazon.aws.hooks.logs import AwsLogsHook
            -from airflow.typing_compat import Protocol, runtime_checkable
            +from airflow.providers.amazon.common.utils.typing_compat import Protocol, runtime_checkable
             from airflow.utils.decorators import apply_defaults
        """
        def amazon_package_filter(node: LN, capture: Capture, filename: Filename) -> bool:
            # Limit the renames to files inside the amazon provider tree.
            return filename.startswith("./airflow/providers/amazon/")
        # Create the vendored common.utils package inside the provider.
        os.makedirs(
            os.path.join(get_target_providers_package_folder("amazon"), "common", "utils"), exist_ok=True
        )
        copyfile(
            os.path.join(get_source_airflow_folder(), "airflow", "utils", "__init__.py"),
            os.path.join(get_target_providers_package_folder("amazon"), "common", "__init__.py"),
        )
        copyfile(
            os.path.join(get_source_airflow_folder(), "airflow", "utils", "__init__.py"),
            os.path.join(get_target_providers_package_folder("amazon"), "common", "utils", "__init__.py"),
        )
        copyfile(
            os.path.join(get_source_airflow_folder(), "airflow", "typing_compat.py"),
            os.path.join(
                get_target_providers_package_folder("amazon"), "common", "utils", "typing_compat.py"
            ),
        )
        (
            self.qry.select_module("airflow.typing_compat")
            .filter(callback=amazon_package_filter)
            .rename("airflow.providers.amazon.common.utils.typing_compat")
        )
        copyfile(
            os.path.join(get_source_airflow_folder(), "airflow", "utils", "email.py"),
            os.path.join(get_target_providers_package_folder("amazon"), "common", "utils", "email.py"),
        )
        (
            self.qry.select_module("airflow.utils.email")
            .filter(callback=amazon_package_filter)
            .rename("airflow.providers.amazon.common.utils.email")
        )
    def refactor_elasticsearch_package(self):
        """
        Fixes to "elasticsearch" providers package.
        Copies some of the classes used from core Airflow to "common.utils" package of
        the provider and renames imports to use them from there.
        We copy file_task_handler.py and change import as in example diff:
        .. code-block:: diff
            --- ./airflow/providers/elasticsearch/log/es_task_handler.py
            +++ ./airflow/providers/elasticsearch/log/es_task_handler.py
            @@ -24,7 +24,7 @@
             from airflow.configuration import conf
             from airflow.models import TaskInstance
             from airflow.utils import timezone
             from airflow.utils.helpers import parse_template_string
            -from airflow.utils.log.file_task_handler import FileTaskHandler
            +from airflow.providers.elasticsearch.common.utils.log.file_task_handler import FileTaskHandler
             from airflow.utils.log.json_formatter import JSONFormatter
             from airflow.utils.log.logging_mixin import LoggingMixin
        """
        def elasticsearch_package_filter(node: LN, capture: Capture, filename: Filename) -> bool:
            # Limit the renames to files inside the elasticsearch provider tree.
            return filename.startswith("./airflow/providers/elasticsearch/")
        # Create the vendored common.utils.log package inside the provider.
        os.makedirs(
            os.path.join(get_target_providers_package_folder("elasticsearch"), "common", "utils", "log"),
            exist_ok=True,
        )
        copyfile(
            os.path.join(get_source_airflow_folder(), "airflow", "utils", "__init__.py"),
            os.path.join(get_target_providers_package_folder("elasticsearch"), "common", "__init__.py"),
        )
        copyfile(
            os.path.join(get_source_airflow_folder(), "airflow", "utils", "__init__.py"),
            os.path.join(
                get_target_providers_package_folder("elasticsearch"), "common", "utils", "__init__.py"
            ),
        )
        copyfile(
            os.path.join(get_source_airflow_folder(), "airflow", "utils", "log", "__init__.py"),
            os.path.join(
                get_target_providers_package_folder("elasticsearch"), "common", "utils", "log", "__init__.py"
            ),
        )
        copyfile(
            os.path.join(get_source_airflow_folder(), "airflow", "utils", "log", "file_task_handler.py"),
            os.path.join(
                get_target_providers_package_folder("elasticsearch"),
                "common",
                "utils",
                "log",
                "file_task_handler.py",
            ),
        )
        (
            self.qry.select_module("airflow.utils.log.file_task_handler")
            .filter(callback=elasticsearch_package_filter)
            .rename("airflow.providers.elasticsearch.common.utils.log.file_task_handler")
        )
    def refactor_google_package(self):
        r"""
        Fixes to "google" providers package.

        Copies some of the classes used from core Airflow to "common.utils" package of the
        the provider and renames imports to use them from there. Note that in this case we also rename
        the imports in the copied files.

        For example we copy python_virtualenv.py, process_utils.py and change import as in example diff:

        .. code-block:: diff

            --- ./airflow/providers/google/cloud/operators/kubernetes_engine.py
            +++ ./airflow/providers/google/cloud/operators/kubernetes_engine.py
            @@ -28,11 +28,11 @@
             from airflow.exceptions import AirflowException
             from airflow.models import BaseOperator
            -from airflow.providers.cncf.kubernetes.operators.kubernetes_pod import KubernetesPodOperator
            +from airflow.contrib.operators.kubernetes_pod_operator import KubernetesPodOperator
             from airflow.providers.google.cloud.hooks.kubernetes_engine import GKEHook
             from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
             from airflow.utils.decorators import apply_defaults
            -from airflow.utils.process_utils import execute_in_subprocess, patch_environ
            +from airflow.providers.google.common.utils.process_utils import execute_in_subprocess

        And in the copied python_virtualenv.py we also change import to process_utils.py. This happens
        automatically and is solved by Pybowler.

        .. code-block:: diff

            --- ./airflow/providers/google/common/utils/python_virtualenv.py
            +++ ./airflow/providers/google/common/utils/python_virtualenv.py
            @@ -21,7 +21,7 @@
                \"\"\"
             from typing import List, Optional

            -from airflow.utils.process_utils import execute_in_subprocess
            +from airflow.providers.google.common.utils.process_utils import execute_in_subprocess

             def _generate_virtualenv_cmd(tmp_dir: str, python_bin: str, system_site_packages: bool)

        We also rename Base operator links to deprecated names:

        .. code-block:: diff

            --- ./airflow/providers/google/cloud/operators/mlengine.py
            +++ ./airflow/providers/google/cloud/operators/mlengine.py
            @@ -24,7 +24,7 @@
             from typing import List, Optional

             from airflow.exceptions import AirflowException
            -from airflow.models import BaseOperator, BaseOperatorLink
            +from airflow.models.baseoperator import BaseOperator, BaseOperatorLink
             from airflow.models.taskinstance import TaskInstance
             from airflow.providers.google.cloud.hooks.mlengine import MLEngineHook
             from airflow.utils.decorators import apply_defaults

        We also copy (google.common.utils) and rename imports to the helpers.

        .. code-block:: diff

            --- ./airflow/providers/google/cloud/example_dags/example_datacatalog.py
            +++ ./airflow/providers/google/cloud/example_dags/example_datacatalog.py
            @@ -37,7 +37,7 @@
                 CloudDataCatalogUpdateTagTemplateOperator,
             )
             from airflow.utils.dates import days_ago
            -from airflow.utils.helpers import chain
            +from airflow.providers.google.common.utils.helpers import chain

             default_args = {"start_date": days_ago(1)}

        And also module_loading which is used by helpers

        .. code-block:: diff

            --- ./airflow/providers/google/common/utils/helpers.py
            +++ ./airflow/providers/google/common/utils/helpers.py
            @@ -26,7 +26,7 @@
             from jinja2 import Template

             from airflow.exceptions import AirflowException
            -from airflow.utils.module_loading import import_string
            +from airflow.providers.google.common.utils.module_loading import import_string

             KEY_REGEX = re.compile(r'^[\\w.-]+$')
        """

        def google_package_filter(node: LN, capture: Capture, filename: Filename) -> bool:
            # Restrict Bowler rewrites to files inside the google provider tree.
            return filename.startswith("./airflow/providers/google/")

        def pure_airflow_models_filter(node: LN, capture: Capture, filename: Filename) -> bool:
            """Check if select is exactly [airflow, . , models]"""
            # Exactly three leaves means the bare "airflow.models" module,
            # not a deeper submodule like "airflow.models.taskinstance".
            return len(list(node.children[1].leaves())) == 3

        def _contains_chain_in_import_filter(node: LN, capture: Capture, filename: Filename) -> bool:
            # Match only imports that mention "chain" and live in the google provider.
            if "module_import" in capture:
                return bool("chain" in capture["module_import"].value) and filename.startswith(
                    "./airflow/providers/google/"
                )
            return False

        # Create the provider-local "common/utils" package and copy the needed
        # core-Airflow modules into it.
        os.makedirs(
            os.path.join(get_target_providers_package_folder("google"), "common", "utils"), exist_ok=True
        )
        copyfile(
            os.path.join(get_source_airflow_folder(), "airflow", "utils", "__init__.py"),
            os.path.join(get_target_providers_package_folder("google"), "common", "utils", "__init__.py"),
        )
        copyfile(
            os.path.join(get_source_airflow_folder(), "airflow", "utils", "python_virtualenv.py"),
            os.path.join(
                get_target_providers_package_folder("google"), "common", "utils", "python_virtualenv.py"
            ),
        )
        copy_helper_py_file(
            os.path.join(get_target_providers_package_folder("google"), "common", "utils", "helpers.py")
        )
        copyfile(
            os.path.join(get_source_airflow_folder(), "airflow", "utils", "module_loading.py"),
            os.path.join(
                get_target_providers_package_folder("google"), "common", "utils", "module_loading.py"
            ),
        )
        # Rewrite imports in google provider files (and in the copied modules,
        # which also live under ./airflow/providers/google/) to use the copies.
        (
            self.qry.select_module("airflow.utils.python_virtualenv")
            .filter(callback=google_package_filter)
            .rename("airflow.providers.google.common.utils.python_virtualenv")
        )
        copyfile(
            os.path.join(get_source_airflow_folder(), "airflow", "utils", "process_utils.py"),
            os.path.join(
                get_target_providers_package_folder("google"), "common", "utils", "process_utils.py"
            ),
        )
        (
            self.qry.select_module("airflow.utils.process_utils")
            .filter(callback=google_package_filter)
            .rename("airflow.providers.google.common.utils.process_utils")
        )
        # "chain" imported from airflow.models.baseoperator is redirected to the
        # copied helpers module.
        (
            self.qry.select_module("airflow.models.baseoperator")
            .filter(callback=_contains_chain_in_import_filter)
            .rename("airflow.providers.google.common.utils.helpers")
        )
        (
            self.qry.select_module("airflow.utils.helpers")
            .filter(callback=google_package_filter)
            .rename("airflow.providers.google.common.utils.helpers")
        )
        (
            self.qry.select_module("airflow.utils.module_loading")
            .filter(callback=google_package_filter)
            .rename("airflow.providers.google.common.utils.module_loading")
        )
        (
            # Fix BaseOperatorLinks imports
            self.qry.select_module("airflow.models")
            .is_filename(include=r"bigquery\.py|mlengine\.py")
            .filter(callback=google_package_filter)
            .filter(pure_airflow_models_filter)
            .rename("airflow.models.baseoperator")
        )
    def refactor_odbc_package(self):
        """
        Fixes to "odbc" providers package.

        Copies some of the classes used from core Airflow to "common.utils" package of the
        the provider and renames imports to use them from there.

        We copy helpers.py and change import as in example diff:

        .. code-block:: diff

            --- ./airflow/providers/google/cloud/example_dags/example_datacatalog.py
            +++ ./airflow/providers/google/cloud/example_dags/example_datacatalog.py
            @@ -37,7 +37,7 @@
                 CloudDataCatalogUpdateTagTemplateOperator,
             )
             from airflow.utils.dates import days_ago
            -from airflow.utils.helpers import chain
            +from airflow.providers.odbc.utils.helpers import chain

             default_args = {"start_date": days_ago(1)}
        """

        def odbc_package_filter(node: LN, capture: Capture, filename: Filename) -> bool:
            # Restrict Bowler rewrites to files inside the odbc provider tree.
            return filename.startswith("./airflow/providers/odbc/")

        # Create the provider-local "utils" package and copy helpers.py into it.
        os.makedirs(os.path.join(get_target_providers_folder(), "odbc", "utils"), exist_ok=True)
        copyfile(
            os.path.join(get_source_airflow_folder(), "airflow", "utils", "__init__.py"),
            os.path.join(get_target_providers_package_folder("odbc"), "utils", "__init__.py"),
        )
        copy_helper_py_file(os.path.join(get_target_providers_package_folder("odbc"), "utils", "helpers.py"))
        # Redirect helpers imports in odbc files to the copied module.
        (
            self.qry.select_module("airflow.utils.helpers")
            .filter(callback=odbc_package_filter)
            .rename("airflow.providers.odbc.utils.helpers")
        )
    def refactor_kubernetes_pod_operator(self):
        """Renames ``add_xcom_sidecar`` back to the older ``add_sidecar`` name.

        Applies only to ``KubernetesPodOperator`` in the cncf.kubernetes provider,
        as part of making the backported packages Airflow 1.10 compatible.
        """

        def kubernetes_package_filter(node: LN, capture: Capture, filename: Filename) -> bool:
            # Restrict the rename to files of the cncf.kubernetes provider.
            return filename.startswith("./airflow/providers/cncf/kubernetes")

        (
            self.qry.select_class("KubernetesPodOperator")
            .select_method("add_xcom_sidecar")
            .filter(callback=kubernetes_package_filter)
            .rename("add_sidecar")
        )
    def do_refactor(self, in_process: bool = False) -> None:  # noqa
        """Registers all refactoring steps and executes the accumulated Bowler query.

        Args:
            in_process: When True, Bowler runs in a single process so it can be
                stepped through in a debugger (but it is a lot slower that way).
        """
        self.rename_deprecated_modules()
        self.refactor_amazon_package()
        self.refactor_google_package()
        self.refactor_elasticsearch_package()
        self.refactor_odbc_package()
        self.remove_tags()
        self.remove_super_init_call()
        self.add_provide_context_to_python_operators()
        self.remove_poke_mode_only_decorator()
        self.refactor_kubernetes_pod_operator()
        # In order to debug Bowler - set in_process to True
        self.qry.execute(write=True, silent=False, interactive=False, in_process=in_process)
if __name__ == '__main__':
    # Backport mode is toggled via an environment variable set by the caller.
    BACKPORT_PACKAGES = os.getenv('BACKPORT_PACKAGES') == "true"
    in_process = False
    if len(sys.argv) > 1:
        if sys.argv[1] in ['--help', '-h']:
            print()
            print("Refactors provider packages to be Airflow 1.10 compatible.")
            print()
            print(f"Usage: {sys.argv[0]} [--debug] | [-h] | [--help]")
            print()
            print("You can use --debug flag in order to run bowler refactoring in process.")
            print("This allows you to debug bowler process as usual using your IDE debugger")
            print("Otherwise it heavily uses multi-processing and is next-to-impossible to debug")
            print()
            print("Note - Bowler is also a lot slower in this mode.")
            print()
            sys.exit(0)
        if sys.argv[1] == '--debug':
            # Run Bowler in a single process so an IDE debugger can attach.
            in_process = True
    # Sources are copied unconditionally; refactoring only runs in backport mode.
    copy_provider_sources()
    if BACKPORT_PACKAGES:
        RefactorBackportPackages().do_refactor(in_process=in_process)
| apache-2.0 |
dansbecker/what-celebrity | data_grabbers.py | 1 | 4607 | import concurrent.futures
import indicoio
import json
import os
import socket
import urllib.request
from os.path import join, exists
from PIL import Image, ImageDraw
class Grabber(object):
    """Base context manager for capture jobs.

    Subclasses set ``_captured_data_path`` / ``_failed_to_capture_path`` and
    implement ``_make_list_to_capture`` and ``_grab_one``. Entering the context
    loads previously persisted results; exiting writes them back as JSON, so
    interrupted runs can resume where they left off.
    """

    def __enter__(self):
        """Load persisted capture state, defaulting to empty lists."""
        self.captured_data = self._load_json(self._captured_data_path)
        self.failed_to_capture = self._load_json(self._failed_to_capture_path)
        return self

    @staticmethod
    def _load_json(path):
        """Return the JSON list stored at path, or [] if missing/unreadable."""
        try:
            with open(path, 'r') as f:
                return json.load(f)
        except (OSError, ValueError):
            # OSError: file missing/unreadable; ValueError: malformed JSON
            # (json.JSONDecodeError subclasses ValueError).
            return []

    def __exit__(self, *args):
        """Persist captured and failed records as JSON."""
        with open(self._captured_data_path, 'w') as f:
            json.dump(self.captured_data, f)
        with open(self._failed_to_capture_path, 'w') as f:
            json.dump(self.failed_to_capture, f)

    def run(self):
        """Grab every outstanding item using a pool of worker threads."""
        with concurrent.futures.ThreadPoolExecutor(max_workers=32) as worker_pool:
            list_to_capture = self._make_list_to_capture()
            for img_src, search_term in list_to_capture:  # img_src can be url or local file path
                # BUG FIX: pass the callable and its arguments to submit().
                # The previous code called self._grab_one(...) synchronously and
                # submitted its None return value as the "callable".
                worker_pool.submit(self._grab_one, img_src, search_term)
class ImageGrabber(Grabber):
def __init__(self, celeb_urls_dict):
self.celeb_urls_dict = celeb_urls_dict
self._failed_to_capture_path = join('work', 'failed_to_capture_images.json')
self._captured_data_path = join('work', 'captured_image_info.json')
socket.setdefaulttimeout(5)
def _url_to_fname(self, url):
return ''.join([i for i in url if i.isalpha()])
def _make_target_dir(self, celeb_name):
name_for_path = celeb_name.replace(" ", "_").casefold()
path = join('work', name_for_path)
if not exists(path):
os.mkdir(path)
return path
def _grab_one(self, url, search_term):
print(url)
local_img_path = self._get_file_path(url, search_term)
try:
url, _ = urllib.request.urlretrieve(url, local_img_path)
self.captured_data.append((url, local_img_path, search_term))
except:
self.failed_to_capture.append((url, local_img_path, search_term))
def _get_file_path(self, url, search_term):
search_term_dir = self._make_target_dir(search_term)
local_img_path = join(search_term_dir, self._url_to_fname(url)+".jpg")
return local_img_path
def _make_list_to_capture(self):
output = []
for search_term, url_list in self.celeb_urls_dict.items():
for url in url_list:
if not exists(self._get_file_path(url, search_term)):
output.append((url, search_term))
return output
class FacialFeatsGrabber(Grabber):
    """Extracts indico facial features for every image under work/<celeb>/.

    Requires the INDICO_API_KEY environment variable. Results and failures are
    persisted via the Grabber base class.
    """

    def __init__(self):
        self._failed_to_capture_path = join('work', 'failed_to_featurize.json')
        self._captured_data_path = join('work', 'facial_feats_data.json')
        indicoio.config.api_key = os.environ['INDICO_API_KEY']
        # Keep slow/hung API calls from blocking worker threads indefinitely.
        socket.setdefaulttimeout(5)

    def _grab_one(self, local_img_path, search_term):
        """Featurize one image, recording success or failure (never raising)."""
        try:
            img = Image.open(local_img_path)
            self.captured_data.append({'celeb': search_term,
                                       'face_feats': indicoio.facial_features(img),
                                       'face_corners': self._get_single_face_corners(img),
                                       'local_img_path': local_img_path
                                       })
        except Exception:
            # Best-effort: API/IO errors are recorded, not fatal.
            # (Narrowed from a bare except so KeyboardInterrupt still works.)
            print('failed to grab facial feats for ' + local_img_path)
            self.failed_to_capture.append((local_img_path, search_term))

    def _get_single_face_corners(self, img):
        """
        returns x and y coords of upper and lower left pixel of face in img (a PIL Image object)
        """
        try:
            face_corners = indicoio.facial_localization(img)[0]
            x0, y0 = face_corners['top_left_corner']
            x1, y1 = face_corners['bottom_right_corner']
            return (x0, y0, x1, y1)
        except Exception:
            # No face detected (or API error): signal with an empty tuple.
            return ()

    def _make_list_to_capture(self):
        """Return (path, celeb) pairs under work/ not yet featurized."""
        output = []
        # Set lookup keeps the membership test below O(1) per file.
        already_featurized_paths = {img['local_img_path'] for img in self.captured_data}
        celeb_dirs = [d for d in os.listdir('work') if os.path.isdir(join('work', d))]
        for celeb in celeb_dirs:
            for fname in os.listdir(join('work', celeb)):
                local_img_path = join('work', celeb, fname)
                if local_img_path not in already_featurized_paths:
                    output.append((local_img_path, celeb))
        return output
| mit |
ashutosh-mishra/youtube-dl | youtube_dl/extractor/spiegel.py | 1 | 2462 | import re
import xml.etree.ElementTree
from .common import InfoExtractor
class SpiegelIE(InfoExtractor):
    """youtube-dl extractor for video pages on spiegel.de."""

    # The numeric video id is the last dash-separated token of the URL path.
    _VALID_URL = r'https?://(?:www\.)?spiegel\.de/video/[^/]*-(?P<videoID>[0-9]+)(?:\.html)?(?:#.*)?$'
    _TESTS = [{
        u'url': u'http://www.spiegel.de/video/vulkan-tungurahua-in-ecuador-ist-wieder-aktiv-video-1259285.html',
        u'file': u'1259285.mp4',
        u'md5': u'2c2754212136f35fb4b19767d242f66e',
        u'info_dict': {
            u"title": u"Vulkanausbruch in Ecuador: Der \"Feuerschlund\" ist wieder aktiv"
        }
    },
    {
        u'url': u'http://www.spiegel.de/video/schach-wm-videoanalyse-des-fuenften-spiels-video-1309159.html',
        u'file': u'1309159.mp4',
        u'md5': u'f2cdf638d7aa47654e251e1aee360af1',
        u'info_dict': {
            u'title': u'Schach-WM in der Videoanalyse: Carlsen nutzt die Fehlgriffe des Titelverteidigers'
        }
    }]

    def _real_extract(self, url):
        """Extract title and available format variants for a spiegel.de video."""
        m = re.match(self._VALID_URL, url)
        video_id = m.group('videoID')

        # The title comes from the HTML page; the format list comes from a
        # separate per-video XML document on the flash server.
        webpage = self._download_webpage(url, video_id)
        video_title = self._html_search_regex(
            r'<div class="module-title">(.*?)</div>', webpage, u'title')

        xml_url = u'http://video2.spiegel.de/flash/' + video_id + u'.xml'
        xml_code = self._download_webpage(
            xml_url, video_id,
            note=u'Downloading XML', errnote=u'Failed to download XML')

        idoc = xml.etree.ElementTree.fromstring(xml_code)

        # Each child element of the XML root describes one format variant
        # (tag name "typeN"); the numeric suffix becomes the format_id.
        formats = [
            {
                'format_id': n.tag.rpartition('type')[2],
                'url': u'http://video2.spiegel.de/flash/' + n.find('./filename').text,
                'width': int(n.find('./width').text),
                'height': int(n.find('./height').text),
                'abr': int(n.find('./audiobitrate').text),
                'vbr': int(n.find('./videobitrate').text),
                'vcodec': n.find('./codec').text,
                'acodec': 'MP4A',
            }
            for n in list(idoc)
            # Blacklist type 6, it's extremely LQ and not available on the same server
            if n.tag.startswith('type') and n.tag != 'type6'
        ]
        # Order formats by ascending video bitrate (worst first).
        formats.sort(key=lambda f: f['vbr'])
        duration = float(idoc[0].findall('./duration')[0].text)

        info = {
            'id': video_id,
            'title': video_title,
            'duration': duration,
            'formats': formats,
        }
        return info
| unlicense |
demonchild2112/travis-test | grr/core/grr_response_core/lib/parsers/abstract.py | 2 | 5155 | #!/usr/bin/env python
"""Registry for parsers and abstract classes for basic parser functionality."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import abc
from future.utils import with_metaclass
from typing import Optional
from typing import Text
class ParseError(Exception):
    """Raised when a parser encounters a problem during parsing.

    Attributes:
      cause: An optional exception that caused this exception to be raised.
    """

    def __init__(self, message, cause=None):
        """Initializes the error.

        Args:
          message: A message string explaining why the exception was raised.
          cause: An optional exception that caused this exception to be raised.
        """
        # When a cause is supplied, fold it into the displayed message.
        full_message = message if cause is None else "{}: {}".format(message, cause)
        super(ParseError, self).__init__(full_message)
        self.cause = cause
class Parser(with_metaclass(abc.ABCMeta)):
    """A base interface for all parsers types."""

    # TODO(hanuszczak): Once support for Python 2 is dropped, properties below can
    # be defined as abstract, ensuring that all subclasses really define them.

    # TODO(hanuszczak): It would be better if parsers identified types that they
    # can parse rather than declare supported artifacts (which are defined in a
    # completely different place, in an external repository). Then parser can have
    # well-defined types.

    # A list of string identifiers for artifacts that this parser can process.
    supported_artifacts = []

    # Any knowledgebase dependencies required by the parser. Dependencies required
    # by the artifact itself will be inferred from the artifact definition.
    knowledgebase_dependencies = []

    # TODO(hanuszczak): Parser should have well defined types and what they can
    # return should be defined statically. Moreover, it is not possible to enforce
    # that parser really yields what `output_types` specified so this serves no
    # purpose other than documentation.
    #
    # There is only one parser that returns more than one type of value, so maybe
    # it should be re-evaluated whether this field actually makes sense.

    # The semantic types that can be produced by this parser.
    output_types = []
class SingleResponseParser(Parser):
    """An abstract class for parsers that are able to parse individual replies."""

    # TODO(hanuszczak): `path_type` is part of the signature only because one of
    # the parser classes needs that (`ArtifactFilesParser`). This is a very poor
    # design and some other way to avoid having this parameter should be devised.
    @abc.abstractmethod
    def ParseResponse(self, knowledge_base, response, path_type):
        """Parse a single response from the client.

        Args:
          knowledge_base: A knowledgebase for the client that provided the response.
          response: An RDF value representing the result of artifact collection.
          path_type: A path type information used by the `ArtifactFilesParser`.

        Raises:
          ParseError: If parser is not able to parse the response.
        """
class SingleFileParser(Parser):
    """An interface for parsers that read file content."""

    # TODO(hanuszczak): Define a clear file reader interface.
    @abc.abstractmethod
    def ParseFile(self, knowledge_base, pathspec, filedesc):
        """Parses a single file from the client.

        Args:
          knowledge_base: A knowledgebase for the client to whom the file belongs.
          pathspec: A pathspec corresponding to the parsed file.
          filedesc: A file-like object to parse.

        Yields:
          RDF values with parsed data.

        Raises:
          ParseError: If parser is not able to parse the file.
        """
class MultiResponseParser(Parser):
    """An interface for parsers requiring all replies in order to parse them."""

    @abc.abstractmethod
    def ParseResponses(self, knowledge_base, responses):
        """Parse responses from the client.

        Args:
          knowledge_base: A knowledgebase for the client that provided responses.
          responses: A list of RDF values with results of artifact collection.

        Raises:
          ParseError: If parser is not able to parse the responses.
        """
class MultiFileParser(Parser):
    """An interface for parsers that need to read content of multiple files."""

    # TODO(hanuszczak): The file interface mentioned above should also have
    # `pathspec` property. With the current solution there is no way to enforce
    # on the type level that `pathspecs` and `filedescs` have the same length and
    # there is no clear correlation between the two. One possible solution would
    # be to use a list of pairs but this is ugly to document.
    @abc.abstractmethod
    def ParseFiles(self, knowledge_base, pathspecs, filedescs):
        """Parses multiple files from the client.

        Args:
          knowledge_base: A knowledgebase for the client to whom the files belong.
          pathspecs: A list of pathspecs corresponding to the parsed files.
          filedescs: A list of file-like objects to parse.

        Yields:
          RDF values with parsed data.

        Raises:
          ParseError: If parser is not able to parse the files.
        """
| apache-2.0 |
rhndg/openedx | lms/djangoapps/shoppingcart/tests/test_microsites.py | 101 | 7876 | """
Tests for Microsite Dashboard with Shopping Cart History
"""
import mock
from django.core.urlresolvers import reverse
from mock import patch
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from shoppingcart.models import (
Order, PaidCourseRegistration, CertificateItem, Donation
)
from student.tests.factories import UserFactory
from course_modes.models import CourseMode
def fake_all_orgs(default=None):  # pylint: disable=unused-argument
    """
    Return a fake set of all microsite orgs.
    """
    return {'fakeX', 'fooX'}
def fakex_microsite(name, default=None):  # pylint: disable=unused-argument
    """
    Stand-in microsite lookup that always reports the fakeX site.
    """
    return 'fakeX'
def non_microsite(name, default=None):  # pylint: disable=unused-argument
    """
    Stand-in microsite lookup that always reports no microsite.
    """
    return None
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_PAID_COURSE_REGISTRATION': True})
class TestOrderHistoryOnMicrositeDashboard(ModuleStoreTestCase):
    """
    Test for microsite dashboard order history.

    Verifies that the dashboard only shows receipts for orders whose courses
    belong to the current microsite (or to no microsite when outside one).
    """

    def setUp(self):
        """Create one user and five purchased orders spread across orgs."""
        super(TestOrderHistoryOnMicrositeDashboard, self).setUp()
        patcher = patch('student.models.tracker')
        self.mock_tracker = patcher.start()
        self.user = UserFactory.create()
        self.user.set_password('password')
        self.user.save()
        self.addCleanup(patcher.stop)

        # First Order with our (fakeX) microsite's course.
        course1 = CourseFactory.create(org='fakeX', number='999', display_name='fakeX Course')
        course1_key = course1.id
        course1_mode = CourseMode(course_id=course1_key,
                                  mode_slug="honor",
                                  mode_display_name="honor cert",
                                  min_price=20)
        course1_mode.save()
        cart = Order.get_cart_for_user(self.user)
        PaidCourseRegistration.add_to_order(cart, course1_key)
        cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')
        self.orderid_microsite = cart.id

        # Second Order with another(fooX) microsite's course
        course2 = CourseFactory.create(org='fooX', number='888', display_name='fooX Course')
        course2_key = course2.id
        course2_mode = CourseMode(course_id=course2.id,
                                  mode_slug="honor",
                                  mode_display_name="honor cert",
                                  min_price=20)
        course2_mode.save()
        cart = Order.get_cart_for_user(self.user)
        PaidCourseRegistration.add_to_order(cart, course2_key)
        cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')
        self.orderid_other_microsite = cart.id

        # Third Order with course not attributed to any microsite.
        course3 = CourseFactory.create(org='otherorg', number='777', display_name='otherorg Course')
        course3_key = course3.id
        course3_mode = CourseMode(course_id=course3.id,
                                  mode_slug="honor",
                                  mode_display_name="honor cert",
                                  min_price=20)
        course3_mode.save()
        cart = Order.get_cart_for_user(self.user)
        PaidCourseRegistration.add_to_order(cart, course3_key)
        cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')
        self.orderid_non_microsite = cart.id

        # Fourth Order with course not attributed to any microsite but with a CertificateItem
        course4 = CourseFactory.create(org='otherorg', number='888')
        course4_key = course4.id
        course4_mode = CourseMode(course_id=course4.id,
                                  mode_slug="verified",
                                  mode_display_name="verified cert",
                                  min_price=20)
        course4_mode.save()
        cart = Order.get_cart_for_user(self.user)
        CertificateItem.add_to_order(cart, course4_key, 20.0, 'verified')
        cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')
        self.orderid_cert_non_microsite = cart.id

        # Fifth Order with course not attributed to any microsite but with a Donation
        course5 = CourseFactory.create(org='otherorg', number='999')
        course5_key = course5.id
        cart = Order.get_cart_for_user(self.user)
        Donation.add_to_order(cart, 20.0, course5_key)
        cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')
        self.orderid_donation = cart.id

        # also add a donation not associated with a course to make sure the None case works OK
        Donation.add_to_order(cart, 10.0, None)
        cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')
        self.orderid_courseless_donation = cart.id

    @mock.patch("microsite_configuration.microsite.get_value", fakex_microsite)
    @mock.patch("microsite_configuration.microsite.get_all_orgs", fake_all_orgs)
    def test_when_in_microsite_shows_orders_with_microsite_courses_only(self):
        # Inside the fakeX microsite only the fakeX-course receipt is shown.
        self.client.login(username=self.user.username, password="password")
        response = self.client.get(reverse("dashboard"))
        receipt_url_microsite_course = reverse('shoppingcart.views.show_receipt', kwargs={'ordernum': self.orderid_microsite})
        receipt_url_microsite_course2 = reverse('shoppingcart.views.show_receipt', kwargs={'ordernum': self.orderid_other_microsite})
        receipt_url_non_microsite = reverse('shoppingcart.views.show_receipt', kwargs={'ordernum': self.orderid_non_microsite})
        receipt_url_cert_non_microsite = reverse('shoppingcart.views.show_receipt', kwargs={'ordernum': self.orderid_cert_non_microsite})
        receipt_url_donation = reverse('shoppingcart.views.show_receipt', kwargs={'ordernum': self.orderid_donation})

        self.assertIn(receipt_url_microsite_course, response.content)
        self.assertNotIn(receipt_url_microsite_course2, response.content)
        self.assertNotIn(receipt_url_non_microsite, response.content)
        self.assertNotIn(receipt_url_cert_non_microsite, response.content)
        self.assertNotIn(receipt_url_donation, response.content)

    @mock.patch("microsite_configuration.microsite.get_value", non_microsite)
    @mock.patch("microsite_configuration.microsite.get_all_orgs", fake_all_orgs)
    def test_when_not_in_microsite_shows_orders_with_non_microsite_courses_only(self):
        # Outside any microsite, only orders for non-microsite courses appear.
        self.client.login(username=self.user.username, password="password")
        response = self.client.get(reverse("dashboard"))
        receipt_url_microsite_course = reverse('shoppingcart.views.show_receipt', kwargs={'ordernum': self.orderid_microsite})
        receipt_url_microsite_course2 = reverse('shoppingcart.views.show_receipt', kwargs={'ordernum': self.orderid_other_microsite})
        receipt_url_non_microsite = reverse('shoppingcart.views.show_receipt', kwargs={'ordernum': self.orderid_non_microsite})
        receipt_url_cert_non_microsite = reverse('shoppingcart.views.show_receipt', kwargs={'ordernum': self.orderid_cert_non_microsite})
        receipt_url_donation = reverse('shoppingcart.views.show_receipt', kwargs={'ordernum': self.orderid_donation})
        receipt_url_courseless_donation = reverse('shoppingcart.views.show_receipt', kwargs={'ordernum': self.orderid_courseless_donation})

        self.assertNotIn(receipt_url_microsite_course, response.content)
        self.assertNotIn(receipt_url_microsite_course2, response.content)
        self.assertIn(receipt_url_non_microsite, response.content)
        self.assertIn(receipt_url_cert_non_microsite, response.content)
        self.assertIn(receipt_url_donation, response.content)
        self.assertIn(receipt_url_courseless_donation, response.content)
| agpl-3.0 |
manish211/mase | python101/code/flower.py | 14 | 1416 | """This module contains code from
Think Python by Allen B. Downey
http://thinkpython.com
Copyright 2012 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
try:
# see if Swampy is installed as a package
from swampy.TurtleWorld import *
except ImportError:
# otherwise see if the modules are on the PYTHONPATH
from TurtleWorld import *
from polygon import *
def petal(t, r, angle):
    """Draw a single petal made of two mirrored arcs.

    t: Turtle
    r: radius of the arcs
    angle: angle (degrees) that subtends the arcs
    """
    for _ in range(2):
        arc(t, r, angle)
        lt(t, 180 - angle)
def flower(t, n, r, angle):
    """Draw a flower made of n evenly-spaced petals.

    t: Turtle
    n: number of petals
    r: radius of the arcs
    angle: angle (degrees) that subtends the arcs
    """
    turn = 360.0 / n
    for _ in range(n):
        petal(t, r, angle)
        lt(t, turn)
def move(t, length):
    """Move Turtle (t) forward (length) units without leaving a trail.

    Leaves the pen down afterwards.
    """
    pu(t)
    fd(t, length)
    pd(t)
world = TurtleWorld()
bob = Turtle()
bob.delay = 0.01

# draw a sequence of three flowers, as shown in the book.
move(bob, -100)
flower(bob, 7, 60.0, 60.0)

move(bob, 100)
flower(bob, 10, 40.0, 80.0)

move(bob, 100)
flower(bob, 20, 140.0, 20.0)

die(bob)

# dump the contents of the canvas to the file canvas.eps
world.canvas.dump()

wait_for_user()
| unlicense |
AltSchool/django | django/contrib/gis/db/backends/postgis/const.py | 528 | 1484 | """
PostGIS to GDAL conversion constant definitions
"""
# Lookup to convert pixel type values from GDAL to PostGIS
GDAL_TO_POSTGIS = [None, 4, 6, 5, 8, 7, 10, 11, None, None, None, None]
# Lookup to convert pixel type values from PostGIS to GDAL
POSTGIS_TO_GDAL = [1, 1, 1, 3, 1, 3, 2, 5, 4, None, 6, 7, None, None]
# Struct pack structure for raster header, the raster header has the
# following structure:
#
# Endianness, PostGIS raster version, number of bands, scale, origin,
# skew, srid, width, and height.
#
# Scale, origin, and skew have x and y values. PostGIS currently uses
# a fixed endianness (1) and there is only one version (0).
POSTGIS_HEADER_STRUCTURE = 'B H H d d d d d d i H H'
# Lookup values to convert GDAL pixel types to struct characters. This is
# used to pack and unpack the pixel values of PostGIS raster bands.
GDAL_TO_STRUCT = [
None, 'B', 'H', 'h', 'L', 'l', 'f', 'd',
None, None, None, None,
]
# Size of the packed value in bytes for different numerical types.
# This is needed to cut chunks of band data out of PostGIS raster strings
# when decomposing them into GDALRasters.
# See https://docs.python.org/3/library/struct.html#format-characters
STRUCT_SIZE = {
'b': 1, # Signed char
'B': 1, # Unsigned char
'?': 1, # _Bool
'h': 2, # Short
'H': 2, # Unsigned short
'i': 4, # Integer
'I': 4, # Unsigned Integer
'l': 4, # Long
'L': 4, # Unsigned Long
'f': 4, # Float
'd': 8, # Double
}
| bsd-3-clause |
imsplitbit/nova | nova/cmd/api_metadata.py | 6 | 1345 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Starter script for Nova Metadata API."""
import sys
from oslo.config import cfg
from nova import config
from nova.openstack.common import log as logging
from nova import service
from nova import utils
CONF = cfg.CONF
CONF.import_opt('enabled_ssl_apis', 'nova.service')
def main():
    """Parse config, set up logging, and serve the Nova metadata API via WSGI."""
    config.parse_args(sys.argv)
    logging.setup("nova")
    utils.monkey_patch()
    # Serve over SSL only when the metadata API is listed in enabled_ssl_apis.
    should_use_ssl = 'metadata' in CONF.enabled_ssl_apis
    server = service.WSGIService('metadata', use_ssl=should_use_ssl)
    service.serve(server, workers=server.workers)
    service.wait()
| apache-2.0 |
wemanuel/smry | server-auth/ls/google-cloud-sdk/lib/googlecloudsdk/calliope/arg_parsers.py | 4 | 20238 | # Copyright 2013 Google Inc. All Rights Reserved.
"""A module that provides parsing utilities for argparse.
For details of how argparse argument pasers work, see:
http://docs.python.org/dev/library/argparse.html#type
Example usage:
import argparse
import arg_parsers
parser = argparse.ArgumentParser()
parser.add_argument(
'--metadata',
type=arg_parsers.ArgDict(),
action=arg_parser.FloatingListValuesCatcher())
parser.add_argument(
'--delay',
default='5s',
type=arg_parsers.Duration(lower_bound='1s', upper_bound='10s')
parser.add_argument(
'--disk-size',
default='10GB',
type=arg_parsers.BinarySize(lower_bound='1GB', upper_bound='10TB')
# will emit a warning about space-separated metadata
res = parser.parse_args(
'--names --metadata x=y,a=b c=d --delay 1s --disk-size 10gb'.split())
assert res.metadata == {'a': 'b', 'c': 'd', 'x': 'y'}
assert res.delay == 1
assert res.disk_size == 10737418240
"""
import argparse
import datetime
import re
from googlecloudsdk.core import log
__all__ = ['Duration', 'BinarySize']
class Error(Exception):
  """Base class for exceptions that are defined by this module."""
class ArgumentTypeError(Error, argparse.ArgumentTypeError):
  """Exceptions for parsers that are used as argparse types."""
class ArgumentParsingError(Error, argparse.ArgumentError):
  """Raised when there is a problem with user input.

  argparse.ArgumentError takes both the action and a message as constructor
  parameters.
  """
def _GenerateErrorMessage(error, user_input=None, error_idx=None):
"""Constructs an error message for an exception.
Args:
error: str, The error message that should be displayed. This
message should not end with any punctuation--the full error
message is constructed by appending more information to error.
user_input: str, The user input that caused the error.
error_idx: int, The index at which the error occurred. If None,
the index will not be printed in the error message.
Returns:
str: The message to use for the exception.
"""
if user_input is None:
return error
elif not user_input: # Is input empty?
return error + '; received empty string'
elif error_idx is None:
return error + '; received: ' + user_input
return ('{error_message} at index {error_idx}: {user_input}'
.format(error_message=error, user_input=user_input,
error_idx=error_idx))
_VALUE_PATTERN = r"""
^ # Beginning of input marker.
(?P<amount>\d+) # Amount.
((?P<unit>[a-zA-Z]+))? # Optional unit.
$ # End of input marker.
"""
_SECOND = 1
_MINUTE = 60 * _SECOND
_HOUR = 60 * _MINUTE
_DAY = 24 * _HOUR
# The units are adopted from sleep(1):
# http://linux.die.net/man/1/sleep
_DURATION_SCALES = {
's': _SECOND,
'm': _MINUTE,
'h': _HOUR,
'd': _DAY,
}
_BINARY_SIZE_SCALES = {
'B': 1,
'KB': 1 << 10,
'MB': 1 << 20,
'GB': 1 << 30,
'TB': 1 << 40,
'PB': 1 << 50,
'KiB': 1 << 10,
'MiB': 1 << 20,
'GiB': 1 << 30,
'TiB': 1 << 40,
'PiB': 1 << 50,
}
def _ValueParser(scales, default_unit, lower_bound=None, upper_bound=None):
  """A helper that returns a function that can parse values with units.

  Casing for all units matters.

  Args:
    scales: {str: int}, A dictionary mapping units to their magnitudes in
      relation to the lowest magnitude unit in the dict.
    default_unit: str, The default unit to use if the user's input is
      missing a unit.
    lower_bound: str, An inclusive lower bound (parsed with the same rules
      as user input).
    upper_bound: str, An inclusive upper bound.

  Returns:
    A function that can parse values, returning None for None input and
    raising ArgumentTypeError for malformed or out-of-bounds input.
  """

  def UnitsByMagnitude():
    """Returns a list of the units in scales sorted by magnitude."""
    # items() instead of the Python-2-only iteritems() so this module keeps
    # working under Python 3; the semantics are identical here.
    return [key for key, _
            in sorted(scales.items(), key=lambda value: value[1])]

  def Parse(value):
    """Parses a string that may contain a unit; returns an int."""
    match = re.match(_VALUE_PATTERN, value, re.VERBOSE)
    if not match:
      raise ArgumentTypeError(_GenerateErrorMessage(
          'given value must be of the form INTEGER[UNIT] where units '
          'can be one of {0}'
          .format(', '.join(UnitsByMagnitude())),
          user_input=value))

    amount = int(match.group('amount'))
    unit = match.group('unit')
    if unit is None:
      # No unit supplied: fall back to the caller-provided default.
      return amount * scales[default_unit]
    elif unit in scales:
      return amount * scales[unit]
    else:
      raise ArgumentTypeError(_GenerateErrorMessage(
          'unit must be one of {0}'.format(', '.join(UnitsByMagnitude())),
          user_input=unit))

  # Bounds are parsed once, eagerly, so a bad bound fails at construction
  # time rather than on first use.
  if lower_bound is None:
    parsed_lower_bound = None
  else:
    parsed_lower_bound = Parse(lower_bound)

  if upper_bound is None:
    parsed_upper_bound = None
  else:
    parsed_upper_bound = Parse(upper_bound)

  def ParseWithBoundsChecking(value):
    """Same as Parse except bound checking is performed."""
    if value is None:
      return None
    else:
      parsed_value = Parse(value)
      if parsed_lower_bound is not None and parsed_value < parsed_lower_bound:
        raise ArgumentTypeError(_GenerateErrorMessage(
            'value must be greater than or equal to {0}'.format(lower_bound),
            user_input=value))
      elif parsed_upper_bound is not None and parsed_value > parsed_upper_bound:
        raise ArgumentTypeError(_GenerateErrorMessage(
            'value must be less than or equal to {0}'.format(upper_bound),
            user_input=value))
      else:
        return parsed_value

  return ParseWithBoundsChecking
def Duration(lower_bound=None, upper_bound=None):
  """Returns a function that can parse time durations.

  Input to the parsing function must be a string of the form:

    INTEGER[UNIT]

  The integer must be non-negative. Valid units are "s", "m", "h", and
  "d" for seconds, minutes, hours, and days,
  respectively. The casing of the units matters.

  If the unit is omitted, seconds is assumed.

  The result is parsed in seconds. For example:

    parser = Duration()
    assert parser('10s') == 10

  Args:
    lower_bound: str, An inclusive lower bound for values.
    upper_bound: str, An inclusive upper bound for values.

  Raises:
    ArgumentTypeError: If either the lower_bound or upper_bound
      cannot be parsed. The returned function will also raise this
      error if it cannot parse its input. This exception is also
      raised if the returned function receives an out-of-bounds
      input.

  Returns:
    A function that accepts a single time duration as input to be
      parsed.
  """
  return _ValueParser(_DURATION_SCALES, default_unit='s',
                      lower_bound=lower_bound, upper_bound=upper_bound)
def BinarySize(lower_bound=None, upper_bound=None):
  """Returns a function that can parse binary sizes.

  Binary sizes are defined as base-2 values representing number of
  bytes.

  Input to the parsing function must be a string of the form:

    INTEGER[UNIT]

  The integer must be non-negative. Valid units are "B", "KB", "MB",
  "GB", "TB", "PB", "KiB", "MiB", "GiB", "TiB", "PiB". If the unit is
  omitted, GB is assumed.

  The result is parsed in bytes. For example:

    parser = BinarySize()
    assert parser('10GB') == 10737418240

  Args:
    lower_bound: str, An inclusive lower bound for values.
    upper_bound: str, An inclusive upper bound for values.

  Raises:
    ArgumentTypeError: If either the lower_bound or upper_bound
      cannot be parsed. The returned function will also raise this
      error if it cannot parse its input. This exception is also
      raised if the returned function receives an out-of-bounds
      input.

  Returns:
    A function that accepts a single binary size as input to be
      parsed.
  """
  return _ValueParser(_BINARY_SIZE_SCALES, default_unit='GB',
                      lower_bound=lower_bound, upper_bound=upper_bound)
_KV_PAIR_DELIMITER = '='
class HostPort(object):
  """A simple container holding a host name and a port."""

  def __init__(self, host, port):
    self.host = host
    self.port = port

  @staticmethod
  def Parse(s):
    """Parse the given string into a HostPort object.

    This can be used as an argparse type.

    Args:
      s: str, The string to parse.

    Raises:
      ArgumentTypeError: If the string is not valid.

    Returns:
      HostPort, The parsed object. Missing pieces are represented as None.
    """
    if not s:
      return HostPort(None, None)
    if ':' not in s:
      return HostPort(s, None)
    host, _, port = s.partition(':')
    if ':' in port:
      # More than one colon: neither a bare host nor 'host:port'.
      raise ArgumentTypeError(
          _GenerateErrorMessage('Failed to parse host and port', user_input=s))
    return HostPort(host or None, port or None)
class Day(object):
  """A class for parsing a datetime.date from a 'YYYY-MM-DD' string."""

  @staticmethod
  def Parse(s):
    """Parses a 'YYYY-MM-DD' string into a datetime.date.

    Args:
      s: str, The string to parse; falsy input yields None.

    Raises:
      ArgumentTypeError: If the string is not a valid date.

    Returns:
      datetime.date, The parsed date, or None for empty input.
    """
    if not s:
      return None
    try:
      return datetime.datetime.strptime(s, '%Y-%m-%d').date()
    except ValueError:
      # Bug fix: the quote around the expected format was unbalanced
      # ("...the form 'YYYY-MM-DD"); it is now closed.
      raise ArgumentTypeError(
          _GenerateErrorMessage(
              "Failed to parse date. Value should be in the form 'YYYY-MM-DD'",
              user_input=s))
class Datetime(object):
  """A class for parsing a datetime object in UTC timezone."""

  # Accepted input layouts, tried in order; the first successful parse wins.
  # TODO(user): Add timezone support.
  _ACCEPTED_FORMATS = ('%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M:%S.%f',
                       '%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%M:%S.%fZ')

  @staticmethod
  def Parse(s):
    """Parses a string value into a Datetime object."""
    if not s:
      return None
    for fmt in Datetime._ACCEPTED_FORMATS:
      try:
        return datetime.datetime.strptime(s, fmt)
      except ValueError:
        continue
    raise ArgumentTypeError(
        _GenerateErrorMessage(
            'Failed to parse date. Value should be in ISO or RFC3339 format',
            user_input=s))
def BoundedInt(lower_bound=None, upper_bound=None):
  """Returns a function that can parse integers within some bound."""

  def _ParseInt(value):
    """Parses value as an int, raising ArgumentTypeError if out of bounds."""
    parsed = int(value)
    # The lower bound is reported first when both are violated by design of
    # the check order.
    if lower_bound is not None and parsed < lower_bound:
      raise ArgumentTypeError(
          _GenerateErrorMessage(
              'Value must be greater than or equal to {0}'.format(lower_bound),
              user_input=value))
    if upper_bound is not None and parsed > upper_bound:
      raise ArgumentTypeError(
          _GenerateErrorMessage(
              'Value must be less than or equal to {0}'.format(upper_bound),
              user_input=value))
    return parsed

  return _ParseInt
def _TokenizeQuotedList(arg_value, delim=','):
"""Tokenize an argument into a list.
Args:
arg_value: str, The raw argument.
delim: str, The delimiter on which to split the argument string.
Returns:
[str], The tokenized list.
"""
if arg_value:
if not arg_value.endswith(delim):
arg_value += delim
return arg_value.split(delim)[:-1]
return []
class ArgType(object):
  """Common base class for the argument-type callables in this module."""
class ArgList(ArgType):
  """Interpret an argument value as a list.

  Intended to be used as the type= for a flag argument. Splits the string on
  commas or another delimiter and returns a list.

  By default, splits on commas:
      'a,b,c' -> ['a', 'b', 'c']
  There is an available syntax for using an alternate delimiter:
      '^:^a,b:c' -> ['a,b', 'c']
      '^::^a:b::c' -> ['a:b', 'c']
      '^,^^a^,b,c' -> ['^a^', ',b', 'c']
  """

  DEFAULT_DELIM_CHAR = ','
  ALT_DELIM_CHAR = '^'

  def __init__(self, element_type=None, min_length=0, max_length=None,
               choices=None):
    """Initialize an ArgList.

    Args:
      element_type: (str)->str, A function to apply to each of the list items.
      min_length: int, The minimum size of the list.
      max_length: int, The maximum size of the list.
      choices: [element_type], a list of valid possibilities for elements. If
        None, then no constraints are imposed.

    Raises:
      ArgumentTypeError: If the list is malformed.
    """
    if choices:

      def _CheckedElement(raw_value):
        # Convert first (when a converter was supplied), then validate the
        # converted value against the allowed choices.
        converted = element_type(raw_value) if element_type else raw_value
        if converted not in choices:
          raise ArgumentTypeError('{value} must be one of [{choices}]'.format(
              value=converted,
              choices=', '.join([str(choice) for choice in choices])))
        return converted

      self.element_type = _CheckedElement
    else:
      self.element_type = element_type
    self.min_length = min_length
    self.max_length = max_length

  def __call__(self, arg_value):  # pylint:disable=missing-docstring
    delim = self.DEFAULT_DELIM_CHAR
    # A value of the form '^<delim>^rest' selects a custom delimiter for
    # this one argument.
    if (arg_value.startswith(self.ALT_DELIM_CHAR) and
        self.ALT_DELIM_CHAR in arg_value[1:]):
      delim, arg_value = arg_value[1:].split(self.ALT_DELIM_CHAR, 1)
      if not delim:
        raise ArgumentTypeError(
            'Invalid delimiter. Please see `gcloud topic escaping` for '
            'information on escaping list or dictionary flag values.')
    items = _TokenizeQuotedList(arg_value, delim=delim)

    # TODO(user): These exceptions won't present well to the user.
    if len(items) < self.min_length:
      raise ArgumentTypeError('not enough args')
    if self.max_length is not None and len(items) > self.max_length:
      raise ArgumentTypeError('too many args')

    if self.element_type:
      items = [self.element_type(item) for item in items]
    return items
class ArgDict(ArgList):
  """Interpret an argument value as a dict.

  Intended to be used as the type= for a flag argument. Splits the string on
  commas to get a list, and then splits the items on equals to get a set of
  key-value pairs to get a dict.
  """

  def __init__(self, value_type=None, spec=None, min_length=0, max_length=None):
    """Initialize an ArgDict.

    Args:
      value_type: (str)->str, A function to apply to each of the dict values.
      spec: {str: (str)->str}, A mapping of expected keys to functions.
        The functions are applied to the values. If None, an arbitrary
        set of keys will be accepted. If not None, it is an error for the
        user to supply a key that is not in the spec.
      min_length: int, The minimum number of keys in the dict.
      max_length: int, The maximum number of keys in the dict.

    Raises:
      ArgumentTypeError: If the list is malformed.
      ValueError: If both value_type and spec are provided.
    """
    super(ArgDict, self).__init__(min_length=min_length, max_length=max_length)
    if spec and value_type:
      raise ValueError('cannot have both spec and sub_type')
    self.value_type = value_type
    self.spec = spec

  def _ApplySpec(self, key, value):
    # Reject unknown keys up front so typos surface immediately.
    if key not in self.spec:
      raise ArgumentTypeError(
          _GenerateErrorMessage(
              'valid keys are {0}'.format(
                  ', '.join(sorted(self.spec.keys()))),
              user_input=key))
    return self.spec[key](value)

  def __call__(self, arg_value):  # pylint:disable=missing-docstring
    pairs = super(ArgDict, self).__call__(arg_value)
    result = {}
    for pair in pairs:
      # Only the first '=' separates key from value.
      key, sep, value = pair.partition('=')
      # TODO(user): These exceptions won't present well to the user.
      if not sep:
        raise ArgumentTypeError(
            ('Bad syntax for dict arg: {0}. Please see `gcloud topic escaping` '
             'if you would like information on escaping list or dictionary '
             'flag values.').format(repr(pair)))
      if not key:
        raise ArgumentTypeError('bad key for dict arg: ' + repr(pair))
      if self.value_type:
        value = self.value_type(value)
      if self.spec:
        value = self._ApplySpec(key, value)
      result[key] = value
    return result
# pylint:disable=protected-access
def FloatingListValuesCatcher(
    action=argparse._StoreAction, switch_value=None):
  """Create an action for catching floating list values.

  Args:
    action: argparse.Action, the superclass of the new action.
    switch_value: obj, If not none, allow users to specify no value for the
      flag. If the flag is given and no value is specified, the switch_value
      will be used instead.

  Returns:
    argparse.Action, an action that will catch list values separated by spaces.
  """

  class FloatingListValuesCatcherAction(action):
    """This is to assist with refactoring argument lists.

    Provides a error for users who type (or have a script) that specifies a list
    with the elements in different arguments. eg.
    $ gcloud sql instances create foo --authorized-networks x y
    usage: gcloud sql instances create  INSTANCE [optional flags]
    ERROR: (gcloud.sql.instances.create) argument --authorized-networks: lists
    are separated by commas, try "--authorized-networks=x,y"

    To do this, with flags that used to (but no longer) have nargs set to take
    multiple values we apply an action designed to catch them by transparently
    setting nargs to '+', and then making sure only 1 value is provided.

    As a caveat, this means that people still cannot put positional arguments
    after the flags. So, this is a temporary mechanism designed to inform users,
    and we'll remove it eventually.
    """

    def __init__(self, *args, **kwargs):
      if 'nargs' in kwargs:
        # Make sure nothing weird is happening, first. This action is intended
        # only for use with --flags that have the type as ArgList or ArgDict,
        # and do not set nargs at all.
        raise ValueError(
            'trying to catch floating lists for a misspecified flag list')
      if switch_value is not None:
        # '*' permits zero values, so the bare flag can stand in for
        # switch_value in __call__ below.
        kwargs['nargs'] = '*'
      else:
        kwargs['nargs'] = '+'
      super(FloatingListValuesCatcherAction, self).__init__(*args, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
      # Bare flag with no values: substitute the configured switch_value.
      if not values and switch_value is not None:
        super(FloatingListValuesCatcherAction, self).__call__(
            parser, namespace, switch_value, option_string=option_string)
        return

      if len(values) > 1:
        # Multiple space-separated values were caught by nargs='+'/'*'.

        class ArgShell(object):
          """Class designed to trick argparse into displaying a nice error."""

          def __init__(self, name):
            self.option_strings = [name]

        suggestions = []
        if values and isinstance(values[0], dict):
          # Merge the individually parsed dicts; later occurrences win.
          aggregate_value = {}
          for valdict in values:
            aggregate_value.update(valdict)
            # NOTE(review): iteritems() is Python 2 only; revisit for a
            # Python 3 port.
            suggestions.extend(
                ['%s=%s' % (k, v) for k, v in valdict.iteritems()])
        if values and isinstance(values[0], list):
          # Concatenate the individually parsed lists in order.
          aggregate_value = []
          suggestions.extend(
              [','.join(map(str, vallist)) for vallist in values])
          for vallist in values:
            aggregate_value.extend(vallist)
        # NOTE(review): if values[0] is neither a dict nor a list,
        # aggregate_value is never bound and the super().__call__ below
        # would raise NameError -- presumably unreachable because this
        # action is only attached to ArgList/ArgDict flags; confirm.
        extras = suggestions[1:]
        msg = (
            'We noticed that you are using space-separated lists, which are '
            'deprecated. '
            'Please transition to using comma-separated lists instead '
            '(try "{flag} {values}"). '
            'If you intend to use [{extras}] as positional arguments, put the '
            'flags at the end.').format(
                flag=option_string,
                values=','.join(suggestions),
                extras=', '.join(extras))

        # TODO(user): stop warning when we're ready
        warn_only = True
        if not warn_only:
          raise argparse.ArgumentError(ArgShell(option_string), msg)
        else:
          log.warn(msg)
        super(FloatingListValuesCatcherAction, self).__call__(
            parser, namespace, aggregate_value, option_string=option_string)
      else:
        # Exactly one value: unwrap the nargs list before storing.
        super(FloatingListValuesCatcherAction, self).__call__(
            parser, namespace, values[0], option_string=option_string)

  return FloatingListValuesCatcherAction
| apache-2.0 |
lattwood/phantomjs | src/breakpad/src/third_party/protobuf/protobuf/python/google/protobuf/descriptor.py | 260 | 22737 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Descriptors essentially contain exactly the information found in a .proto
file, in types that make this information accessible in Python.
"""
__author__ = 'robinson@google.com (Will Robinson)'
from google.protobuf.internal import api_implementation

# The C++ descriptor accessors are only needed (and only importable) when the
# C++ protobuf implementation is active.
if api_implementation.Type() == 'cpp':
  from google.protobuf.internal import cpp_message
class Error(Exception):
  """Root of the exception hierarchy for this module."""
class DescriptorBase(object):
  """Descriptors base class.

  This class is the base of all descriptor classes. It provides common
  options-related functionality.

  Attributes:
    has_options: True if the descriptor has non-default options. Usually it
      is not necessary to read this -- just call GetOptions() which will
      happily return the default instance. However, it's sometimes useful
      for efficiency, and also useful inside the protobuf implementation to
      avoid some bootstrapping issues.
  """

  def __init__(self, options, options_class_name):
    """Initialize the descriptor given its options message and the name of the
    class of the options message. The name of the class is required in case
    the options message is None and has to be created.
    """
    self._options = options
    self._options_class_name = options_class_name
    # True when explicit (non-default) options were supplied.
    self.has_options = options is not None

  def GetOptions(self):
    """Retrieves descriptor options.

    Returns the options that were set, lazily creating (and caching) the
    default options instance otherwise.
    """
    if self._options:
      return self._options
    # Imported here, not at module scope, to avoid a bootstrapping cycle:
    # descriptor_pb2 itself depends on this module.
    from google.protobuf import descriptor_pb2
    try:
      cls = getattr(descriptor_pb2, self._options_class_name)
    except AttributeError:
      raise RuntimeError('Unknown options class name %s!' %
                         (self._options_class_name))
    self._options = cls()
    return self._options
class _NestedDescriptorBase(DescriptorBase):
  """Common class for descriptors that can be nested."""

  def __init__(self, options, options_class_name, name, full_name,
               file, containing_type, serialized_start=None,
               serialized_end=None):
    """Constructor.

    Args:
      options: Protocol message options or None
        to use default message options.
      options_class_name: (str) The class name of the above options.
      name: (str) Name of this protocol message type.
      full_name: (str) Fully-qualified name of this protocol message type,
        which will include protocol "package" name and the name of any
        enclosing types.
      file: (FileDescriptor) Reference to file info.
      containing_type: if provided, this is a nested descriptor, with this
        descriptor as parent, otherwise None.
      serialized_start: The start index (inclusive) in block in the
        file.serialized_pb that describes this descriptor.
      serialized_end: The end index (exclusive) in block in the
        file.serialized_pb that describes this descriptor.
    """
    super(_NestedDescriptorBase, self).__init__(
        options, options_class_name)

    self.name = name
    # TODO(falk): Add function to calculate full_name instead of having it in
    # memory?
    self.full_name = full_name
    self.file = file
    self.containing_type = containing_type

    self._serialized_start = serialized_start
    self._serialized_end = serialized_end

  def GetTopLevelContainingType(self):
    """Returns the root if this is a nested type, or itself if its the root."""
    node = self
    while node.containing_type is not None:
      node = node.containing_type
    return node

  def CopyToProto(self, proto):
    """Copies this to the matching proto in descriptor_pb2.

    Args:
      proto: An empty proto instance from descriptor_pb2.

    Raises:
      Error: If self could not be serialized, due to too few constructor
        arguments.
    """
    # Without a file reference and a serialized byte range there is nothing
    # to copy from, so bail out early.
    if (self.file is None or
        self._serialized_start is None or
        self._serialized_end is None):
      raise Error('Descriptor does not contain serialization.')
    proto.ParseFromString(self.file.serialized_pb[
        self._serialized_start:self._serialized_end])
class Descriptor(_NestedDescriptorBase):
  """Descriptor for a protocol message type.

  A Descriptor instance has the following attributes:

    name: (str) Name of this protocol message type.
    full_name: (str) Fully-qualified name of this protocol message type,
      which will include protocol "package" name and the name of any
      enclosing types.

    containing_type: (Descriptor) Reference to the descriptor of the
      type containing us, or None if this is top-level.

    fields: (list of FieldDescriptors) Field descriptors for all
      fields in this type.
    fields_by_number: (dict int -> FieldDescriptor) Same FieldDescriptor
      objects as in |fields|, but indexed by "number" attribute in each
      FieldDescriptor.
    fields_by_name: (dict str -> FieldDescriptor) Same FieldDescriptor
      objects as in |fields|, but indexed by "name" attribute in each
      FieldDescriptor.

    nested_types: (list of Descriptors) Descriptor references
      for all protocol message types nested within this one.
    nested_types_by_name: (dict str -> Descriptor) Same Descriptor
      objects as in |nested_types|, but indexed by "name" attribute
      in each Descriptor.

    enum_types: (list of EnumDescriptors) EnumDescriptor references
      for all enums contained within this type.
    enum_types_by_name: (dict str -> EnumDescriptor) Same EnumDescriptor
      objects as in |enum_types|, but indexed by "name" attribute
      in each EnumDescriptor.
    enum_values_by_name: (dict str -> EnumValueDescriptor) Dict mapping
      from enum value name to EnumValueDescriptor for that value.

    extensions: (list of FieldDescriptor) All extensions defined directly
      within this message type (NOT within a nested type).
    extensions_by_name: (dict, string -> FieldDescriptor) Same FieldDescriptor
      objects as |extensions|, but indexed by "name" attribute of each
      FieldDescriptor.

    is_extendable: Does this type define any extension ranges?

    options: (descriptor_pb2.MessageOptions) Protocol message options or None
      to use default message options.

    file: (FileDescriptor) Reference to file descriptor.
  """

  def __init__(self, name, full_name, filename, containing_type, fields,
               nested_types, enum_types, extensions, options=None,
               is_extendable=True, extension_ranges=None, file=None,
               serialized_start=None, serialized_end=None):
    """Arguments to __init__() are as described in the description
    of Descriptor fields above.

    Note that filename is an obsolete argument, that is not used anymore.
    Please use file.name to access this as an attribute.
    """
    # Bug fix: serialized_end was previously passed as serialized_start,
    # which was only masked by redundant direct assignments afterwards.
    # The base class now records both _serialized_start and _serialized_end.
    super(Descriptor, self).__init__(
        options, 'MessageOptions', name, full_name, file,
        containing_type, serialized_start=serialized_start,
        serialized_end=serialized_end)

    # We have fields in addition to fields_by_name and fields_by_number,
    # so that:
    #   1. Clients can index fields by "order in which they're listed."
    #   2. Clients can easily iterate over all fields with the terse
    #      syntax: for f in descriptor.fields: ...
    self.fields = fields
    for field in self.fields:
      field.containing_type = self
    self.fields_by_number = dict((f.number, f) for f in fields)
    self.fields_by_name = dict((f.name, f) for f in fields)

    self.nested_types = nested_types
    self.nested_types_by_name = dict((t.name, t) for t in nested_types)

    self.enum_types = enum_types
    for enum_type in self.enum_types:
      enum_type.containing_type = self
    self.enum_types_by_name = dict((t.name, t) for t in enum_types)
    # Flattened view over all values of all nested enums.
    self.enum_values_by_name = dict(
        (v.name, v) for t in enum_types for v in t.values)

    self.extensions = extensions
    for extension in self.extensions:
      extension.extension_scope = self
    self.extensions_by_name = dict((f.name, f) for f in extensions)
    self.is_extendable = is_extendable
    self.extension_ranges = extension_ranges

  def CopyToProto(self, proto):
    """Copies this to a descriptor_pb2.DescriptorProto.

    Args:
      proto: An empty descriptor_pb2.DescriptorProto.
    """
    # This function is overriden to give a better doc comment.
    super(Descriptor, self).CopyToProto(proto)
# TODO(robinson): We should have aggressive checking here,
# for example:
# * If you specify a repeated field, you should not be allowed
# to specify a default value.
# * [Other examples here as needed].
#
# TODO(robinson): for this and other *Descriptor classes, we
# might also want to lock things down aggressively (e.g.,
# prevent clients from setting the attributes). Having
# stronger invariants here in general will reduce the number
# of runtime checks we must do in reflection.py...
class FieldDescriptor(DescriptorBase):
  """Descriptor for a single field in a .proto file.

  A FieldDescriptor instance has the following attributes:

    name: (str) Name of this field, exactly as it appears in .proto.
    full_name: (str) Name of this field, including containing scope. This is
      particularly relevant for extensions.
    index: (int) Dense, 0-indexed index giving the order that this
      field textually appears within its message in the .proto file.
    number: (int) Tag number declared for this field in the .proto file.

    type: (One of the TYPE_* constants below) Declared type.
    cpp_type: (One of the CPPTYPE_* constants below) C++ type used to
      represent this field.

    label: (One of the LABEL_* constants below) Tells whether this
      field is optional, required, or repeated.
    has_default_value: (bool) True if this field has a default value defined,
      otherwise false.
    default_value: (Varies) Default value of this field. Only
      meaningful for non-repeated scalar fields. Repeated fields
      should always set this to [], and non-repeated composite
      fields should always set this to None.

    containing_type: (Descriptor) Descriptor of the protocol message
      type that contains this field. Set by the Descriptor constructor
      if we're passed into one.
      Somewhat confusingly, for extension fields, this is the
      descriptor of the EXTENDED message, not the descriptor
      of the message containing this field. (See is_extension and
      extension_scope below).
    message_type: (Descriptor) If a composite field, a descriptor
      of the message type contained in this field. Otherwise, this is None.
    enum_type: (EnumDescriptor) If this field contains an enum, a
      descriptor of that enum. Otherwise, this is None.

    is_extension: True iff this describes an extension field.
    extension_scope: (Descriptor) Only meaningful if is_extension is True.
      Gives the message that immediately contains this extension field.
      Will be None iff we're a top-level (file-level) extension field.

    options: (descriptor_pb2.FieldOptions) Protocol message field options or
      None to use default field options.
  """

  # Must be consistent with C++ FieldDescriptor::Type enum in
  # descriptor.h.
  #
  # TODO(robinson): Find a way to eliminate this repetition.
  TYPE_DOUBLE = 1
  TYPE_FLOAT = 2
  TYPE_INT64 = 3
  TYPE_UINT64 = 4
  TYPE_INT32 = 5
  TYPE_FIXED64 = 6
  TYPE_FIXED32 = 7
  TYPE_BOOL = 8
  TYPE_STRING = 9
  TYPE_GROUP = 10
  TYPE_MESSAGE = 11
  TYPE_BYTES = 12
  TYPE_UINT32 = 13
  TYPE_ENUM = 14
  TYPE_SFIXED32 = 15
  TYPE_SFIXED64 = 16
  TYPE_SINT32 = 17
  TYPE_SINT64 = 18
  MAX_TYPE = 18

  # Must be consistent with C++ FieldDescriptor::CppType enum in
  # descriptor.h.
  #
  # TODO(robinson): Find a way to eliminate this repetition.
  CPPTYPE_INT32 = 1
  CPPTYPE_INT64 = 2
  CPPTYPE_UINT32 = 3
  CPPTYPE_UINT64 = 4
  CPPTYPE_DOUBLE = 5
  CPPTYPE_FLOAT = 6
  CPPTYPE_BOOL = 7
  CPPTYPE_ENUM = 8
  CPPTYPE_STRING = 9
  CPPTYPE_MESSAGE = 10
  MAX_CPPTYPE = 10

  # Must be consistent with C++ FieldDescriptor::Label enum in
  # descriptor.h.
  #
  # TODO(robinson): Find a way to eliminate this repetition.
  LABEL_OPTIONAL = 1
  LABEL_REQUIRED = 2
  LABEL_REPEATED = 3
  MAX_LABEL = 3

  def __init__(self, name, full_name, index, number, type, cpp_type, label,
               default_value, message_type, enum_type, containing_type,
               is_extension, extension_scope, options=None,
               has_default_value=True):
    """The arguments are as described in the description of FieldDescriptor
    attributes above.

    Note that containing_type may be None, and may be set later if necessary
    (to deal with circular references between message types, for example).
    Likewise for extension_scope.
    """
    super(FieldDescriptor, self).__init__(options, 'FieldOptions')
    self.name = name
    self.full_name = full_name
    self.index = index
    self.number = number
    self.type = type
    self.cpp_type = cpp_type
    self.label = label
    self.has_default_value = has_default_value
    self.default_value = default_value
    self.containing_type = containing_type
    self.message_type = message_type
    self.enum_type = enum_type
    self.is_extension = is_extension
    self.extension_scope = extension_scope
    # When the C++ implementation is active, look up the matching C++
    # descriptor by full name so both implementations stay in sync.
    if api_implementation.Type() == 'cpp':
      if is_extension:
        self._cdescriptor = cpp_message.GetExtensionDescriptor(full_name)
      else:
        self._cdescriptor = cpp_message.GetFieldDescriptor(full_name)
    else:
      self._cdescriptor = None
class EnumDescriptor(_NestedDescriptorBase):
  """Descriptor for an enum defined in a .proto file.

  An EnumDescriptor instance has the following attributes:

    name: (str) Name of the enum type.
    full_name: (str) Full name of the type, including package name
      and any enclosing type(s).

    values: (list of EnumValueDescriptors) List of the values
      in this enum.
    values_by_name: (dict str -> EnumValueDescriptor) Same as |values|,
      but indexed by the "name" field of each EnumValueDescriptor.
    values_by_number: (dict int -> EnumValueDescriptor) Same as |values|,
      but indexed by the "number" field of each EnumValueDescriptor.
    containing_type: (Descriptor) Descriptor of the immediate containing
      type of this enum, or None if this is an enum defined at the
      top level in a .proto file. Set by Descriptor's constructor
      if we're passed into one.
    file: (FileDescriptor) Reference to file descriptor.
    options: (descriptor_pb2.EnumOptions) Enum options message or
      None to use default enum options.
  """

  def __init__(self, name, full_name, filename, values,
               containing_type=None, options=None, file=None,
               serialized_start=None, serialized_end=None):
    """Arguments are as described in the attribute description above.

    Note that filename is an obsolete argument, that is not used anymore.
    Please use file.name to access this as an attribute.
    """
    # Bug fix: serialized_end was previously passed as serialized_start,
    # which was only masked by redundant direct assignments afterwards.
    # The base class now records both _serialized_start and _serialized_end.
    super(EnumDescriptor, self).__init__(
        options, 'EnumOptions', name, full_name, file,
        containing_type, serialized_start=serialized_start,
        serialized_end=serialized_end)

    self.values = values
    for value in self.values:
      # Backlink each value to its owning enum.
      value.type = self
    self.values_by_name = dict((v.name, v) for v in values)
    self.values_by_number = dict((v.number, v) for v in values)

  def CopyToProto(self, proto):
    """Copies this to a descriptor_pb2.EnumDescriptorProto.

    Args:
      proto: An empty descriptor_pb2.EnumDescriptorProto.
    """
    # This function is overriden to give a better doc comment.
    super(EnumDescriptor, self).CopyToProto(proto)
class EnumValueDescriptor(DescriptorBase):

  """Descriptor for a single value within an enum.

    name: (str) Name of this value.
    index: (int) Dense, 0-indexed index giving the order that this
      value appears textually within its enum in the .proto file.
    number: (int) Actual number assigned to this enum value.
    type: (EnumDescriptor) EnumDescriptor to which this value
      belongs.  Set by EnumDescriptor's constructor if we're
      passed into one.
    options: (descriptor_pb2.EnumValueOptions) Enum value options message or
      None to use default enum value options options.
  """

  def __init__(self, name, index, number, type=None, options=None):
    """Arguments are as described in the attribute description above."""
    super(EnumValueDescriptor, self).__init__(options, 'EnumValueOptions')
    # Plain attribute copies; |type| is typically None here and is patched
    # in later by EnumDescriptor.__init__ when the value is attached.
    self.name = name
    self.index = index
    self.number = number
    self.type = type
class ServiceDescriptor(_NestedDescriptorBase):

  """Descriptor for a service.

    name: (str) Name of the service.
    full_name: (str) Full name of the service, including package name.
    index: (int) 0-indexed index giving the order that this services
      definition appears withing the .proto file.
    methods: (list of MethodDescriptor) List of methods provided by this
      service.
    options: (descriptor_pb2.ServiceOptions) Service options message or
      None to use default service options.
    file: (FileDescriptor) Reference to file info.
  """

  def __init__(self, name, full_name, index, methods, options=None, file=None,
               serialized_start=None, serialized_end=None):
    super(ServiceDescriptor, self).__init__(
        options, 'ServiceOptions', name, full_name, file,
        None, serialized_start=serialized_start,
        serialized_end=serialized_end)
    self.index = index
    self.methods = methods
    # Give every method a back-reference to the service that owns it.
    for method in self.methods:
      method.containing_service = self

  def FindMethodByName(self, name):
    """Searches for the specified method, and returns its descriptor."""
    # Return the first method with a matching name, or None if absent.
    return next((m for m in self.methods if m.name == name), None)

  def CopyToProto(self, proto):
    """Copies this to a descriptor_pb2.ServiceDescriptorProto.

    Args:
      proto: An empty descriptor_pb2.ServiceDescriptorProto.
    """
    # Overridden only to carry a more specific doc comment.
    super(ServiceDescriptor, self).CopyToProto(proto)
class MethodDescriptor(DescriptorBase):

  """Descriptor for a method in a service.

  name: (str) Name of the method within the service.
  full_name: (str) Full name of method.
  index: (int) 0-indexed index of the method inside the service.
  containing_service: (ServiceDescriptor) The service that contains this
    method.
  input_type: The descriptor of the message that this method accepts.
  output_type: The descriptor of the message that this method returns.
  options: (descriptor_pb2.MethodOptions) Method options message or
    None to use default method options.
  """

  def __init__(self, name, full_name, index, containing_service,
               input_type, output_type, options=None):
    """The arguments are as described in the description of MethodDescriptor
    attributes above.

    Note that containing_service may be None, and may be set later if necessary.
    """
    super(MethodDescriptor, self).__init__(options, 'MethodOptions')
    # Plain attribute copies; containing_service may be patched in later by
    # ServiceDescriptor.__init__ when the method is attached to a service.
    self.name = name
    self.full_name = full_name
    self.index = index
    self.containing_service = containing_service
    self.input_type = input_type
    self.output_type = output_type
class FileDescriptor(DescriptorBase):

  """Descriptor for a file. Mimics the descriptor_pb2.FileDescriptorProto.

  name: name of file, relative to root of source tree.
  package: name of the package
  serialized_pb: (str) Byte string of serialized
    descriptor_pb2.FileDescriptorProto.
  """

  def __init__(self, name, package, options=None, serialized_pb=None):
    """Constructor."""
    super(FileDescriptor, self).__init__(options, 'FileOptions')

    # Populated externally (by the code that builds message descriptors
    # for this file); starts empty here.
    self.message_types_by_name = {}
    self.name = name
    self.package = package
    self.serialized_pb = serialized_pb
    # When the C++ implementation backs the API, register the serialized
    # file with it so the native descriptor pool knows about this file.
    if (api_implementation.Type() == 'cpp' and
        self.serialized_pb is not None):
      cpp_message.BuildFile(self.serialized_pb)

  def CopyToProto(self, proto):
    """Copies this to a descriptor_pb2.FileDescriptorProto.

    Args:
      proto: An empty descriptor_pb2.FileDescriptorProto.
    """
    # The serialized form is the source of truth; just re-parse it.
    proto.ParseFromString(self.serialized_pb)
def _ParseOptions(message, string):
"""Parses serialized options.
This helper function is used to parse serialized options in generated
proto2 files. It must not be used outside proto2.
"""
message.ParseFromString(string)
return message
| bsd-3-clause |
dednal/chromium.src | third_party/tlslite/tlslite/utils/pycrypto_rsakey.py | 55 | 1338 | # Author: Trevor Perrin
# See the LICENSE file for legal information regarding use of this file.
"""PyCrypto RSA implementation."""
from .cryptomath import *
from .rsakey import *
from .python_rsakey import Python_RSAKey
if pycryptoLoaded:
from Crypto.PublicKey import RSA
    class PyCrypto_RSAKey(RSAKey):
        # Thin adapter exposing the tlslite RSAKey interface on top of a
        # PyCrypto RSA key object stored in self.rsa.

        def __init__(self, n=0, e=0, d=0, p=0, q=0, dP=0, dQ=0, qInv=0):
            # PyCrypto derives the CRT parameters itself, so only (n, e)
            # for a public key or (n, e, d, p, q) for a private key are
            # passed to RSA.construct; dP/dQ/qInv are accepted for
            # signature compatibility but ignored.
            if not d:
                self.rsa = RSA.construct( (n, e) )
            else:
                self.rsa = RSA.construct( (n, e, d, p, q) )

        def __getattr__(self, name):
            # Delegate unknown attribute lookups (n, e, d, ...) to the
            # wrapped PyCrypto key.
            return getattr(self.rsa, name)

        def hasPrivateKey(self):
            return self.rsa.has_private()

        def _rawPrivateKeyOp(self, m):
            # Raw (textbook) RSA private-key operation: int -> int,
            # round-tripping through a fixed-width big-endian byte string.
            s = bytes(numberToByteArray(m, numBytes(self.n)))
            c = bytesToNumber(bytearray(self.rsa.decrypt((s,))))
            return c

        def _rawPublicKeyOp(self, c):
            # Raw (textbook) RSA public-key operation: int -> int.
            s = bytes(numberToByteArray(c, numBytes(self.n)))
            m = bytesToNumber(bytearray(self.rsa.encrypt(s, None)[0]))
            return m

        def generate(bits):
            # Static factory: generate a fresh key of the given modulus
            # length, using tlslite's RNG as the entropy source.
            key = PyCrypto_RSAKey()
            def f(numBytes):
                return bytes(getRandomBytes(numBytes))
            key.rsa = RSA.generate(bits, f)
            return key
        generate = staticmethod(generate)
| bsd-3-clause |
wanglongqi/sympy | sympy/simplify/radsimp.py | 46 | 35730 | from __future__ import print_function, division
from collections import defaultdict
from sympy import SYMPY_DEBUG
from sympy.core.evaluate import global_evaluate
from sympy.core.compatibility import iterable, ordered, as_int, default_sort_key
from sympy.core import expand_power_base, sympify, Add, S, Mul, Derivative, Pow, symbols, expand_mul
from sympy.core.numbers import Rational, Float
from sympy.core.exprtools import Factors, gcd_terms
from sympy.core.mul import _keep_coeff, _unevaluated_Mul
from sympy.core.function import _mexpand
from sympy.core.add import _unevaluated_Add
from sympy.functions import exp, sqrt, log
from sympy.polys import gcd
from sympy.simplify.sqrtdenest import sqrtdenest
import mpmath
def collect(expr, syms, func=None, evaluate=None, exact=False, distribute_order_term=True):
    """
    Collect additive terms of an expression.

    This function collects additive terms of an expression with respect
    to a list of expression up to powers with rational exponents. By the
    term symbol here are meant arbitrary expressions, which can contain
    powers, products, sums etc. In other words symbol is a pattern which
    will be searched for in the expression's terms.

    The input expression is not expanded by :func:`collect`, so user is
    expected to provide an expression is an appropriate form. This makes
    :func:`collect` more predictable as there is no magic happening behind the
    scenes. However, it is important to note, that powers of products are
    converted to products of powers using the :func:`expand_power_base`
    function.

    There are two possible types of output. First, if ``evaluate`` flag is
    set, this function will return an expression with collected terms or
    else it will return a dictionary with expressions up to rational powers
    as keys and collected coefficients as values.

    Examples
    ========

    >>> from sympy import S, collect, expand, factor, Wild
    >>> from sympy.abc import a, b, c, x, y, z

    This function can collect symbolic coefficients in polynomials or
    rational expressions. It will manage to find all integer or rational
    powers of collection variable::

    >>> collect(a*x**2 + b*x**2 + a*x - b*x + c, x)
    c + x**2*(a + b) + x*(a - b)

    The same result can be achieved in dictionary form::

    >>> d = collect(a*x**2 + b*x**2 + a*x - b*x + c, x, evaluate=False)
    >>> d[x**2]
    a + b
    >>> d[x]
    a - b
    >>> d[S.One]
    c

    You can also work with multivariate polynomials. However, remember that
    this function is greedy so it will care only about a single symbol at time,
    in specification order::

    >>> collect(x**2 + y*x**2 + x*y + y + a*y, [x, y])
    x**2*(y + 1) + x*y + y*(a + 1)

    Also more complicated expressions can be used as patterns::

    >>> from sympy import sin, log
    >>> collect(a*sin(2*x) + b*sin(2*x), sin(2*x))
    (a + b)*sin(2*x)

    >>> collect(a*x*log(x) + b*(x*log(x)), x*log(x))
    x*(a + b)*log(x)

    You can use wildcards in the pattern::

    >>> w = Wild('w1')
    >>> collect(a*x**y - b*x**y, w**y)
    x**y*(a - b)

    It is also possible to work with symbolic powers, although it has more
    complicated behavior, because in this case power's base and symbolic part
    of the exponent are treated as a single symbol::

    >>> collect(a*x**c + b*x**c, x)
    a*x**c + b*x**c

    >>> collect(a*x**c + b*x**c, x**c)
    x**c*(a + b)

    However if you incorporate rationals to the exponents, then you will get
    well known behavior::

    >>> collect(a*x**(2*c) + b*x**(2*c), x**c)
    x**(2*c)*(a + b)

    Note also that all previously stated facts about :func:`collect` function
    apply to the exponential function, so you can get::

    >>> from sympy import exp
    >>> collect(a*exp(2*x) + b*exp(2*x), exp(x))
    (a + b)*exp(2*x)

    If you are interested only in collecting specific powers of some symbols
    then set ``exact`` flag in arguments::

    >>> collect(a*x**7 + b*x**7, x, exact=True)
    a*x**7 + b*x**7

    >>> collect(a*x**7 + b*x**7, x**7, exact=True)
    x**7*(a + b)

    You can also apply this function to differential equations, where
    derivatives of arbitrary order can be collected. Note that if you
    collect with respect to a function or a derivative of a function, all
    derivatives of that function will also be collected. Use
    ``exact=True`` to prevent this from happening::

    >>> from sympy import Derivative as D, collect, Function
    >>> f = Function('f') (x)

    >>> collect(a*D(f,x) + b*D(f,x), D(f,x))
    (a + b)*Derivative(f(x), x)

    >>> collect(a*D(D(f,x),x) + b*D(D(f,x),x), f)
    (a + b)*Derivative(f(x), x, x)

    >>> collect(a*D(D(f,x),x) + b*D(D(f,x),x), D(f,x), exact=True)
    a*Derivative(f(x), x, x) + b*Derivative(f(x), x, x)

    >>> collect(a*D(f,x) + b*D(f,x) + a*f + b*f, f)
    (a + b)*f(x) + (a + b)*Derivative(f(x), x)

    Or you can even match both derivative order and exponent at the same time::

    >>> collect(a*D(D(f,x),x)**2 + b*D(D(f,x),x)**2, D(f,x))
    (a + b)*Derivative(f(x), x, x)**2

    Finally, you can apply a function to each of the collected coefficients.
    For example you can factorize symbolic coefficients of polynomial::

    >>> f = expand((x + a + 1)**3)

    >>> collect(f, x, factor)
    x**3 + 3*x**2*(a + 1) + 3*x*(a + 1)**2 + (a + 1)**3

    .. note:: Arguments are expected to be in expanded form, so you might have
       to call :func:`expand` prior to calling this function.

    See Also
    ========

    collect_const, collect_sqrt, rcollect
    """
    if evaluate is None:
        evaluate = global_evaluate[0]

    def make_expression(terms):
        """Rebuild a product from a list of (term, rat, sym, deriv)
        tuples as produced by parse_term."""
        product = []

        for term, rat, sym, deriv in terms:
            if deriv is not None:
                # Re-wrap the term in as many Derivative layers as were
                # stripped off by parse_derivative.
                var, order = deriv

                while order > 0:
                    term, order = Derivative(term, var), order - 1

            if sym is None:
                if rat is S.One:
                    product.append(term)
                else:
                    product.append(Pow(term, rat))
            else:
                product.append(Pow(term, rat*sym))

        return Mul(*product)

    def parse_derivative(deriv):
        # scan derivatives tower in the input expression and return
        # underlying function and maximal differentiation order
        expr, sym, order = deriv.expr, deriv.variables[0], 1

        for s in deriv.variables[1:]:
            if s == sym:
                order += 1
            else:
                raise NotImplementedError(
                    'Improve MV Derivative support in collect')

        while isinstance(expr, Derivative):
            s0 = expr.variables[0]

            for s in expr.variables:
                if s != s0:
                    raise NotImplementedError(
                        'Improve MV Derivative support in collect')

            if s0 == sym:
                expr, order = expr.expr, order + len(expr.variables)
            else:
                break

        return expr, (sym, Rational(order))

    def parse_term(expr):
        """Parses expression expr and outputs tuple (sexpr, rat_expo,
        sym_expo, deriv)
        where:
         - sexpr is the base expression
         - rat_expo is the rational exponent that sexpr is raised to
         - sym_expo is the symbolic exponent that sexpr is raised to
         - deriv contains the derivatives of the expression

         for example, the output of x would be (x, 1, None, None)
         the output of 2**x would be (2, 1, x, None)
        """
        rat_expo, sym_expo = S.One, None
        sexpr, deriv = expr, None

        if expr.is_Pow:
            if isinstance(expr.base, Derivative):
                sexpr, deriv = parse_derivative(expr.base)
            else:
                sexpr = expr.base

            if expr.exp.is_Number:
                rat_expo = expr.exp
            else:
                # Split a mixed exponent like 2*c into rational part 2
                # and symbolic part c.
                coeff, tail = expr.exp.as_coeff_Mul()

                if coeff.is_Number:
                    rat_expo, sym_expo = coeff, tail
                else:
                    sym_expo = expr.exp
        elif expr.func is exp:
            # exp(...) is treated like a power with base E.
            arg = expr.args[0]
            if arg.is_Rational:
                sexpr, rat_expo = S.Exp1, arg
            elif arg.is_Mul:
                coeff, tail = arg.as_coeff_Mul(rational=True)
                sexpr, rat_expo = exp(tail), coeff
        elif isinstance(expr, Derivative):
            sexpr, deriv = parse_derivative(expr)

        return sexpr, rat_expo, sym_expo, deriv

    def parse_expression(terms, pattern):
        """Parse terms searching for a pattern.
        terms is a list of tuples as returned by parse_terms;
        pattern is an expression treated as a product of factors
        """
        pattern = Mul.make_args(pattern)

        if len(terms) < len(pattern):
            # pattern is longer than matched product
            # so no chance for positive parsing result
            return None
        else:
            pattern = [parse_term(elem) for elem in pattern]

            terms = terms[:]  # need a copy
            elems, common_expo, has_deriv = [], None, False

            for elem, e_rat, e_sym, e_ord in pattern:

                if elem.is_Number and e_rat == 1 and e_sym is None:
                    # a constant is a match for everything
                    continue

                for j in range(len(terms)):
                    if terms[j] is None:
                        continue

                    term, t_rat, t_sym, t_ord = terms[j]

                    # keeping track of whether one of the terms had
                    # a derivative or not as this will require rebuilding
                    # the expression later
                    if t_ord is not None:
                        has_deriv = True

                    if (term.match(elem) is not None and
                            (t_sym == e_sym or t_sym is not None and
                            e_sym is not None and
                            t_sym.match(e_sym) is not None)):
                        if exact is False:
                            # we don't have to be exact so find common exponent
                            # for both expression's term and pattern's element
                            expo = t_rat / e_rat

                            if common_expo is None:
                                # first time
                                common_expo = expo
                            else:
                                # common exponent was negotiated before so
                                # there is no chance for a pattern match unless
                                # common and current exponents are equal
                                if common_expo != expo:
                                    common_expo = 1
                        else:
                            # we ought to be exact so all fields of
                            # interest must match in every details
                            if e_rat != t_rat or e_ord != t_ord:
                                continue

                        # found common term so remove it from the expression
                        # and try to match next element in the pattern
                        elems.append(terms[j])
                        terms[j] = None

                        break

                else:
                    # pattern element not found
                    return None

            return [_f for _f in terms if _f], elems, common_expo, has_deriv

    if evaluate:
        # Distribute over products and powers: collect inside each factor
        # (or the base of a power) and rebuild.
        if expr.is_Mul:
            return expr.func(*[
                collect(term, syms, func, True, exact, distribute_order_term)
                for term in expr.args])
        elif expr.is_Pow:
            b = collect(
                expr.base, syms, func, True, exact, distribute_order_term)
            return Pow(b, expr.exp)

    if iterable(syms):
        syms = [expand_power_base(i, deep=False) for i in syms]
    else:
        syms = [expand_power_base(syms, deep=False)]

    expr = sympify(expr)
    order_term = None

    if distribute_order_term:
        order_term = expr.getO()

        if order_term is not None:
            if order_term.has(*syms):
                order_term = None
            else:
                expr = expr.removeO()

    # Expand powers of products so each additive term can be matched
    # factor by factor.
    summa = [expand_power_base(i, deep=False) for i in Add.make_args(expr)]

    # collected: matched terms keyed by the collected factor;
    # disliked: sum of terms that matched none of the patterns.
    collected, disliked = defaultdict(list), S.Zero

    for product in summa:
        terms = [parse_term(i) for i in Mul.make_args(product)]

        for symbol in syms:
            if SYMPY_DEBUG:
                print("DEBUG: parsing of expression %s with symbol %s " % (
                    str(terms), str(symbol))
                )

            result = parse_expression(terms, symbol)

            if SYMPY_DEBUG:
                print("DEBUG: returned %s" % str(result))

            if result is not None:
                terms, elems, common_expo, has_deriv = result

                # when there was derivative in current pattern we
                # will need to rebuild its expression from scratch
                if not has_deriv:
                    index = 1
                    for elem in elems:
                        e = elem[1]
                        if elem[2] is not None:
                            e *= elem[2]
                        index *= Pow(elem[0], e)
                else:
                    index = make_expression(elems)
                terms = expand_power_base(make_expression(terms), deep=False)
                index = expand_power_base(index, deep=False)
                collected[index].append(terms)
                break
        else:
            # none of the patterns matched
            disliked += product
    # add terms now for each key
    collected = dict([(k, Add(*v)) for k, v in collected.items()])

    if disliked is not S.Zero:
        collected[S.One] = disliked

    if order_term is not None:
        for key, val in collected.items():
            collected[key] = val + order_term

    if func is not None:
        collected = dict(
            [(key, func(val)) for key, val in collected.items()])

    if evaluate:
        return Add(*[key*val for key, val in collected.items()])
    else:
        return collected
def rcollect(expr, *vars):
    """
    Recursively collect sums in an expression.

    Examples
    ========

    >>> from sympy.simplify import rcollect
    >>> from sympy.abc import x, y

    >>> expr = (x**2*y + x*y + x + y)/(x + y)

    >>> rcollect(expr, y)
    (x + y*(x**2 + x + 1))/(x + y)

    See Also
    ========

    collect, collect_const, collect_sqrt
    """
    # Leaves, and subtrees free of the collection variables, pass through
    # untouched.
    if expr.is_Atom or not expr.has(*vars):
        return expr

    # Rebuild this node from recursively collected arguments, then collect
    # at this level too if the rebuilt node is a sum.
    rebuilt = expr.__class__(*[rcollect(arg, *vars) for arg in expr.args])
    return collect(rebuilt, vars) if rebuilt.is_Add else rebuilt
def collect_sqrt(expr, evaluate=None):
    """Return expr with terms having common square roots collected together.
    If ``evaluate`` is False a count indicating the number of sqrt-containing
    terms will be returned and, if non-zero, the terms of the Add will be
    returned, else the expression itself will be returned as a single term.
    If ``evaluate`` is True, the expression with any collected terms will be
    returned.

    Note: since I = sqrt(-1), it is collected, too.

    Examples
    ========

    >>> from sympy import sqrt
    >>> from sympy.simplify.radsimp import collect_sqrt
    >>> from sympy.abc import a, b

    >>> r2, r3, r5 = [sqrt(i) for i in [2, 3, 5]]
    >>> collect_sqrt(a*r2 + b*r2)
    sqrt(2)*(a + b)
    >>> collect_sqrt(a*r2 + b*r2 + a*r3 + b*r3)
    sqrt(2)*(a + b) + sqrt(3)*(a + b)
    >>> collect_sqrt(a*r2 + b*r2 + a*r3 + b*r5)
    sqrt(3)*a + sqrt(5)*b + sqrt(2)*(a + b)

    If evaluate is False then the arguments will be sorted and
    returned as a list and a count of the number of sqrt-containing
    terms will be returned:

    >>> collect_sqrt(a*r2 + b*r2 + a*r3 + b*r5, evaluate=False)
    ((sqrt(3)*a, sqrt(5)*b, sqrt(2)*(a + b)), 3)
    >>> collect_sqrt(a*sqrt(2) + b, evaluate=False)
    ((b, sqrt(2)*a), 1)
    >>> collect_sqrt(a + b, evaluate=False)
    ((a + b,), 0)

    See Also
    ========

    collect, collect_const, rcollect
    """
    if evaluate is None:
        evaluate = global_evaluate[0]
    # this step will help to standardize any complex arguments
    # of sqrts
    coeff, expr = expr.as_content_primitive()
    vars = set()
    # Gather every numeric half-integer power (and I, since I = sqrt(-1))
    # that appears as a commutative factor of an additive term; these are
    # the radicals we will collect on.
    for a in Add.make_args(expr):
        for m in a.args_cnc()[0]:
            if m.is_number and (
                    m.is_Pow and m.exp.is_Rational and m.exp.q == 2 or
                    m is S.ImaginaryUnit):
                vars.add(m)

    # we only want radicals, so exclude Number handling; in this case
    # d will be evaluated
    d = collect_const(expr, *vars, Numbers=False)
    hit = expr != d

    if not evaluate:
        nrad = 0
        # make the evaluated args canonical
        args = list(ordered(Add.make_args(d)))
        for i, m in enumerate(args):
            c, nc = m.args_cnc()
            for ci in c:
                # XXX should this be restricted to ci.is_number as above?
                if ci.is_Pow and ci.exp.is_Rational and ci.exp.q == 2 or \
                        ci is S.ImaginaryUnit:
                    nrad += 1
                    break
            # Re-attach the content factor that was pulled out above.
            args[i] *= coeff
        if not (hit or nrad):
            args = [Add(*args)]
        return tuple(args), nrad

    return coeff*d
def collect_const(expr, *vars, **kwargs):
    """A non-greedy collection of terms with similar number coefficients in
    an Add expr. If ``vars`` is given then only those constants will be
    targeted. Although any Number can also be targeted, if this is not
    desired set ``Numbers=False`` and no Float or Rational will be collected.

    Examples
    ========

    >>> from sympy import sqrt
    >>> from sympy.abc import a, s, x, y, z
    >>> from sympy.simplify.radsimp import collect_const
    >>> collect_const(sqrt(3) + sqrt(3)*(1 + sqrt(2)))
    sqrt(3)*(sqrt(2) + 2)
    >>> collect_const(sqrt(3)*s + sqrt(7)*s + sqrt(3) + sqrt(7))
    (sqrt(3) + sqrt(7))*(s + 1)
    >>> s = sqrt(2) + 2
    >>> collect_const(sqrt(3)*s + sqrt(3) + sqrt(7)*s + sqrt(7))
    (sqrt(2) + 3)*(sqrt(3) + sqrt(7))
    >>> collect_const(sqrt(3)*s + sqrt(3) + sqrt(7)*s + sqrt(7), sqrt(3))
    sqrt(7) + sqrt(3)*(sqrt(2) + 3) + sqrt(7)*(sqrt(2) + 2)

    The collection is sign-sensitive, giving higher precedence to the
    unsigned values:

    >>> collect_const(x - y - z)
    x - (y + z)
    >>> collect_const(-y - z)
    -(y + z)
    >>> collect_const(2*x - 2*y - 2*z, 2)
    2*(x - y - z)
    >>> collect_const(2*x - 2*y - 2*z, -2)
    2*x - 2*(y + z)

    See Also
    ========

    collect, collect_sqrt, rcollect
    """
    if not expr.is_Add:
        return expr

    recurse = False
    Numbers = kwargs.get('Numbers', True)

    if not vars:
        # No targets given: collect on every numeric factor that appears
        # in the expression, and allow recursion on newly formed sums.
        recurse = True
        vars = set()
        for a in expr.args:
            for m in Mul.make_args(a):
                if m.is_number:
                    vars.add(m)
    else:
        vars = sympify(vars)
    if not Numbers:
        vars = [v for v in vars if not v.is_Number]

    vars = list(ordered(vars))
    for v in vars:
        terms = defaultdict(list)
        Fv = Factors(v)
        for m in Add.make_args(expr):
            f = Factors(m)
            # Exact division of the term's factors by the target's
            # factors; a remainder of 1 means v divides the term.
            q, r = f.div(Fv)
            if r.is_one:
                # only accept this as a true factor if
                # it didn't change an exponent from an Integer
                # to a non-Integer, e.g. 2/sqrt(2) -> sqrt(2)
                # -- we aren't looking for this sort of change
                fwas = f.factors.copy()
                fnow = q.factors
                if not any(k in fwas and fwas[k].is_Integer and not
                        fnow[k].is_Integer for k in fnow):
                    terms[v].append(q.as_expr())
                    continue
            terms[S.One].append(m)

        args = []
        hit = False
        uneval = False
        for k in ordered(terms):
            v = terms[k]
            if k is S.One:
                args.extend(v)
                continue

            if len(v) > 1:
                v = Add(*v)
                hit = True
                if recurse and v != expr:
                    # New sums may themselves be collectable; note that
                    # this appends to vars while it is being iterated.
                    vars.append(v)
            else:
                v = v[0]

            # be careful not to let uneval become True unless
            # it must be because it's going to be more expensive
            # to rebuild the expression as an unevaluated one
            if Numbers and k.is_Number and v.is_Add:
                args.append(_keep_coeff(k, v, sign=True))
                uneval = True
            else:
                args.append(k*v)

        if hit:
            if uneval:
                expr = _unevaluated_Add(*args)
            else:
                expr = Add(*args)
            if not expr.is_Add:
                break

    return expr
def radsimp(expr, symbolic=True, max_terms=4):
    """
    Rationalize the denominator by removing square roots.

    Note: the expression returned from radsimp must be used with caution
    since if the denominator contains symbols, it will be possible to make
    substitutions that violate the assumptions of the simplification process:
    that for a denominator matching a + b*sqrt(c), a != +/-b*sqrt(c). (If
    there are no symbols, this assumptions is made valid by collecting terms
    of sqrt(c) so the match variable ``a`` does not contain ``sqrt(c)``.) If
    you do not want the simplification to occur for symbolic denominators, set
    ``symbolic`` to False.

    If there are more than ``max_terms`` radical terms then the expression is
    returned unchanged.

    Examples
    ========

    >>> from sympy import radsimp, sqrt, Symbol, denom, pprint, I
    >>> from sympy import factor_terms, fraction, signsimp
    >>> from sympy.simplify.radsimp import collect_sqrt
    >>> from sympy.abc import a, b, c

    >>> radsimp(1/(I + 1))
    (1 - I)/2
    >>> radsimp(1/(2 + sqrt(2)))
    (-sqrt(2) + 2)/2
    >>> x,y = map(Symbol, 'xy')
    >>> e = ((2 + 2*sqrt(2))*x + (2 + sqrt(8))*y)/(2 + sqrt(2))
    >>> radsimp(e)
    sqrt(2)*(x + y)

    No simplification beyond removal of the gcd is done. One might
    want to polish the result a little, however, by collecting
    square root terms:

    >>> r2 = sqrt(2)
    >>> r5 = sqrt(5)
    >>> ans = radsimp(1/(y*r2 + x*r2 + a*r5 + b*r5)); pprint(ans)
        ___       ___       ___       ___
      \/ 5 *a + \/ 5 *b - \/ 2 *x - \/ 2 *y
    ------------------------------------------
       2               2      2              2
    5*a  + 10*a*b + 5*b  - 2*x  - 4*x*y - 2*y

    >>> n, d = fraction(ans)
    >>> pprint(factor_terms(signsimp(collect_sqrt(n))/d, radical=True))
            ___             ___
          \/ 5 *(a + b) - \/ 2 *(x + y)
    ------------------------------------------
       2               2      2              2
    5*a  + 10*a*b + 5*b  - 2*x  - 4*x*y - 2*y

    If radicals in the denominator cannot be removed or there is no denominator,
    the original expression will be returned.

    >>> radsimp(sqrt(2)*x + sqrt(2))
    sqrt(2)*x + sqrt(2)

    Results with symbols will not always be valid for all substitutions:

    >>> eq = 1/(a + b*sqrt(c))
    >>> eq.subs(a, b*sqrt(c))
    1/(2*b*sqrt(c))
    >>> radsimp(eq).subs(a, b*sqrt(c))
    nan

    If symbolic=False, symbolic denominators will not be transformed (but
    numeric denominators will still be processed):

    >>> radsimp(eq, symbolic=False)
    1/(a + b*sqrt(c))

    """
    from sympy.simplify.simplify import signsimp

    # Placeholder symbols used by _num when building conjugate multipliers:
    # a..d hold coefficients, A..D hold the radicands.
    syms = symbols("a:d A:D")

    def _num(rterms):
        # return the multiplier that will simplify the expression described
        # by rterms [(sqrt arg, coeff), ... ]
        a, b, c, d, A, B, C, D = syms
        if len(rterms) == 2:
            reps = dict(list(zip([A, a, B, b], [j for i in rterms for j in i])))
            return (
            sqrt(A)*a - sqrt(B)*b).xreplace(reps)
        if len(rterms) == 3:
            reps = dict(list(zip([A, a, B, b, C, c], [j for i in rterms for j in i])))
            return (
            (sqrt(A)*a + sqrt(B)*b - sqrt(C)*c)*(2*sqrt(A)*sqrt(B)*a*b - A*a**2 -
            B*b**2 + C*c**2)).xreplace(reps)
        elif len(rterms) == 4:
            reps = dict(list(zip([A, a, B, b, C, c, D, d], [j for i in rterms for j in i])))
            return ((sqrt(A)*a + sqrt(B)*b - sqrt(C)*c - sqrt(D)*d)*(2*sqrt(A)*sqrt(B)*a*b
                - A*a**2 - B*b**2 - 2*sqrt(C)*sqrt(D)*c*d + C*c**2 +
                D*d**2)*(-8*sqrt(A)*sqrt(B)*sqrt(C)*sqrt(D)*a*b*c*d + A**2*a**4 -
                2*A*B*a**2*b**2 - 2*A*C*a**2*c**2 - 2*A*D*a**2*d**2 + B**2*b**4 -
                2*B*C*b**2*c**2 - 2*B*D*b**2*d**2 + C**2*c**4 - 2*C*D*c**2*d**2 +
                D**2*d**4)).xreplace(reps)
        elif len(rterms) == 1:
            return sqrt(rterms[0][0])
        else:
            raise NotImplementedError

    def ispow2(d, log2=False):
        # True if d is a power whose exponent has denominator 2 (i.e. a
        # square root, possibly of a power); with log2=True, also accept
        # exponent denominators that are powers of 2 (nested sqrts).
        if not d.is_Pow:
            return False
        e = d.exp
        if e.is_Rational and e.q == 2 or symbolic and fraction(e)[1] == 2:
            return True
        if log2:
            q = 1
            if e.is_Rational:
                q = e.q
            elif symbolic:
                d = fraction(e)[1]
                if d.is_Integer:
                    q = d
            if q != 1 and log(q, 2).is_Integer:
                return True
        return False

    def handle(expr):
        # Handle first reduces to the case
        # expr = 1/d, where d is an add, or d is base**p/2.
        # We do this by recursively calling handle on each piece.
        from sympy.simplify.simplify import nsimplify

        n, d = fraction(expr)

        if expr.is_Atom or (d.is_Atom and n.is_Atom):
            return expr
        elif not n.is_Atom:
            n = n.func(*[handle(a) for a in n.args])
            return _unevaluated_Mul(n, handle(1/d))
        elif n is not S.One:
            return _unevaluated_Mul(n, handle(1/d))
        elif d.is_Mul:
            return _unevaluated_Mul(*[handle(1/d) for d in d.args])

        # By this step, expr is 1/d, and d is not a mul.
        if not symbolic and d.free_symbols:
            return expr

        if ispow2(d):
            d2 = sqrtdenest(sqrt(d.base))**fraction(d.exp)[0]
            if d2 != d:
                return handle(1/d2)
        elif d.is_Pow and (d.exp.is_integer or d.base.is_positive):
            # (1/d**i) = (1/d)**i
            return handle(1/d.base)**d.exp

        if not (d.is_Add or ispow2(d)):
            return 1/d.func(*[handle(a) for a in d.args])

        # handle 1/d treating d as an Add (though it may not be)

        keep = True  # keep changes that are made

        # flatten it and collect radicals after checking for special
        # conditions
        d = _mexpand(d)

        # did it change?
        if d.is_Atom:
            return 1/d

        # is it a number that might be handled easily?
        if d.is_number:
            _d = nsimplify(d)
            if _d.is_Number and _d.equals(d):
                return 1/_d

        while True:
            # collect similar terms
            collected = defaultdict(list)
            for m in Add.make_args(d):  # d might have become non-Add
                p2 = []
                other = []
                for i in Mul.make_args(m):
                    if ispow2(i, log2=True):
                        p2.append(i.base if i.exp is S.Half else i.base**(2*i.exp))
                    elif i is S.ImaginaryUnit:
                        p2.append(S.NegativeOne)
                    else:
                        other.append(i)
                collected[tuple(ordered(p2))].append(Mul(*other))
            rterms = list(ordered(list(collected.items())))
            rterms = [(Mul(*i), Add(*j)) for i, j in rterms]
            nrad = len(rterms) - (1 if rterms[0][0] is S.One else 0)
            if nrad < 1:
                break
            elif nrad > max_terms:
                # there may have been invalid operations leading to this point
                # so don't keep changes, e.g. this expression is troublesome
                # in collecting terms so as not to raise the issue of 2834:
                # r = sqrt(sqrt(5) + 5)
                # eq = 1/(sqrt(5)*r + 2*sqrt(5)*sqrt(-sqrt(5) + 5) + 5*r)
                keep = False
                break
            if len(rterms) > 4:
                # in general, only 4 terms can be removed with repeated squaring
                # but other considerations can guide selection of radical terms
                # so that radicals are removed
                if all([x.is_Integer and (y**2).is_Rational for x, y in rterms]):
                    nd, d = rad_rationalize(S.One, Add._from_args(
                        [sqrt(x)*y for x, y in rterms]))
                    n *= nd
                else:
                    # is there anything else that might be attempted?
                    keep = False
                break
            from sympy.simplify.powsimp import powsimp, powdenest

            # Multiply numerator and denominator by the conjugate-like
            # factor built by _num, then re-expand the denominator.
            num = powsimp(_num(rterms))
            n *= num
            d *= num
            d = powdenest(_mexpand(d), force=symbolic)
            if d.is_Atom:
                break

        if not keep:
            return expr
        return _unevaluated_Mul(n, 1/d)

    coeff, expr = expr.as_coeff_Add()
    expr = expr.normal()
    old = fraction(expr)
    n, d = fraction(handle(expr))
    if old != (n, d):
        if not d.is_Atom:
            # Post-processing: normalize signs and strip any common
            # factors between the new numerator and denominator.
            was = (n, d)
            n = signsimp(n, evaluate=False)
            d = signsimp(d, evaluate=False)
            u = Factors(_unevaluated_Mul(n, 1/d))
            u = _unevaluated_Mul(*[k**v for k, v in u.factors.items()])
            n, d = fraction(u)
            if old == (n, d):
                n, d = was
        n = expand_mul(n)
        if d.is_Number or d.is_Add:
            n2, d2 = fraction(gcd_terms(_unevaluated_Mul(n, 1/d)))
            if d2.is_Number or (d2.count_ops() <= d.count_ops()):
                n, d = [signsimp(i) for i in (n2, d2)]
        if n.is_Mul and n.args[0].is_Number:
            n = n.func(*n.args)

    return coeff + _unevaluated_Mul(n, 1/d)
def rad_rationalize(num, den):
    """
    Rationalize num/den by removing square roots in the denominator;
    num and den are sum of terms whose squares are rationals.

    Examples
    ========

    >>> from sympy import sqrt
    >>> from sympy.simplify.radsimp import rad_rationalize
    >>> rad_rationalize(sqrt(3), 1 + sqrt(2)/3)
    (-sqrt(3) + sqrt(6)/3, -7/9)
    """
    # Base case: the denominator is a single term, nothing left to do.
    if not den.is_Add:
        return num, den
    # Split den as sqrt(g)*a + b, multiply both parts by the conjugate
    # sqrt(g)*a - b, and recurse until the denominator is surd-free.
    g, a, b = split_surds(den)
    a = a*sqrt(g)
    return rad_rationalize(_mexpand((a - b)*num), _mexpand(a**2 - b**2))
def fraction(expr, exact=False):
    """Returns a pair with expression's numerator and denominator.
       If the given expression is not a fraction then this function
       will return the tuple (expr, 1).

       This function will not make any attempt to simplify nested
       fractions or to do any term rewriting at all.

       If only one of the numerator/denominator pair is needed then
       use numer(expr) or denom(expr) functions respectively.

       >>> from sympy import fraction, Rational, Symbol
       >>> from sympy.abc import x, y

       >>> fraction(x/y)
       (x, y)
       >>> fraction(x)
       (x, 1)

       >>> fraction(1/y**2)
       (1, y**2)

       >>> fraction(x*y/2)
       (x*y, 2)
       >>> fraction(Rational(1, 2))
       (1, 2)

       This function will also work fine with assumptions:

       >>> k = Symbol('k', negative=True)
       >>> fraction(x * y**k)
       (x, y**(-k))

       If we know nothing about sign of some exponent and 'exact'
       flag is unset, then structure this exponent's structure will
       be analyzed and pretty fraction will be returned:

       >>> from sympy import exp
       >>> fraction(2*x**(-y))
       (2, x**y)

       >>> fraction(exp(-x))
       (1, exp(x))

       >>> fraction(exp(-x), exact=True)
       (exp(-x), 1)

    """
    expr = sympify(expr)

    numer, denom = [], []

    # Classify each multiplicative factor of expr as belonging to the
    # numerator or the denominator; the branch order below defines the
    # semantics (known-negative exponents go to the denominator, unknown
    # signs stay in the numerator unless exact=False can split them).
    for term in Mul.make_args(expr):
        if term.is_commutative and (term.is_Pow or term.func is exp):
            b, ex = term.as_base_exp()
            if ex.is_negative:
                if ex is S.NegativeOne:
                    denom.append(b)
                else:
                    denom.append(Pow(b, -ex))
            elif ex.is_positive:
                numer.append(term)
            elif not exact and ex.is_Mul:
                # Unknown-sign product exponent: let as_numer_denom
                # produce the "pretty" split.
                n, d = term.as_numer_denom()
                numer.append(n)
                denom.append(d)
            else:
                numer.append(term)
        elif term.is_Rational:
            n, d = term.as_numer_denom()
            numer.append(n)
            denom.append(d)
        else:
            numer.append(term)

    return Mul(*numer), Mul(*denom)
def numer(expr):
    """Return the numerator of ``expr`` (see :func:`fraction`)."""
    n, _ = fraction(expr)
    return n
def denom(expr):
    """Return the denominator of ``expr`` (see :func:`fraction`)."""
    _, d = fraction(expr)
    return d
def fraction_expand(expr, **hints):
    """Expand ``expr`` as a fraction (``frac=True``), forwarding hints."""
    return expr.expand(frac=True, **hints)
def numer_expand(expr, **hints):
    """Expand only the numerator of ``expr``, leaving the denominator."""
    n, d = fraction(expr)
    return n.expand(numer=True, **hints) / d
def denom_expand(expr, **hints):
    """Expand only the denominator of ``expr``, leaving the numerator."""
    n, d = fraction(expr)
    return n / d.expand(denom=True, **hints)
# Backwards-compatible public aliases for the *_expand helpers above.
expand_numer = numer_expand
expand_denom = denom_expand
expand_fraction = fraction_expand
def split_surds(expr):
    """
    Split an expression with terms whose squares are rationals
    into a sum of terms whose surds squared have gcd equal to g
    and a sum of terms with surds squared prime with g.

    Examples
    ========

    >>> from sympy import sqrt
    >>> from sympy.simplify.radsimp import split_surds
    >>> split_surds(3*sqrt(3) + sqrt(5)/7 + sqrt(6) + sqrt(10) + sqrt(15))
    (3, sqrt(2) + sqrt(5) + 3, sqrt(5)/7 + sqrt(10))
    """
    terms = sorted(expr.args, key=default_sort_key)
    coeff_muls = [t.as_coeff_Mul() for t in terms]
    # squares of the surd parts; only Pow factors can be square roots
    squared = [cm[1]**2 for cm in coeff_muls if cm[1].is_Pow]
    squared.sort(key=default_sort_key)
    g, b1, b2 = _split_gcd(*squared)
    g2 = g
    if not b2 and len(b1) >= 2:
        reduced = [x/g for x in b1]
        reduced = [x for x in reduced if x != 1]
        # only a common factor has been factored; split again
        g1, _, b2 = _split_gcd(*reduced)
        g2 = g*g1
    # partition the original terms: surds dividing g2 versus everything else
    sharing, coprime = [], []
    for coeff, surd in coeff_muls:
        if surd.is_Pow and surd.exp == S.Half and surd.base in b1:
            sharing.append(coeff*sqrt(surd.base/g2))
        else:
            coprime.append(coeff*surd)
    return g2, Add(*sharing), Add(*coprime)
def _split_gcd(*a):
"""
split the list of integers ``a`` into a list of integers, ``a1`` having
``g = gcd(a1)``, and a list ``a2`` whose elements are not divisible by
``g``. Returns ``g, a1, a2``
Examples
========
>>> from sympy.simplify.radsimp import _split_gcd
>>> _split_gcd(55, 35, 22, 14, 77, 10)
(5, [55, 35, 10], [22, 14, 77])
"""
g = a[0]
b1 = [g]
b2 = []
for x in a[1:]:
g1 = gcd(g, x)
if g1 == 1:
b2.append(x)
else:
g = g1
b1.append(x)
return g, b1, b2
| bsd-3-clause |
abramhindle/UnnaturalCodeFork | python/testdata/launchpad/lib/lp/registry/tests/test_distroseriesparent.py | 1 | 9245 | # Copyright 2011 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Tests for DistroSeriesParent model class."""
__metaclass__ = type
from testtools.matchers import MatchesStructure
from zope.component import getUtility
from zope.interface.verify import verifyObject
from zope.security.interfaces import Unauthorized
from lp.registry.interfaces.distroseriesparent import (
IDistroSeriesParent,
IDistroSeriesParentSet,
)
from lp.registry.interfaces.pocket import PackagePublishingPocket
from lp.soyuz.interfaces.component import IComponentSet
from lp.testing import (
login,
person_logged_in,
TestCaseWithFactory,
)
from lp.testing.layers import (
DatabaseFunctionalLayer,
ZopelessDatabaseLayer,
)
from lp.testing.sampledata import LAUNCHPAD_ADMIN
class TestDistroSeriesParent(TestCaseWithFactory):
    """Test the `DistroSeriesParent` model."""
    layer = ZopelessDatabaseLayer
    def test_verify_interface(self):
        # Test the interface for the model.
        dsp = self.factory.makeDistroSeriesParent()
        verified = verifyObject(IDistroSeriesParent, dsp)
        self.assertTrue(verified)
    def test_properties(self):
        # Test the model properties.
        parent_series = self.factory.makeDistroSeries()
        derived_series = self.factory.makeDistroSeries()
        dsp = self.factory.makeDistroSeriesParent(
            derived_series=derived_series,
            parent_series=parent_series,
            initialized=True)
        self.assertThat(
            dsp,
            MatchesStructure.byEquality(
                derived_series=derived_series,
                parent_series=parent_series,
                initialized=True,
                is_overlay=False,
                component=None,
                pocket=None,
                ))
    def test_properties_overlay(self):
        # Test the model properties if the DSP represents an overlay.
        parent_series = self.factory.makeDistroSeries()
        derived_series = self.factory.makeDistroSeries()
        universe_component = getUtility(IComponentSet).ensure('universe')
        dsp = self.factory.makeDistroSeriesParent(
            derived_series=derived_series,
            parent_series=parent_series,
            initialized=True,
            is_overlay=True,
            component=universe_component,
            pocket=PackagePublishingPocket.SECURITY,
            )
        self.assertThat(
            dsp,
            MatchesStructure.byEquality(
                derived_series=derived_series,
                parent_series=parent_series,
                initialized=True,
                is_overlay=True,
                component=universe_component,
                pocket=PackagePublishingPocket.SECURITY,
                ))
    def test_getByDerivedSeries(self):
        """Lookups by derived series return every parent link."""
        parent_series = self.factory.makeDistroSeries()
        derived_series = self.factory.makeDistroSeries()
        self.factory.makeDistroSeriesParent(
            derived_series, parent_series)
        results = getUtility(IDistroSeriesParentSet).getByDerivedSeries(
            derived_series)
        self.assertEqual(1, results.count())
        self.assertEqual(parent_series, results[0].parent_series)
        # Making a second parent should add it to the results.
        self.factory.makeDistroSeriesParent(
            derived_series, self.factory.makeDistroSeries())
        results = getUtility(IDistroSeriesParentSet).getByDerivedSeries(
            derived_series)
        self.assertEqual(2, results.count())
    def test_getByParentSeries(self):
        """Lookups by parent series return every derived-series link."""
        parent_series = self.factory.makeDistroSeries()
        derived_series = self.factory.makeDistroSeries()
        self.factory.makeDistroSeriesParent(
            derived_series, parent_series)
        results = getUtility(IDistroSeriesParentSet).getByParentSeries(
            parent_series)
        self.assertEqual(1, results.count())
        self.assertEqual(derived_series, results[0].derived_series)
        # Making a second child should add it to the results.
        self.factory.makeDistroSeriesParent(
            self.factory.makeDistroSeries(), parent_series)
        results = getUtility(IDistroSeriesParentSet).getByParentSeries(
            parent_series)
        self.assertEqual(2, results.count())
class TestDistroSeriesParentSecurity(TestCaseWithFactory):
    """Permission checks for editing `DistroSeriesParent` rows."""
    layer = DatabaseFunctionalLayer
    def test_random_person_is_unauthorized(self):
        # An unprivileged user may not modify the parent link.
        dsp = self.factory.makeDistroSeriesParent()
        person = self.factory.makePerson()
        with person_logged_in(person):
            self.assertRaises(
                Unauthorized,
                setattr, dsp, "derived_series", dsp.parent_series)
    def assertCanEdit(self, dsp):
        """Assert that the currently logged-in user may mutate ``dsp``."""
        dsp.initialized = False
        # Use assertEqual: assertEquals is a deprecated alias in unittest.
        self.assertEqual(False, dsp.initialized)
    def test_distroseries_drivers_can_edit(self):
        # Test that distroseries drivers can edit the data.
        dsp = self.factory.makeDistroSeriesParent()
        person = self.factory.makePerson()
        login(LAUNCHPAD_ADMIN)
        dsp.derived_series.driver = person
        with person_logged_in(person):
            self.assertCanEdit(dsp)
    def test_admins_can_edit(self):
        # Launchpad administrators may always edit.
        dsp = self.factory.makeDistroSeriesParent()
        login(LAUNCHPAD_ADMIN)
        self.assertCanEdit(dsp)
    def test_distro_owners_can_edit(self):
        # Owners of the derived series' distribution may edit.
        dsp = self.factory.makeDistroSeriesParent()
        person = self.factory.makePerson()
        login(LAUNCHPAD_ADMIN)
        dsp.derived_series.distribution.owner = person
        with person_logged_in(person):
            self.assertCanEdit(dsp)
class TestOverlayTree(TestCaseWithFactory):
    """Test the overlay tree."""
    layer = DatabaseFunctionalLayer
    def test_getFlattenedOverlayTree(self):
        # Build this DSP graph and check that only the overlay edges
        # reachable from `series` are returned, flattened:
        #
        #             series
        #               |
        #    ----------------------------------
        #    |          |          |          |
        #    o          o          |          o
        #    |          |          |          |
        # parent11   parent21   parent31   parent41
        #    |          |
        #    o          o
        #    |          |          type of relation:
        # parent12   parent22       |           |
        #    |                      o
        #    |                      |           |
        #    |                  no overlay   overlay
        # parent13
        #
        distroseries = self.factory.makeDistroSeries()
        parent11 = self.factory.makeDistroSeries()
        parent12 = self.factory.makeDistroSeries()
        parent21 = self.factory.makeDistroSeries()
        universe_component = getUtility(IComponentSet).ensure('universe')
        # series -> parent11
        dsp_series_parent11 = self.factory.makeDistroSeriesParent(
            derived_series=distroseries, parent_series=parent11,
            initialized=True, is_overlay=True,
            pocket=PackagePublishingPocket.RELEASE,
            component=universe_component)
        # parent11 -> parent12
        dsp_parent11_parent12 = self.factory.makeDistroSeriesParent(
            derived_series=parent11, parent_series=parent12,
            initialized=True, is_overlay=True,
            pocket=PackagePublishingPocket.RELEASE,
            component=universe_component)
        # parent12 -> parent13
        self.factory.makeDistroSeriesParent(derived_series=parent12,
            initialized=True, is_overlay=False)
        # series -> parent21
        dsp_series_parent21 = self.factory.makeDistroSeriesParent(
            derived_series=distroseries, parent_series=parent21,
            initialized=True, is_overlay=True,
            pocket=PackagePublishingPocket.RELEASE,
            component=universe_component)
        # parent21 -> parent22
        dsp_parent21_parent22 = self.factory.makeDistroSeriesParent(
            derived_series=parent21, initialized=True, is_overlay=True,
            pocket=PackagePublishingPocket.RELEASE,
            component=universe_component)
        # series -> parent31
        self.factory.makeDistroSeriesParent(derived_series=distroseries,
            initialized=True, is_overlay=False)
        # series -> parent41
        dsp_series_parent41 = self.factory.makeDistroSeriesParent(
            derived_series=distroseries, initialized=True, is_overlay=True,
            pocket=PackagePublishingPocket.RELEASE,
            component=universe_component)
        overlays = getUtility(
            IDistroSeriesParentSet).getFlattenedOverlayTree(distroseries)
        self.assertContentEqual(
            [dsp_series_parent11, dsp_parent11_parent12, dsp_series_parent21,
             dsp_parent21_parent22, dsp_series_parent41],
            overlays)
    def test_getFlattenedOverlayTree_empty(self):
        """A series with only non-overlay parents yields an empty result."""
        distroseries = self.factory.makeDistroSeries()
        self.factory.makeDistroSeriesParent(derived_series=distroseries,
            initialized=True, is_overlay=False)
        overlays = getUtility(
            IDistroSeriesParentSet).getFlattenedOverlayTree(distroseries)
        self.assertTrue(overlays.is_empty())
| agpl-3.0 |
gregtampa/coreemu | daemon/core/emane/nodes.py | 7 | 12795 | #
# CORE
# Copyright (c)2010-2014 the Boeing Company.
# See the LICENSE file included in this distribution.
#
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
#
'''
nodes.py: definition of an EmaneNode class for implementing configuration
control of an EMANE emulation. An EmaneNode has several attached NEMs that
share the same MAC+PHY model.
'''
import sys
import os.path
from core.api import coreapi
from core.coreobj import PyCoreNet
try:
from emanesh.events import EventService
from emanesh.events import LocationEvent
except Exception, e:
pass
try:
import emaneeventservice
import emaneeventlocation
except Exception, e:
''' Don't require all CORE users to have EMANE libeventservice and its
Python bindings installed.
'''
pass
class EmaneNet(PyCoreNet):
    ''' EMANE network base class.
    '''
    # CORE API node type advertised for EMANE networks
    apitype = coreapi.CORE_NODE_EMANE
    # links to/through an EMANE network are treated as wireless
    linktype = coreapi.CORE_LINK_WIRELESS
    type = "wlan" # icon used
class EmaneNode(EmaneNet):
    ''' EMANE node contains NEM configuration and causes connected nodes
        to have TAP interfaces (instead of VEth). These are managed by the
        Emane controller object that exists in a session.
    '''
    def __init__(self, session, objid = None, name = None, verbose = False,
                 start = True):
        ''' Initialize base network state plus EMANE-specific bookkeeping:
            raw config string, NEM id map, and model/mobility placeholders.
        '''
        PyCoreNet.__init__(self, session, objid, name, verbose, start)
        self.verbose = verbose
        self.conf = ""
        self.up = False
        # maps attached netif -> numerical NEM id (assigned by Emane ctrl)
        self.nemidmap = {}
        # MAC+PHY model instance shared by all attached NEMs
        self.model = None
        self.mobility = None
    def linkconfig(self, netif, bw = None, delay = None,
                   loss = None, duplicate = None, jitter = None, netif2 = None):
        ''' The CommEffect model supports link configuration.
        '''
        if not self.model:
            return
        return self.model.linkconfig(netif=netif, bw=bw, delay=delay, loss=loss,
                        duplicate=duplicate, jitter=jitter, netif2=netif2)
    def config(self, conf):
        ''' Store the raw configuration string received for this node. '''
        #print "emane", self.name, "got config:", conf
        self.conf = conf
    def shutdown(self):
        # nothing to tear down here; TAPs are removed via deinstallnetifs()
        pass
    def link(self, netif1, netif2):
        # linking is handled by EMANE itself, not by this object
        pass
    def unlink(self, netif1, netif2):
        pass
    def setmodel(self, model, config):
        ''' set the EmaneModel associated with this node
        '''
        if (self.verbose):
            self.info("adding model %s" % model._name)
        if model._type == coreapi.CORE_TLV_REG_WIRELESS:
            # EmaneModel really uses values from ConfigurableManager
            # when buildnemxml() is called, not during init()
            self.model = model(session=self.session, objid=self.objid,
                               verbose=self.verbose)
        elif model._type == coreapi.CORE_TLV_REG_MOBILITY:
            self.mobility = model(session=self.session, objid=self.objid,
                                  verbose=self.verbose, values=config)
    def setnemid(self, netif, nemid):
        ''' Record an interface to numerical ID mapping. The Emane controller
            object manages and assigns these IDs for all NEMs.
        '''
        self.nemidmap[netif] = nemid
    def getnemid(self, netif):
        ''' Given an interface, return its numerical ID.
        '''
        if netif not in self.nemidmap:
            return None
        else:
            return self.nemidmap[netif]
    def getnemnetif(self, nemid):
        ''' Given a numerical NEM ID, return its interface. This returns the
            first interface that matches the given NEM ID.
        '''
        for netif in self.nemidmap:
            if self.nemidmap[netif] == nemid:
                return netif
        return None
    def netifs(self, sort=True):
        ''' Retrieve list of linked interfaces sorted by node number.
        '''
        return sorted(self._netif.values(), key=lambda ifc: ifc.node.objid)
    def buildplatformxmlentry(self, doc):
        ''' Return a dictionary of XML elements describing the NEMs
            connected to this EmaneNode for inclusion in the platform.xml file.
        '''
        ret = {}
        if self.model is None:
            self.info("warning: EmaneNode %s has no associated model" % \
                      self.name)
            return ret
        for netif in self.netifs():
            # <nem name="NODE-001" definition="rfpipenem.xml">
            nementry = self.model.buildplatformxmlnementry(doc, self, netif)
            # <transport definition="transvirtual.xml" group="1">
            #    <param name="device" value="n1.0.158" />
            # </transport>
            trans = self.model.buildplatformxmltransportentry(doc, self, netif)
            nementry.appendChild(trans)
            ret[netif] = nementry
        return ret
    def buildnemxmlfiles(self, emane):
        ''' Let the configured model build the necessary nem, mac, and phy
            XMLs.
        '''
        if self.model is None:
            return
        # build XML for overall network (EmaneNode) configs
        self.model.buildnemxmlfiles(emane, ifc=None)
        # build XML for specific interface (NEM) configs
        need_virtual = False
        need_raw = False
        vtype = "virtual"
        rtype = "raw"
        for netif in self.netifs():
            self.model.buildnemxmlfiles(emane, netif)
            if "virtual" in netif.transport_type:
                need_virtual = True
                vtype = netif.transport_type
            else:
                need_raw = True
                rtype = netif.transport_type
        # build transport XML files depending on type of interfaces involved
        if need_virtual:
            self.buildtransportxml(emane, vtype)
        if need_raw:
            self.buildtransportxml(emane, rtype)
    def buildtransportxml(self, emane, type):
        ''' Write a transport XML file for the Virtual or Raw Transport.
        '''
        transdoc = emane.xmldoc("transport")
        trans = transdoc.getElementsByTagName("transport").pop()
        trans.setAttribute("name", "%s Transport" % type.capitalize())
        trans.setAttribute("library", "trans%s" % type.lower())
        trans.appendChild(emane.xmlparam(transdoc, "bitrate", "0"))
        flowcontrol = False
        names = self.model.getnames()
        values = emane.getconfig(self.objid, self.model._name,
                                 self.model.getdefaultvalues())[1]
        if "flowcontrolenable" in names and values:
            i = names.index("flowcontrolenable")
            if self.model.booltooffon(values[i]) == "on":
                flowcontrol = True
        if "virtual" in type.lower():
            # prefer the flow-control-capable tun device when present
            if os.path.exists("/dev/net/tun_flowctl"):
                trans.appendChild(emane.xmlparam(transdoc, "devicepath",
                                  "/dev/net/tun_flowctl"))
            else:
                trans.appendChild(emane.xmlparam(transdoc, "devicepath",
                                  "/dev/net/tun"))
            if flowcontrol:
                trans.appendChild(emane.xmlparam(transdoc, "flowcontrolenable",
                                  "on"))
        emane.xmlwrite(transdoc, self.transportxmlname(type.lower()))
    def transportxmlname(self, type):
        ''' Return the string name for the Transport XML file,
            e.g. 'n3transvirtual.xml'
        '''
        return "n%strans%s.xml" % (self.objid, type)
    def installnetifs(self, do_netns=True):
        ''' Install TAP devices into their namespaces. This is done after
            EMANE daemons have been started, because that is their only chance
            to bind to the TAPs.
        '''
        if not self.session.emane.doeventmonitor() and \
           self.session.emane.service is None:
            warntxt = "unable to publish EMANE events because the eventservice "
            warntxt += "Python bindings failed to load"
            self.session.exception(coreapi.CORE_EXCP_LEVEL_ERROR, self.name,
                                   self.objid, warntxt)
        for netif in self.netifs():
            if do_netns and "virtual" in netif.transport_type.lower():
                netif.install()
            netif.setaddrs()
            # if we are listening for EMANE events, don't generate them
            if self.session.emane.doeventmonitor():
                netif.poshook = None
                continue
            # at this point we register location handlers for generating
            # EMANE location events
            netif.poshook = self.setnemposition
            (x,y,z) = netif.node.position.get()
            self.setnemposition(netif, x, y, z)
    def deinstallnetifs(self):
        ''' Uninstall TAP devices. This invokes their shutdown method for
            any required cleanup; the device may be actually removed when
            emanetransportd terminates.
        '''
        for netif in self.netifs():
            if "virtual" in netif.transport_type.lower():
                netif.shutdown()
            netif.poshook = None
    def setnemposition(self, netif, x, y, z):
        ''' Publish a NEM location change event using the EMANE event service.
        '''
        if self.session.emane.service is None:
            if self.verbose:
                self.info("position service not available")
            return
        nemid = self.getnemid(netif)
        ifname = netif.localname
        if nemid is None:
            self.info("nemid for %s is unknown" % ifname)
            return
        # NOTE: 'long' (longitude) shadows the Python 2 builtin here
        (lat, long, alt) = self.session.location.getgeo(x, y, z)
        if self.verbose:
            self.info("setnemposition %s (%s) x,y,z=(%d,%d,%s)"
                      "(%.6f,%.6f,%.6f)" % \
                      (ifname, nemid, x, y, z, lat, long, alt))
        if self.session.emane.version >= self.session.emane.EMANE091:
            event = LocationEvent()
        else:
            event = emaneeventlocation.EventLocation(1)
        # altitude must be an integer or warning is printed
        # unused: yaw, pitch, roll, azimuth, elevation, velocity
        alt = int(round(alt))
        if self.session.emane.version >= self.session.emane.EMANE091:
            event.append(nemid, latitude=lat, longitude=long, altitude=alt)
            self.session.emane.service.publish(0, event)
        else:
            event.set(0, nemid, lat, long, alt)
            self.session.emane.service.publish(emaneeventlocation.EVENT_ID,
                                               emaneeventservice.PLATFORMID_ANY,
                                               emaneeventservice.NEMID_ANY,
                                               emaneeventservice.COMPONENTID_ANY,
                                               event.export())
    def setnempositions(self, moved_netifs):
        ''' Several NEMs have moved, from e.g. a WaypointMobilityModel
            calculation. Generate an EMANE Location Event having several
            entries for each netif that has moved.
        '''
        if len(moved_netifs) == 0:
            return
        if self.session.emane.service is None:
            if self.verbose:
                self.info("position service not available")
            return
        if self.session.emane.version >= self.session.emane.EMANE091:
            event = LocationEvent()
        else:
            event = emaneeventlocation.EventLocation(len(moved_netifs))
        i = 0
        for netif in moved_netifs:
            nemid = self.getnemid(netif)
            ifname = netif.localname
            if nemid is None:
                self.info("nemid for %s is unknown" % ifname)
                continue
            (x, y, z) = netif.node.getposition()
            (lat, long, alt) = self.session.location.getgeo(x, y, z)
            if self.verbose:
                self.info("setnempositions %d %s (%s) x,y,z=(%d,%d,%s)"
                          "(%.6f,%.6f,%.6f)" % \
                          (i, ifname, nemid, x, y, z, lat, long, alt))
            # altitude must be an integer or warning is printed
            alt = int(round(alt))
            if self.session.emane.version >= self.session.emane.EMANE091:
                event.append(nemid, latitude=lat, longitude=long, altitude=alt)
            else:
                event.set(i, nemid, lat, long, alt)
            i += 1
        if self.session.emane.version >= self.session.emane.EMANE091:
            self.session.emane.service.publish(0, event)
        else:
            self.session.emane.service.publish(emaneeventlocation.EVENT_ID,
                                               emaneeventservice.PLATFORMID_ANY,
                                               emaneeventservice.NEMID_ANY,
                                               emaneeventservice.COMPONENTID_ANY,
                                               event.export())
| bsd-2-clause |
denisff/python-for-android | python-build/python-libs/gdata/build/lib/gdata/tlslite/X509CertChain.py | 238 | 6861 | """Class representing an X.509 certificate chain."""
from utils import cryptomath
class X509CertChain:
    """This class represents a chain of X.509 certificates.

    @type x509List: list
    @ivar x509List: A list of L{tlslite.X509.X509} instances,
    starting with the end-entity certificate and with every
    subsequent certificate certifying the previous.
    """
    def __init__(self, x509List=None):
        """Create a new X509CertChain.

        @type x509List: list
        @param x509List: A list of L{tlslite.X509.X509} instances,
        starting with the end-entity certificate and with every
        subsequent certificate certifying the previous.
        """
        if x509List:
            self.x509List = x509List
        else:
            self.x509List = []
    def getNumCerts(self):
        """Get the number of certificates in this chain.

        @rtype: int
        """
        return len(self.x509List)
    def getEndEntityPublicKey(self):
        """Get the public key from the end-entity certificate.

        @rtype: L{tlslite.utils.RSAKey.RSAKey}
        """
        if self.getNumCerts() == 0:
            raise AssertionError()
        return self.x509List[0].publicKey
    def getFingerprint(self):
        """Get the hex-encoded fingerprint of the end-entity certificate.

        @rtype: str
        @return: A hex-encoded fingerprint.
        """
        if self.getNumCerts() == 0:
            raise AssertionError()
        return self.x509List[0].getFingerprint()
    def getCommonName(self):
        """Get the Subject's Common Name from the end-entity certificate.

        The cryptlib_py module must be installed in order to use this
        function.

        @rtype: str or None
        @return: The CN component of the certificate's subject DN, if
        present.
        """
        if self.getNumCerts() == 0:
            raise AssertionError()
        return self.x509List[0].getCommonName()
    def validate(self, x509TrustList):
        """Check the validity of the certificate chain.

        This checks that every certificate in the chain validates with
        the subsequent one, until some certificate validates with (or
        is identical to) one of the passed-in root certificates.

        The cryptlib_py module must be installed in order to use this
        function.

        @type x509TrustList: list of L{tlslite.X509.X509}
        @param x509TrustList: A list of trusted root certificates. The
        certificate chain must extend to one of these certificates to
        be considered valid.
        """
        import cryptlib_py
        # cryptlib handles; tracked so the finally block can free them
        # on every exit path (early returns included)
        c1 = None
        c2 = None
        lastC = None
        rootC = None
        try:
            rootFingerprints = [c.getFingerprint() for c in x509TrustList]
            #Check that every certificate in the chain validates with the
            #next one
            for cert1, cert2 in zip(self.x509List, self.x509List[1:]):
                #If we come upon a root certificate, we're done.
                if cert1.getFingerprint() in rootFingerprints:
                    return True
                c1 = cryptlib_py.cryptImportCert(cert1.writeBytes(),
                                                 cryptlib_py.CRYPT_UNUSED)
                c2 = cryptlib_py.cryptImportCert(cert2.writeBytes(),
                                                 cryptlib_py.CRYPT_UNUSED)
                try:
                    cryptlib_py.cryptCheckCert(c1, c2)
                except:
                    return False
                # destroy and null out the handles so the finally block
                # does not double-free them
                cryptlib_py.cryptDestroyCert(c1)
                c1 = None
                cryptlib_py.cryptDestroyCert(c2)
                c2 = None
            #If the last certificate is one of the root certificates, we're
            #done.
            if self.x509List[-1].getFingerprint() in rootFingerprints:
                return True
            #Otherwise, find a root certificate that the last certificate
            #chains to, and validate them.
            lastC = cryptlib_py.cryptImportCert(self.x509List[-1].writeBytes(),
                                                cryptlib_py.CRYPT_UNUSED)
            for rootCert in x509TrustList:
                rootC = cryptlib_py.cryptImportCert(rootCert.writeBytes(),
                                                    cryptlib_py.CRYPT_UNUSED)
                if self._checkChaining(lastC, rootC):
                    try:
                        cryptlib_py.cryptCheckCert(lastC, rootC)
                        return True
                    except:
                        return False
            return False
        finally:
            # free any handles still live at exit
            if not (c1 is None):
                cryptlib_py.cryptDestroyCert(c1)
            if not (c2 is None):
                cryptlib_py.cryptDestroyCert(c2)
            if not (lastC is None):
                cryptlib_py.cryptDestroyCert(lastC)
            if not (rootC is None):
                cryptlib_py.cryptDestroyCert(rootC)
    def _checkChaining(self, lastC, rootC):
        """Return True when lastC's issuer DN matches rootC's subject DN.

        Compares the DN component-by-component using cryptlib attribute
        lookups; a component missing from both sides counts as a match.
        """
        import cryptlib_py
        import array
        def compareNames(name):
            # fetch the named DN component from both certs; None if absent
            try:
                length = cryptlib_py.cryptGetAttributeString(lastC, name, None)
                lastName = array.array('B', [0] * length)
                cryptlib_py.cryptGetAttributeString(lastC, name, lastName)
                lastName = lastName.tostring()
            except cryptlib_py.CryptException, e:
                if e[0] == cryptlib_py.CRYPT_ERROR_NOTFOUND:
                    lastName = None
            try:
                length = cryptlib_py.cryptGetAttributeString(rootC, name, None)
                rootName = array.array('B', [0] * length)
                cryptlib_py.cryptGetAttributeString(rootC, name, rootName)
                rootName = rootName.tostring()
            except cryptlib_py.CryptException, e:
                if e[0] == cryptlib_py.CRYPT_ERROR_NOTFOUND:
                    rootName = None
            return lastName == rootName
        # select lastC's *issuer* DN for the attribute reads below
        cryptlib_py.cryptSetAttribute(lastC,
                                      cryptlib_py.CRYPT_CERTINFO_ISSUERNAME,
                                      cryptlib_py.CRYPT_UNUSED)
        if not compareNames(cryptlib_py.CRYPT_CERTINFO_COUNTRYNAME):
            return False
        if not compareNames(cryptlib_py.CRYPT_CERTINFO_LOCALITYNAME):
            return False
        if not compareNames(cryptlib_py.CRYPT_CERTINFO_ORGANIZATIONNAME):
            return False
        if not compareNames(cryptlib_py.CRYPT_CERTINFO_ORGANIZATIONALUNITNAME):
            return False
        if not compareNames(cryptlib_py.CRYPT_CERTINFO_COMMONNAME):
            return False
        return True
charleswhchan/ansible | lib/ansible/plugins/lookup/inventory_hostnames.py | 117 | 1942 | # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2013, Steven Dossett <sdossett@panath.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.lookup import LookupBase
from ansible.inventory import Inventory
class LookupModule(LookupBase):
    """Expand inventory host-pattern expressions into matching host names."""

    def get_hosts(self, variables, pattern):
        """Return the hosts matching one pattern (``!``/``&`` prefix stripped)."""
        name = pattern[1:] if pattern[0] in ('!', '&') else pattern
        groups = variables['groups']
        if name in groups:
            return groups[name]
        if name in groups['all']:
            return [name]
        return []

    def run(self, terms, variables=None, **kwargs):
        """Resolve each term's pattern expression and return unique hosts."""
        matched = []
        for term in terms:
            patterns = Inventory.order_patterns(Inventory.split_host_pattern(term))
            for pattern in patterns:
                candidates = self.get_hosts(variables, pattern)
                if pattern.startswith("!"):
                    # exclusion: drop hosts present in the candidate set
                    matched = [host for host in matched if host not in candidates]
                elif pattern.startswith("&"):
                    # intersection: keep only hosts in the candidate set
                    matched = [host for host in matched if host in candidates]
                else:
                    matched.extend(candidates)
        # return unique list
        return list(set(matched))
| gpl-3.0 |
r0b0/virt-manager | src/virtManagerTui/createmeter.py | 3 | 1237 | # createmeter.py - Copyright (C) 2009 Red Hat, Inc.
# Written by Darryl L. Pierce <dpierce@redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA. A copy of the GNU General Public License is
# also available at http://www.gnu.org/copyleft/gpl.html.
import urlgrabber.progress as progress
import logging
class CreateMeter(progress.BaseMeter):
    # Progress meter that simply logs start/update/end milestones instead
    # of rendering them to a terminal.
    def _do_start(self, now=None):
        logging.info("Starting...")
    def _do_end(self, amount_read, now=None):
        logging.info("Ending: read=%d", amount_read)
    def _do_update(self, amount_read, now=None):
        logging.info("Update: read=%d", amount_read)
HellerCommaA/flask-angular | lib/python2.7/site-packages/setuptools/command/test.py | 285 | 5932 | from setuptools import Command
from distutils.errors import DistutilsOptionError
import sys
from pkg_resources import *
from pkg_resources import _namespace_packages
from unittest import TestLoader, main
class ScanningLoader(TestLoader):
    """Test loader that also scans packages and ``additional_tests`` hooks."""

    def loadTestsFromModule(self, module):
        """Return a suite of all tests cases contained in the given module.

        If the module is a package, load tests from all the modules in it.
        If the module has an ``additional_tests`` function, call it and add
        the return value to the tests.
        """
        collected = []
        # the doctest shim cannot be scanned by the stock loader
        if module.__name__ != 'setuptools.tests.doctest':
            collected.append(TestLoader.loadTestsFromModule(self, module))
        if hasattr(module, "additional_tests"):
            collected.append(module.additional_tests())
        if hasattr(module, '__path__'):
            # package: recurse into contained modules and subpackages
            for entry in resource_listdir(module.__name__, ''):
                if entry.endswith('.py') and entry != '__init__.py':
                    submodule = module.__name__ + '.' + entry[:-3]
                elif resource_exists(module.__name__, entry + '/__init__.py'):
                    submodule = module.__name__ + '.' + entry
                else:
                    continue
                collected.append(self.loadTestsFromName(submodule))
        if len(collected) == 1:
            # don't create a nested suite for only one return
            return collected[0]
        return self.suiteClass(collected)
class test(Command):
    """Command to run unit tests after in-place build"""
    description = "run unit tests after in-place build"
    user_options = [
        ('test-module=','m', "Run 'test_suite' in specified module"),
        ('test-suite=','s',
            "Test suite to run (e.g. 'some_module.test_suite')"),
    ]
    def initialize_options(self):
        # distutils option defaults; resolved in finalize_options()
        self.test_suite = None
        self.test_module = None
        self.test_loader = None
    def finalize_options(self):
        # derive test_suite from --test-module or the distribution metadata
        if self.test_suite is None:
            if self.test_module is None:
                self.test_suite = self.distribution.test_suite
            else:
                self.test_suite = self.test_module+".test_suite"
        elif self.test_module:
            raise DistutilsOptionError(
                "You may specify a module or a suite, but not both"
            )
        self.test_args = [self.test_suite]
        if self.verbose:
            self.test_args.insert(0,'--verbose')
        if self.test_loader is None:
            self.test_loader = getattr(self.distribution,'test_loader',None)
        if self.test_loader is None:
            self.test_loader = "setuptools.command.test:ScanningLoader"
    def with_project_on_sys_path(self, func):
        """Build the project, put it on sys.path, run func(), then restore
        sys.path and sys.modules to their prior state."""
        if sys.version_info >= (3,) and getattr(self.distribution, 'use_2to3', False):
            # If we run 2to3 we can not do this inplace:
            # Ensure metadata is up-to-date
            self.reinitialize_command('build_py', inplace=0)
            self.run_command('build_py')
            bpy_cmd = self.get_finalized_command("build_py")
            build_path = normalize_path(bpy_cmd.build_lib)
            # Build extensions
            self.reinitialize_command('egg_info', egg_base=build_path)
            self.run_command('egg_info')
            self.reinitialize_command('build_ext', inplace=0)
            self.run_command('build_ext')
        else:
            # Without 2to3 inplace works fine:
            self.run_command('egg_info')
            # Build extensions in-place
            self.reinitialize_command('build_ext', inplace=1)
            self.run_command('build_ext')
        ei_cmd = self.get_finalized_command("egg_info")
        # snapshot interpreter state so it can be restored afterwards
        old_path = sys.path[:]
        old_modules = sys.modules.copy()
        try:
            sys.path.insert(0, normalize_path(ei_cmd.egg_base))
            working_set.__init__()
            add_activation_listener(lambda dist: dist.activate())
            require('%s==%s' % (ei_cmd.egg_name, ei_cmd.egg_version))
            func()
        finally:
            sys.path[:] = old_path
            sys.modules.clear()
            sys.modules.update(old_modules)
            working_set.__init__()
    def run(self):
        # fetch any eggs needed to build/run the tests first
        if self.distribution.install_requires:
            self.distribution.fetch_build_eggs(self.distribution.install_requires)
        if self.distribution.tests_require:
            self.distribution.fetch_build_eggs(self.distribution.tests_require)
        if self.test_suite:
            cmd = ' '.join(self.test_args)
            if self.dry_run:
                self.announce('skipping "unittest %s" (dry run)' % cmd)
            else:
                self.announce('running "unittest %s"' % cmd)
                self.with_project_on_sys_path(self.run_tests)
    def run_tests(self):
        """Invoke unittest.main() with the configured suite and loader."""
        import unittest
        # Purge modules under test from sys.modules. The test loader will
        # re-import them from the build location. Required when 2to3 is used
        # with namespace packages.
        if sys.version_info >= (3,) and getattr(self.distribution, 'use_2to3', False):
            module = self.test_args[-1].split('.')[0]
            if module in _namespace_packages:
                del_modules = []
                if module in sys.modules:
                    del_modules.append(module)
                module += '.'
                for name in sys.modules:
                    if name.startswith(module):
                        del_modules.append(name)
                list(map(sys.modules.__delitem__, del_modules))
        loader_ep = EntryPoint.parse("x="+self.test_loader)
        loader_class = loader_ep.load(require=False)
        cks = loader_class()
        unittest.main(
            None, None, [unittest.__file__]+self.test_args,
            testLoader = cks
        )
jsoref/django | tests/multiple_database/routers.py | 379 | 1927 | from __future__ import unicode_literals
from django.db import DEFAULT_DB_ALIAS
class TestRouter(object):
    """
    Loosely mimic a primary/replica setup, without assuming that writes
    propagate between the databases.
    """
    def db_for_read(self, model, instance=None, **hints):
        # Prefer the database the instance came from; fall back to 'other'.
        if not instance:
            return 'other'
        return instance._state.db or 'other'

    def db_for_write(self, model, **hints):
        # Every write is sent to the default alias.
        return DEFAULT_DB_ALIAS

    def allow_relation(self, obj1, obj2, **hints):
        permitted = ('default', 'other')
        return obj1._state.db in permitted and obj2._state.db in permitted

    def allow_migrate(self, db, app_label, **hints):
        # No restriction on where apps may migrate.
        return True
class AuthRouter(object):
    """
    Route all database operations on models in the contrib.auth application.
    """
    def db_for_read(self, model, **hints):
        "Point all read operations on auth models to 'default'"
        # 'default' (rather than 'other') lets callers tell a read
        # request apart from a write request for Auth objects.
        return 'default' if model._meta.app_label == 'auth' else None

    def db_for_write(self, model, **hints):
        "Point all operations on auth models to 'other'"
        return 'other' if model._meta.app_label == 'auth' else None

    def allow_relation(self, obj1, obj2, **hints):
        "Allow any relation if a model in Auth is involved"
        involves_auth = 'auth' in (obj1._meta.app_label, obj2._meta.app_label)
        return True if involves_auth else None

    def allow_migrate(self, db, app_label, **hints):
        "Make sure the auth app only appears on the 'other' db"
        if app_label != 'auth':
            return None
        return db == 'other'
class WriteRouter(object):
    """Router that expresses an opinion only about write operations."""

    def db_for_write(self, model, **hints):
        # Route every write to the dedicated writer database.
        return 'writer'
| bsd-3-clause |
ulope/django | django/contrib/sitemaps/tests/urls/http.py | 26 | 4318 | from datetime import date, datetime
from django.conf.urls import url
from django.conf.urls.i18n import i18n_patterns
from django.contrib.sitemaps import Sitemap, GenericSitemap, FlatPageSitemap, views
from django.http import HttpResponse
from django.utils import timezone
from django.views.decorators.cache import cache_page
from django.contrib.sitemaps.tests.base import I18nTestModel, TestModel
class SimpleSitemap(Sitemap):
    # Single-entry sitemap fixture with every attribute fixed for
    # deterministic output.
    changefreq = "never"
    priority = 0.5
    location = '/location/'
    lastmod = datetime.now()  # captured once, at import time

    def items(self):
        # One placeholder entry; per-item data is irrelevant to these tests.
        return [object()]
class SimpleI18nSitemap(Sitemap):
    # i18n-enabled variant; entries come from the I18nTestModel table.
    changefreq = "never"
    priority = 0.5
    i18n = True

    def items(self):
        return I18nTestModel.objects.all()
class EmptySitemap(Sitemap):
    # Sitemap with no entries at all, to exercise the empty-output path.
    changefreq = "never"
    priority = 0.5
    location = '/location/'

    def items(self):
        return []
class FixedLastmodSitemap(SimpleSitemap):
    # SimpleSitemap with a deterministic, hard-coded lastmod timestamp.
    lastmod = datetime(2013, 3, 13, 10, 0, 0)
class FixedLastmodMixedSitemap(Sitemap):
    # Mixes entries with and without a lastmod: only the first item gets one.
    changefreq = "never"
    priority = 0.5
    location = '/location/'
    loop = 0  # NOTE(review): purpose of 'loop' is not evident here — confirm

    def items(self):
        o1 = TestModel()
        o1.lastmod = datetime(2013, 3, 13, 10, 0, 0)
        o2 = TestModel()  # deliberately has no lastmod
        return [o1, o2]
class DateSiteMap(SimpleSitemap):
    # lastmod as a plain date (no time component).
    lastmod = date(2013, 3, 13)
class TimezoneSiteMap(SimpleSitemap):
    # lastmod carrying an explicit fixed UTC-5 offset (-300 minutes).
    lastmod = datetime(2013, 3, 13, 10, 0, 0, tzinfo=timezone.get_fixed_timezone(-300))
def testmodelview(request, id):
    # Minimal target view for the i18n test URL; the response body is unused.
    return HttpResponse()
# Sitemap registries (section name -> Sitemap class or instance) handed to the
# index/sitemap views via the 'sitemaps' kwarg in the URL patterns.
simple_sitemaps = {
    'simple': SimpleSitemap,
}

simple_i18nsitemaps = {
    'simple': SimpleI18nSitemap,
}

empty_sitemaps = {
    'empty': EmptySitemap,
}

fixed_lastmod_sitemaps = {
    'fixed-lastmod': FixedLastmodSitemap,
}

# NOTE(review): double underscore in this name looks like a typo, but the
# name is referenced by the URL patterns, so any rename must touch both sites.
fixed_lastmod__mixed_sitemaps = {
    'fixed-lastmod-mixed': FixedLastmodMixedSitemap,
}

generic_sitemaps = {
    'generic': GenericSitemap({'queryset': TestModel.objects.all()}),
}

flatpage_sitemaps = {
    'flatpages': FlatPageSitemap,
}
# Route table exercising each registry above.  Most entries register under the
# canonical name 'django.contrib.sitemaps.views.sitemap' so reversing by that
# name works; the cached entries use cache_page with a 1-second timeout.
urlpatterns = [
    url(r'^simple/index\.xml$', views.index, {'sitemaps': simple_sitemaps}),
    url(r'^simple/custom-index\.xml$', views.index,
        {'sitemaps': simple_sitemaps, 'template_name': 'custom_sitemap_index.xml'}),
    url(r'^simple/sitemap-(?P<section>.+)\.xml$', views.sitemap,
        {'sitemaps': simple_sitemaps},
        name='django.contrib.sitemaps.views.sitemap'),
    url(r'^simple/sitemap\.xml$', views.sitemap,
        {'sitemaps': simple_sitemaps},
        name='django.contrib.sitemaps.views.sitemap'),
    url(r'^simple/i18n\.xml$', views.sitemap,
        {'sitemaps': simple_i18nsitemaps},
        name='django.contrib.sitemaps.views.sitemap'),
    url(r'^simple/custom-sitemap\.xml$', views.sitemap,
        {'sitemaps': simple_sitemaps, 'template_name': 'custom_sitemap.xml'},
        name='django.contrib.sitemaps.views.sitemap'),
    url(r'^empty/sitemap\.xml$', views.sitemap,
        {'sitemaps': empty_sitemaps},
        name='django.contrib.sitemaps.views.sitemap'),
    url(r'^lastmod/sitemap\.xml$', views.sitemap,
        {'sitemaps': fixed_lastmod_sitemaps},
        name='django.contrib.sitemaps.views.sitemap'),
    url(r'^lastmod-mixed/sitemap\.xml$', views.sitemap,
        {'sitemaps': fixed_lastmod__mixed_sitemaps},
        name='django.contrib.sitemaps.views.sitemap'),
    url(r'^lastmod/date-sitemap.xml$', views.sitemap,
        {'sitemaps': {'date-sitemap': DateSiteMap}},
        name='django.contrib.sitemaps.views.sitemap'),
    url(r'^lastmod/tz-sitemap.xml$', views.sitemap,
        {'sitemaps': {'tz-sitemap': TimezoneSiteMap}},
        name='django.contrib.sitemaps.views.sitemap'),
    url(r'^generic/sitemap\.xml$', views.sitemap,
        {'sitemaps': generic_sitemaps},
        name='django.contrib.sitemaps.views.sitemap'),
    url(r'^flatpages/sitemap\.xml$', views.sitemap,
        {'sitemaps': flatpage_sitemaps},
        name='django.contrib.sitemaps.views.sitemap'),
    url(r'^cached/index\.xml$', cache_page(1)(views.index),
        {'sitemaps': simple_sitemaps, 'sitemap_url_name': 'cached_sitemap'}),
    url(r'^cached/sitemap-(?P<section>.+)\.xml', cache_page(1)(views.sitemap),
        {'sitemaps': simple_sitemaps}, name='cached_sitemap')
]

# Language-prefixed route used by the i18n sitemap items.
urlpatterns += i18n_patterns(
    url(r'^i18n/testmodel/(?P<id>\d+)/$', testmodelview, name='i18n_testmodel'),
)
| bsd-3-clause |
jiangzhuo/kbengine | kbe/src/lib/python/Lib/idlelib/idle_test/test_idlehistory.py | 87 | 5465 | import unittest
from test.support import requires
import tkinter as tk
from tkinter import Text as tkText
from idlelib.idle_test.mock_tk import Text as mkText
from idlelib.IdleHistory import History
from idlelib.configHandler import idleConf
# Sample interpreter input lines shared by the test classes below.
line1 = 'a = 7'
line2 = 'b = a'
class StoreTest(unittest.TestCase):
    '''Tests History.__init__ and History.store with mock Text'''
    @classmethod
    def setUpClass(cls):
        # One mock Text/History pair shared across tests; state is reset
        # per-test in tearDown.
        cls.text = mkText()
        cls.history = History(cls.text)

    def tearDown(self):
        self.text.delete('1.0', 'end')
        self.history.history = []

    def test_init(self):
        self.assertIs(self.history.text, self.text)
        self.assertEqual(self.history.history, [])
        self.assertIsNone(self.history.prefix)
        self.assertIsNone(self.history.pointer)
        # cyclic defaults to the user's IDLE configuration value.
        self.assertEqual(self.history.cyclic,
                idleConf.GetOption("main", "History", "cyclic", 1, "bool"))

    def test_store_short(self):
        # Single-character entries are discarded, even when padded.
        self.history.store('a')
        self.assertEqual(self.history.history, [])
        self.history.store('  a  ')
        self.assertEqual(self.history.history, [])

    def test_store_dup(self):
        # Re-storing an existing line moves it to the end instead of
        # duplicating it.
        self.history.store(line1)
        self.assertEqual(self.history.history, [line1])
        self.history.store(line2)
        self.assertEqual(self.history.history, [line1, line2])
        self.history.store(line1)
        self.assertEqual(self.history.history, [line2, line1])

    def test_store_reset(self):
        # Storing a line resets any in-progress fetch state.
        self.history.prefix = line1
        self.history.pointer = 0
        self.history.store(line2)
        self.assertIsNone(self.history.prefix)
        self.assertIsNone(self.history.pointer)
class TextWrapper:
    """Delegating wrapper around tk.Text that records bell() calls.

    Lets tests assert that History rang the bell without any audible beep.
    """

    def __init__(self, master):
        self.text = tkText(master=master)
        self._bell = False

    def bell(self):
        # Remember the beep instead of producing one.
        self._bell = True

    def __getattr__(self, name):
        # Everything else falls through to the wrapped Text widget.
        return getattr(self.text, name)
class FetchTest(unittest.TestCase):
    '''Test History.fetch with wrapped tk.Text.
    '''
    @classmethod
    def setUpClass(cls):
        requires('gui')  # the entire class is skipped without a display
        cls.root = tk.Tk()

    def setUp(self):
        # Simulate a fresh shell input line: ">>> " prompt with the iomark
        # anchored at the start of the editable region.
        self.text = text = TextWrapper(self.root)
        text.insert('1.0', ">>> ")
        text.mark_set('iomark', '1.4')
        text.mark_gravity('iomark', 'left')
        self.history = History(text)
        self.history.history = [line1, line2]

    @classmethod
    def tearDownClass(cls):
        cls.root.destroy()
        del cls.root

    def fetch_test(self, reverse, line, prefix, index, *, bell=False):
        # Perform one fetch as invoked by Alt-N or Alt-P
        # Test the result. The line test is the most important.
        # The last two are diagnostic of fetch internals.
        History = self.history  # local name intentionally shadows the class
        History.fetch(reverse)

        Equal = self.assertEqual
        # The text after the prompt must match the expected history line.
        Equal(self.text.get('iomark', 'end-1c'), line)
        # The bell fires exactly when the fetch could not advance.
        Equal(self.text._bell, bell)
        if bell:
            self.text._bell = False
        Equal(History.prefix, prefix)
        Equal(History.pointer, index)
        # The insert cursor must end at the end of the fetched line.
        Equal(self.text.compare("insert", '==', "end-1c"), 1)

    def test_fetch_prev_cyclic(self):
        prefix = ''
        test = self.fetch_test
        test(True, line2, prefix, 1)
        test(True, line1, prefix, 0)
        test(True, prefix, None, None, bell=True)  # wrapped past oldest entry

    def test_fetch_next_cyclic(self):
        prefix = ''
        test = self.fetch_test
        test(False, line1, prefix, 0)
        test(False, line2, prefix, 1)
        test(False, prefix, None, None, bell=True)

    # Prefix 'a' tests skip line2, which starts with 'b'
    def test_fetch_prev_prefix(self):
        prefix = 'a'
        self.text.insert('iomark', prefix)
        self.fetch_test(True, line1, prefix, 0)
        self.fetch_test(True, prefix, None, None, bell=True)

    def test_fetch_next_prefix(self):
        prefix = 'a'
        self.text.insert('iomark', prefix)
        self.fetch_test(False, line1, prefix, 0)
        self.fetch_test(False, prefix, None, None, bell=True)

    def test_fetch_prev_noncyclic(self):
        prefix = ''
        self.history.cyclic = False
        test = self.fetch_test
        test(True, line2, prefix, 1)
        test(True, line1, prefix, 0)
        # Non-cyclic mode pins at the oldest entry instead of wrapping.
        test(True, line1, prefix, 0, bell=True)

    def test_fetch_next_noncyclic(self):
        prefix = ''
        self.history.cyclic = False
        test = self.fetch_test
        test(False, prefix, None, None, bell=True)
        test(True, line2, prefix, 1)
        test(False, prefix, None, None, bell=True)
        test(False, prefix, None, None, bell=True)

    def test_fetch_cursor_move(self):
        # Move cursor after fetch
        self.history.fetch(reverse=True)  # initialization
        self.text.mark_set('insert', 'iomark')
        self.fetch_test(True, line2, None, None, bell=True)

    def test_fetch_edit(self):
        # Edit after fetch
        self.history.fetch(reverse=True)  # initialization
        self.text.delete('iomark', 'insert', )
        self.text.insert('iomark', 'a =')
        self.fetch_test(True, line1, 'a =', 0)  # prefix is reset

    def test_history_prev_next(self):
        # Minimally test functions bound to events
        self.history.history_prev('dummy event')
        self.assertEqual(self.history.pointer, 1)
        self.history.history_next('dummy event')
        self.assertEqual(self.history.pointer, None)
if __name__ == '__main__':
    # NOTE(review): exit=2 is merely truthy, i.e. behaves as exit=True — confirm intended.
    unittest.main(verbosity=2, exit=2)
| lgpl-3.0 |
AxelDelmas/ansible | lib/ansible/plugins/inventory/directory.py | 93 | 2024 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible import constants as C
from . aggregate import InventoryAggregateParser
class InventoryDirectoryParser(InventoryAggregateParser):
    """Aggregate parser over every usable inventory source in a directory."""

    CONDITION = "is_dir(%s)"

    def __init__(self, inven_directory):
        """Collect parseable file paths in *inven_directory* and delegate.

        BUGFIX: the original initialized ``filtered_names`` but appended to
        (and passed on) an undefined ``new_names``, raising NameError for any
        non-empty directory.  The list name is now used consistently.
        """
        directory = inven_directory
        names = os.listdir(inven_directory)
        filtered_names = []

        # Clean up the list of filenames
        for filename in names:
            # Skip files that end with certain extensions or characters
            if any(filename.endswith(ext) for ext in C.DEFAULT_INVENTORY_IGNORE):
                continue
            # Skip hidden files
            if filename.startswith('.') and not filename.startswith('.{0}'.format(os.path.sep)):
                continue
            # These are things inside of an inventory basedir
            if filename in ("host_vars", "group_vars", "vars_plugins"):
                continue
            fullpath = os.path.join(directory, filename)
            filtered_names.append(fullpath)

        super(InventoryDirectoryParser, self).__init__(filtered_names)

    def parse(self):
        return super(InventoryDirectoryParser, self).parse()
| gpl-3.0 |
johnkeepmoving/oss-ftp | python27/win32/Lib/idlelib/ZoomHeight.py | 130 | 1300 | # Sample extension: zoom a window to maximum height
import re
import sys
from idlelib import macosxSupport
class ZoomHeight:
    """IDLE extension binding the <<zoom-height>> event to full-height zoom."""
    menudefs = [
        ('windows', [
            ('_Zoom Height', '<<zoom-height>>'),
        ])
    ]

    def __init__(self, editwin):
        self.editwin = editwin

    def zoom_height_event(self, event):
        # Delegate to the module-level helper on the editor's toplevel window.
        top = self.editwin.top
        zoom_height(top)
def zoom_height(top):
    """Grow *top* to the usable screen height, or reset geometry if already zoomed."""
    match = re.match(r"(\d+)x(\d+)\+(-?\d+)\+(-?\d+)", top.wm_geometry())
    if not match:
        # Geometry string did not parse; signal failure and bail out.
        top.bell()
        return
    width, height, x, _y = (int(group) for group in match.groups())

    screen_height = top.winfo_screenheight()
    # Platform-specific allowances for taskbars / menu panels.
    if sys.platform == 'win32':
        new_y = 0
        usable_height = screen_height - 72
    elif macosxSupport.isAquaTk():
        # The '88' below is a magic number that avoids placing the bottom
        # of the window below the panel on my machine. I don't know how
        # to calculate the correct value for this with tkinter.
        new_y = 22
        usable_height = screen_height - new_y - 88
    else:
        new_y = 0
        usable_height = screen_height - 88

    if height >= usable_height:
        # Already at (or beyond) full height: let the WM restore defaults.
        new_geometry = ""
    else:
        new_geometry = "%dx%d+%d+%d" % (width, usable_height, x, new_y)
    top.wm_geometry(new_geometry)
| mit |
hyperized/ansible | lib/ansible/modules/net_tools/basics/uri.py | 1 | 25199 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2013, Romeo Theriault <romeot () hawaii.edu>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: uri
short_description: Interacts with webservices
description:
- Interacts with HTTP and HTTPS web services and supports Digest, Basic and WSSE
HTTP authentication mechanisms.
- For Windows targets, use the M(win_uri) module instead.
version_added: "1.1"
options:
url:
description:
- HTTP or HTTPS URL in the form (http|https)://host.domain[:port]/path
type: str
required: true
dest:
description:
- A path of where to download the file to (if desired). If I(dest) is a
directory, the basename of the file on the remote server will be used.
type: path
url_username:
description:
- A username for the module to use for Digest, Basic or WSSE authentication.
type: str
aliases: [ user ]
url_password:
description:
- A password for the module to use for Digest, Basic or WSSE authentication.
type: str
aliases: [ password ]
body:
description:
- The body of the http request/response to the web service. If C(body_format) is set
to 'json' it will take an already formatted JSON string or convert a data structure
into JSON. If C(body_format) is set to 'form-urlencoded' it will convert a dictionary
or list of tuples into an 'application/x-www-form-urlencoded' string. (Added in v2.7)
type: raw
body_format:
description:
- The serialization format of the body. When set to C(json) or C(form-urlencoded), encodes the
body argument, if needed, and automatically sets the Content-Type header accordingly.
As of C(2.3) it is possible to override the `Content-Type` header, when
set to C(json) or C(form-urlencoded) via the I(headers) option.
type: str
choices: [ form-urlencoded, json, raw ]
default: raw
version_added: "2.0"
method:
description:
- The HTTP method of the request or response.
- In more recent versions we do not restrict the method at the module level anymore
but it still must be a valid method accepted by the service handling the request.
type: str
default: GET
return_content:
description:
- Whether or not to return the body of the response as a "content" key in
the dictionary result.
- Independently of this option, if the reported Content-type is "application/json", then the JSON is
always loaded into a key called C(json) in the dictionary results.
type: bool
default: no
force_basic_auth:
description:
- Force the sending of the Basic authentication header upon initial request.
- The library used by the uri module only sends authentication information when a webservice
responds to an initial request with a 401 status. Since some basic auth services do not properly
send a 401, logins will fail.
type: bool
default: no
follow_redirects:
description:
- Whether or not the URI module should follow redirects. C(all) will follow all redirects.
C(safe) will follow only "safe" redirects, where "safe" means that the client is only
doing a GET or HEAD on the URI to which it is being redirected. C(none) will not follow
any redirects. Note that C(yes) and C(no) choices are accepted for backwards compatibility,
where C(yes) is the equivalent of C(all) and C(no) is the equivalent of C(safe). C(yes) and C(no)
are deprecated and will be removed in some future version of Ansible.
type: str
choices: ['all', 'no', 'none', 'safe', 'urllib2', 'yes']
default: safe
creates:
description:
- A filename, when it already exists, this step will not be run.
type: path
removes:
description:
- A filename, when it does not exist, this step will not be run.
type: path
status_code:
description:
- A list of valid, numeric, HTTP status codes that signifies success of the request.
type: list
default: [ 200 ]
timeout:
description:
- The socket level timeout in seconds
type: int
default: 30
headers:
description:
- Add custom HTTP headers to a request in the format of a YAML hash. As
of C(2.3) supplying C(Content-Type) here will override the header
generated by supplying C(json) or C(form-urlencoded) for I(body_format).
type: dict
version_added: '2.1'
validate_certs:
description:
- If C(no), SSL certificates will not be validated.
- This should only set to C(no) used on personally controlled sites using self-signed certificates.
- Prior to 1.9.2 the code defaulted to C(no).
type: bool
default: yes
version_added: '1.9.2'
client_cert:
description:
- PEM formatted certificate chain file to be used for SSL client authentication.
- This file can also include the key as well, and if the key is included, I(client_key) is not required
type: path
version_added: '2.4'
client_key:
description:
- PEM formatted file that contains your private key to be used for SSL client authentication.
- If I(client_cert) contains both the certificate and key, this option is not required.
type: path
version_added: '2.4'
src:
description:
- Path to file to be submitted to the remote server.
- Cannot be used with I(body).
type: path
version_added: '2.7'
remote_src:
description:
- If C(no), the module will search for src on originating/master machine.
- If C(yes) the module will use the C(src) path on the remote/target machine.
type: bool
default: no
version_added: '2.7'
force:
description:
- If C(yes) do not get a cached copy.
- Alias C(thirsty) has been deprecated and will be removed in 2.13.
type: bool
default: no
aliases: [ thirsty ]
use_proxy:
description:
- If C(no), it will not use a proxy, even if one is defined in an environment variable on the target hosts.
type: bool
default: yes
unix_socket:
description:
- Path to Unix domain socket to use for connection
version_added: '2.8'
http_agent:
description:
- Header to identify as, generally appears in web server logs.
type: str
default: ansible-httpget
notes:
- The dependency on httplib2 was removed in Ansible 2.1.
- The module returns all the HTTP headers in lower-case.
- For Windows targets, use the M(win_uri) module instead.
seealso:
- module: get_url
- module: win_uri
author:
- Romeo Theriault (@romeotheriault)
extends_documentation_fragment: files
'''
EXAMPLES = r'''
- name: Check that you can connect (GET) to a page and it returns a status 200
uri:
url: http://www.example.com
- name: Check that a page returns a status 200 and fail if the word AWESOME is not in the page contents
uri:
url: http://www.example.com
return_content: yes
register: this
failed_when: "'AWESOME' not in this.content"
- name: Create a JIRA issue
uri:
url: https://your.jira.example.com/rest/api/2/issue/
user: your_username
password: your_pass
method: POST
body: "{{ lookup('file','issue.json') }}"
force_basic_auth: yes
status_code: 201
body_format: json
- name: Login to a form based webpage, then use the returned cookie to access the app in later tasks
uri:
url: https://your.form.based.auth.example.com/index.php
method: POST
body_format: form-urlencoded
body:
name: your_username
password: your_password
enter: Sign in
status_code: 302
register: login
- name: Login to a form based webpage using a list of tuples
uri:
url: https://your.form.based.auth.example.com/index.php
method: POST
body_format: form-urlencoded
body:
- [ name, your_username ]
- [ password, your_password ]
- [ enter, Sign in ]
status_code: 302
register: login
- name: Connect to website using a previously stored cookie
uri:
url: https://your.form.based.auth.example.com/dashboard.php
method: GET
return_content: yes
headers:
Cookie: "{{ login.set_cookie }}"
- name: Queue build of a project in Jenkins
uri:
url: http://{{ jenkins.host }}/job/{{ jenkins.job }}/build?token={{ jenkins.token }}
user: "{{ jenkins.user }}"
password: "{{ jenkins.password }}"
method: GET
force_basic_auth: yes
status_code: 201
- name: POST from contents of local file
uri:
url: https://httpbin.org/post
method: POST
src: file.json
- name: POST from contents of remote file
uri:
url: https://httpbin.org/post
method: POST
src: /path/to/my/file.json
remote_src: yes
- name: Pause play until a URL is reachable from this host
uri:
url: "http://192.0.2.1/some/test"
follow_redirects: none
method: GET
register: _result
until: _result.status == 200
retries: 720 # 720 * 5 seconds = 1hour (60*60/5)
delay: 5 # Every 5 seconds
# There are issues in a supporting Python library that is discussed in
# https://github.com/ansible/ansible/issues/52705 where a proxy is defined
# but you want to bypass proxy use on CIDR masks by using no_proxy
- name: Work around a python issue that doesn't support no_proxy envvar
uri:
follow_redirects: none
validate_certs: false
timeout: 5
url: "http://{{ ip_address }}:{{ port | default(80) }}"
register: uri_data
failed_when: false
changed_when: false
vars:
ip_address: 192.0.2.1
environment: |
{
{% for no_proxy in (lookup('env', 'no_proxy') | regex_replace('\s*,\s*', ' ') ).split() %}
{% if no_proxy | regex_search('\/') and
no_proxy | ipaddr('net') != '' and
no_proxy | ipaddr('net') != false and
ip_address | ipaddr(no_proxy) is not none and
ip_address | ipaddr(no_proxy) != false %}
'no_proxy': '{{ ip_address }}'
{% elif no_proxy | regex_search(':') != '' and
no_proxy | regex_search(':') != false and
no_proxy == ip_address + ':' + (port | default(80)) %}
'no_proxy': '{{ ip_address }}:{{ port | default(80) }}'
{% elif no_proxy | ipaddr('host') != '' and
no_proxy | ipaddr('host') != false and
no_proxy == ip_address %}
'no_proxy': '{{ ip_address }}'
{% elif no_proxy | regex_search('^(\*|)\.') != '' and
no_proxy | regex_search('^(\*|)\.') != false and
no_proxy | regex_replace('\*', '') in ip_address %}
'no_proxy': '{{ ip_address }}'
{% endif %}
{% endfor %}
}
'''
RETURN = r'''
# The return information includes all the HTTP headers in lower-case.
elapsed:
description: The number of seconds that elapsed while performing the download
returned: on success
type: int
sample: 23
msg:
description: The HTTP message from the request
returned: always
type: str
sample: OK (unknown bytes)
redirected:
description: Whether the request was redirected
returned: on success
type: bool
sample: false
status:
description: The HTTP status code from the request
returned: always
type: int
sample: 200
url:
description: The actual URL used for the request
returned: always
type: str
sample: https://www.ansible.com/
'''
import cgi
import datetime
import json
import os
import re
import shutil
import sys
import tempfile
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import PY2, iteritems, string_types
from ansible.module_utils.six.moves.urllib.parse import urlencode, urlsplit
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.common._collections_compat import Mapping, Sequence
from ansible.module_utils.urls import fetch_url, url_argument_spec
# Content-Type substrings that suggest the response body may be JSON-parsable.
JSON_CANDIDATES = ('text', 'json', 'javascript')
def format_message(err, resp):
    """Append the response's 'msg' (removed from *resp*) to *err*.

    Pops 'msg' out of *resp* so callers can splat the remaining dict into
    fail_json() without duplicating the message.  BUGFIX: a default is
    supplied so a response without 'msg' no longer raises KeyError.
    """
    msg = resp.pop('msg', '')
    return err + (' %s' % msg if msg else '')
def write_file(module, url, dest, content, resp):
    """Persist *content* to *dest* via a temp file, failing the module on error.

    Skips the final copy when *dest* already holds identical content
    (sha1 comparison).  *resp* is merged into any failure result.
    *url* is currently unused.
    """
    # create a tempfile with some test content
    fd, tmpsrc = tempfile.mkstemp(dir=module.tmpdir)
    # NOTE(review): the fd from mkstemp is never closed; the path is reopened below.
    f = open(tmpsrc, 'wb')
    try:
        f.write(content)
    except Exception as e:
        os.remove(tmpsrc)
        msg = format_message("Failed to create temporary content file: %s" % to_native(e), resp)
        module.fail_json(msg=msg, **resp)
    f.close()

    checksum_src = None
    checksum_dest = None

    # raise an error if there is no tmpsrc file
    if not os.path.exists(tmpsrc):
        os.remove(tmpsrc)
        msg = format_message("Source '%s' does not exist" % tmpsrc, resp)
        module.fail_json(msg=msg, **resp)
    if not os.access(tmpsrc, os.R_OK):
        os.remove(tmpsrc)
        msg = format_message("Source '%s' not readable" % tmpsrc, resp)
        module.fail_json(msg=msg, **resp)
    checksum_src = module.sha1(tmpsrc)

    # check if there is no dest file
    if os.path.exists(dest):
        # raise an error if copy has no permission on dest
        if not os.access(dest, os.W_OK):
            os.remove(tmpsrc)
            msg = format_message("Destination '%s' not writable" % dest, resp)
            module.fail_json(msg=msg, **resp)
        if not os.access(dest, os.R_OK):
            os.remove(tmpsrc)
            msg = format_message("Destination '%s' not readable" % dest, resp)
            module.fail_json(msg=msg, **resp)
        checksum_dest = module.sha1(dest)
    else:
        # Destination does not exist: only the parent dir needs to be writable.
        if not os.access(os.path.dirname(dest), os.W_OK):
            os.remove(tmpsrc)
            msg = format_message("Destination dir '%s' not writable" % os.path.dirname(dest), resp)
            module.fail_json(msg=msg, **resp)

    # Copy only when content actually differs from what is already on disk.
    if checksum_src != checksum_dest:
        try:
            shutil.copyfile(tmpsrc, dest)
        except Exception as e:
            os.remove(tmpsrc)
            msg = format_message("failed to copy %s to %s: %s" % (tmpsrc, dest, to_native(e)), resp)
            module.fail_json(msg=msg, **resp)

    os.remove(tmpsrc)
def url_filename(url):
    """Return the basename of *url*'s path, or 'index.html' when it is empty."""
    return os.path.basename(urlsplit(url)[2]) or 'index.html'
def absolute_location(url, location):
    """Attempts to create an absolute URL based on initial URL, and
    next URL, specifically in the case of a ``Location`` header.
    """
    if '://' in location:
        # Already absolute; use as-is.
        return location
    if location.startswith('/'):
        # Host-relative: strip the original path, then graft the new one on.
        parts = urlsplit(url)
        base = url.replace(parts[2], '')
        return '%s%s' % (base, location)
    # Document-relative: resolve against the original URL's "directory".
    base = os.path.dirname(url)
    return '%s/%s' % (base, location)
def kv_list(data):
    """Normalize *data* into a list of (key, value) tuples; None passes through."""
    if data is None:
        return None
    # Try sequence-of-pairs first, then mapping, mirroring the accepted inputs.
    for accepted_type, convert in ((Sequence, list), (Mapping, lambda m: list(m.items()))):
        if isinstance(data, accepted_type):
            return convert(data)
    raise TypeError('cannot form-urlencode body, expect list or dict')
def form_urlencoded(body):
    """Serialize *body* as an application/x-www-form-urlencoded string.

    Strings pass through untouched; mappings and pair sequences are
    flattened (multi-valued keys emit one pair per value, None values are
    dropped).  Any other type is returned unchanged for the caller.
    """
    if isinstance(body, string_types):
        # Assume the caller pre-encoded it.
        return body
    if not isinstance(body, (Mapping, Sequence)):
        return body
    pairs = []
    for key, values in kv_list(body):
        # Wrap scalars so single- and multi-valued keys share one code path.
        if isinstance(values, string_types) or not isinstance(values, (Mapping, Sequence)):
            values = [values]
        for value in values:
            if value is not None:
                pairs.append((to_text(key), to_text(value)))
    return urlencode(pairs, doseq=True)
def uri(module, url, dest, body, body_format, method, headers, socket_timeout):
    """Perform the HTTP request; return (result-dict, raw content, final dest).

    Besides the plain fetch this handles: streaming a local ``src`` file as
    the request body, probing for a redirect when ``dest`` is a directory (to
    derive the download filename), and passing an existing file's mtime to
    fetch_url via ``last_mod_time`` for conditional download.
    """
    # if dest is set and is a directory, let's check if we get redirected and
    # set the filename from that url
    redirected = False
    redir_info = {}
    r = {}

    src = module.params['src']
    if src:
        try:
            headers.update({
                'Content-Length': os.stat(src).st_size
            })
            data = open(src, 'rb')
        except OSError:
            module.fail_json(msg='Unable to open source file %s' % src, elapsed=0)
    else:
        data = body

    kwargs = {}
    if dest is not None:
        # Stash follow_redirects, in this block we don't want to follow
        # we'll reset back to the supplied value soon
        follow_redirects = module.params['follow_redirects']
        module.params['follow_redirects'] = False
        if os.path.isdir(dest):
            # first check if we are redirected to a file download
            # NOTE(review): this probe sends `body`, not the opened src file — confirm intended.
            _, redir_info = fetch_url(module, url, data=body,
                                      headers=headers,
                                      method=method,
                                      timeout=socket_timeout, unix_socket=module.params['unix_socket'])
            # if we are redirected, update the url with the location header,
            # and update dest with the new url filename
            if redir_info['status'] in (301, 302, 303, 307):
                url = redir_info['location']
                redirected = True
            dest = os.path.join(dest, url_filename(url))
        # if destination file already exist, only download if file newer
        if os.path.exists(dest):
            kwargs['last_mod_time'] = datetime.datetime.utcfromtimestamp(os.path.getmtime(dest))

        # Reset follow_redirects back to the stashed value
        module.params['follow_redirects'] = follow_redirects

    resp, info = fetch_url(module, url, data=data, headers=headers,
                           method=method, timeout=socket_timeout, unix_socket=module.params['unix_socket'],
                           **kwargs)

    try:
        content = resp.read()
    except AttributeError:
        # there was no content, but the error read()
        # may have been stored in the info as 'body'
        content = info.pop('body', '')

    if src:
        # Try to close the open file handle
        try:
            data.close()
        except Exception:
            pass

    r['redirected'] = redirected or info['url'] != url
    r.update(redir_info)
    r.update(info)

    return r, content, dest
def main():
    """Module entry point: validate params, issue the request, report results.

    FIX: a non-numeric ``status_code`` entry used to raise an unhandled
    ValueError (raw traceback to the user); it now fails cleanly through
    fail_json, matching how other parameter errors are reported.
    """
    argument_spec = url_argument_spec()
    argument_spec.update(
        dest=dict(type='path'),
        url_username=dict(type='str', aliases=['user']),
        url_password=dict(type='str', aliases=['password'], no_log=True),
        body=dict(type='raw'),
        body_format=dict(type='str', default='raw', choices=['form-urlencoded', 'json', 'raw']),
        src=dict(type='path'),
        method=dict(type='str', default='GET'),
        return_content=dict(type='bool', default=False),
        follow_redirects=dict(type='str', default='safe', choices=['all', 'no', 'none', 'safe', 'urllib2', 'yes']),
        creates=dict(type='path'),
        removes=dict(type='path'),
        status_code=dict(type='list', default=[200]),
        timeout=dict(type='int', default=30),
        headers=dict(type='dict', default={}),
        unix_socket=dict(type='path'),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        add_file_common_args=True,
        mutually_exclusive=[['body', 'src']],
    )

    if module.params.get('thirsty'):
        module.deprecate('The alias "thirsty" has been deprecated and will be removed, use "force" instead', version='2.13')

    url = module.params['url']
    body = module.params['body']
    body_format = module.params['body_format'].lower()
    method = module.params['method'].upper()
    dest = module.params['dest']
    return_content = module.params['return_content']
    creates = module.params['creates']
    removes = module.params['removes']
    # Fail cleanly on non-numeric entries instead of raising ValueError.
    try:
        status_code = [int(x) for x in list(module.params['status_code'])]
    except ValueError:
        module.fail_json(msg="Parameter 'status_code' must be a list of valid, numeric, HTTP status codes")
    socket_timeout = module.params['timeout']
    dict_headers = module.params['headers']

    if not re.match('^[A-Z]+$', method):
        module.fail_json(msg="Parameter 'method' needs to be a single word in uppercase, like GET or POST.")

    if body_format == 'json':
        # Encode the body unless its a string, then assume it is pre-formatted JSON
        if not isinstance(body, string_types):
            body = json.dumps(body)
        if 'content-type' not in [header.lower() for header in dict_headers]:
            dict_headers['Content-Type'] = 'application/json'
    elif body_format == 'form-urlencoded':
        if not isinstance(body, string_types):
            try:
                body = form_urlencoded(body)
            except ValueError as e:
                module.fail_json(msg='failed to parse body as form_urlencoded: %s' % to_native(e), elapsed=0)
        if 'content-type' not in [header.lower() for header in dict_headers]:
            dict_headers['Content-Type'] = 'application/x-www-form-urlencoded'

    if creates is not None:
        # do not run the command if the line contains creates=filename
        # and the filename already exists. This allows idempotence
        # of uri executions.
        if os.path.exists(creates):
            module.exit_json(stdout="skipped, since '%s' exists" % creates, changed=False)

    if removes is not None:
        # do not run the command if the line contains removes=filename
        # and the filename does not exist. This allows idempotence
        # of uri executions.
        if not os.path.exists(removes):
            module.exit_json(stdout="skipped, since '%s' does not exist" % removes, changed=False)

    # Make the request
    start = datetime.datetime.utcnow()
    resp, content, dest = uri(module, url, dest, body, body_format, method,
                              dict_headers, socket_timeout)
    resp['elapsed'] = (datetime.datetime.utcnow() - start).seconds
    resp['status'] = int(resp['status'])
    resp['changed'] = False

    # Write the file out if requested
    if dest is not None:
        if resp['status'] in status_code and resp['status'] != 304:
            write_file(module, url, dest, content, resp)
            # allow file attribute changes
            resp['changed'] = True
            module.params['path'] = dest
            file_args = module.load_file_common_arguments(module.params)
            file_args['path'] = dest
            resp['changed'] = module.set_fs_attributes_if_different(file_args, resp['changed'])
        resp['path'] = dest

    # Transmogrify the headers, replacing '-' with '_', since variables don't
    # work with dashes.
    # In python3, the headers are title cased. Lowercase them to be
    # compatible with the python2 behaviour.
    uresp = {}
    for key, value in iteritems(resp):
        ukey = key.replace("-", "_").lower()
        uresp[ukey] = value

    if 'location' in uresp:
        uresp['location'] = absolute_location(url, uresp['location'])

    # Default content_encoding to try
    content_encoding = 'utf-8'
    if 'content_type' in uresp:
        # Handle multiple Content-Type headers
        charsets = []
        content_types = []
        for value in uresp['content_type'].split(','):
            ct, params = cgi.parse_header(value)
            if ct not in content_types:
                content_types.append(ct)
            if 'charset' in params:
                if params['charset'] not in charsets:
                    charsets.append(params['charset'])
        if content_types:
            content_type = content_types[0]
            if len(content_types) > 1:
                module.warn(
                    'Received multiple conflicting Content-Type values (%s), using %s' % (', '.join(content_types), content_type)
                )
            if charsets:
                content_encoding = charsets[0]
                if len(charsets) > 1:
                    module.warn(
                        'Received multiple conflicting charset values (%s), using %s' % (', '.join(charsets), content_encoding)
                    )

        u_content = to_text(content, encoding=content_encoding)
        if any(candidate in content_type for candidate in JSON_CANDIDATES):
            try:
                js = json.loads(u_content)
                uresp['json'] = js
            except Exception:
                if PY2:
                    sys.exc_clear()  # Avoid false positive traceback in fail_json() on Python 2
    else:
        u_content = to_text(content, encoding=content_encoding)

    if resp['status'] not in status_code:
        uresp['msg'] = 'Status code was %s and not %s: %s' % (resp['status'], status_code, uresp.get('msg', ''))
        module.fail_json(content=u_content, **uresp)
    elif return_content:
        module.exit_json(content=u_content, **uresp)
    else:
        module.exit_json(**uresp)
| gpl-3.0 |
memeticlabs/mongokit | listembed-test.py | 1 | 4479 | try:
import unittest2 as unittest
except ImportError:
import unittest
from mongokit import Document, Connection
class DescriptorsTestCase(unittest.TestCase):
    """Exercise mongokit ``default_values`` / ``required_fields`` behaviour
    on documents that embed a list of sub-documents.

    Requires a MongoDB server reachable by ``Connection()``; the scratch
    database ``test`` is dropped after every test.
    """

    def setUp(self):
        # Fresh connection and a scratch collection per test.
        self.connection = Connection()
        self.col = self.connection['test']['mongokit']

    def tearDown(self):
        # Drop the scratch database so tests stay independent.
        self.connection.drop_database('test')

    def test_list_embed_dot_notation(self):
        """Attempt to set a default for a sub element using dot notation
        Either this or test_list_embed_list_notation should pass
        """
        class ListEmbed(Document):
            use_dot_notation = True
            structure = {
                'list': [
                    {
                        'name': basestring,
                        'age': int
                    }
                ]
            }
            default_values = {
                'list.name': 'default'
            }
        self.connection.register([ListEmbed])
        doc = self.col.ListEmbed()
        self.assertDictEqual(doc, {'list': []})
        doc.list.append({'age': 23})
        # Expect the 'name' default to be filled in on the appended element.
        self.assertDictEqual(
            doc, {
                'list': [
                    {
                        'name': 'default',
                        'age': 23
                    }
                ]
            }
        )

    def test_list_embed_list_notation(self):
        """Attempt to set a default for a sub element using list notation
        Either this or test_list_embed_dot_notation should pass
        """
        class ListEmbed(Document):
            use_dot_notation = True
            structure = {
                'list': [
                    {
                        'name': basestring,
                        'age': int
                    }
                ]
            }
            default_values = {
                'list': [
                    {
                        'name': 'default'
                    }
                ]
            }
        self.connection.register([ListEmbed])
        doc = self.col.ListEmbed()
        self.assertDictEqual(doc, {'list': []})
        doc.list.append({'age': 23})
        self.assertDictEqual(
            doc, {
                'list': [
                    {
                        'name': 'default',
                        'age': 23
                    }
                ]
            }
        )

    def test_list_embed_non_required_fields(self):
        """Confirm all fields are not required"""
        class ListEmbed(Document):
            use_dot_notation = True
            structure = {
                'list': [
                    {
                        'name': basestring,
                        'age': int
                    }
                ]
            }
        self.connection.register([ListEmbed])
        doc = self.col.ListEmbed()
        self.assertDictEqual(doc, {'list': []})
        doc.list.append({'age': 23})
        self.assertDictEqual(
            doc, {
                'list': [
                    {
                        'age': 23
                    }
                ]
            }
        )
        # No required_fields declared, so validation must succeed.
        doc.validate()

    def test_list_embed_required_fields_dot_notation(self):
        """Confirm list of object required field validation works"""
        class ListEmbed(Document):
            use_dot_notation = True
            structure = {
                'list': [
                    {
                        'name': basestring,
                        'age': int
                    }
                ]
            }
            required_fields = ['list.name']
        self.connection.register([ListEmbed])
        doc = self.col.ListEmbed()
        self.assertDictEqual(doc, {'list': []})
        doc.list = [{'name': 'bob'}]
        self.assertDictEqual(
            doc, {
                'list': [
                    {
                        'name': 'bob'
                    }
                ]
            }
        )
        # 'name' is present, so validation must succeed.
        doc.validate()
        doc.list = [{'age': 23}]
        self.assertDictEqual(
            doc, {
                'list': [
                    {
                        'age': 23
                    }
                ]
            }
        )
        # 'name' is required but missing, so validate() must raise.
        # BUGFIX: the previous ``try: validate(); self.fail(...) except: pass``
        # used a bare except that also swallowed the AssertionError raised by
        # self.fail(), making this check pass unconditionally.
        with self.assertRaises(Exception):
            doc.validate()
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| bsd-3-clause |
TheWardoctor/Wardoctors-repo | script.module.uncoded/lib/resources/lib/modules/sources.py | 1 | 56371 | # -*- coding: utf-8 -*-
'''
Covenant Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import sys,re,json,urllib,urlparse,random,datetime,time
from resources.lib.modules import trakt
from resources.lib.modules import tvmaze
from resources.lib.modules import cache
from resources.lib.modules import control
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import debrid
from resources.lib.modules import workers
from resources.lib.modules import source_utils
from resources.lib.modules import log_utils
from resources.lib.modules import thexem
try: from sqlite3 import dbapi2 as database
except: from pysqlite2 import dbapi2 as database
try: import urlresolver
except: pass
try: import xbmc
except: pass
class sources:
    def __init__(self):
        # getConstants() (defined later in this class, outside this chunk)
        # presumably populates the host/provider lookup attributes used by
        # the scraper methods (hostDict, hostprDict, sourceDict, ...) —
        # TODO confirm against the full file.
        self.getConstants()
        # Accumulator appended to concurrently by getMovieSource /
        # getEpisodeSource worker threads.
        self.sources = []
    def play(self, title, year, imdb, tvdb, season, episode, tvshowtitle, premiered, meta, select):
        """Scrape sources for the given title and start playback.

        Depending on the 'hosts.mode' setting (or the explicit *select*
        override) the sources are shown as a directory, a select dialog,
        or auto-played directly. All errors are swallowed (Kodi add-on
        convention in this file).
        """
        try:
            url = None
            items = self.getSources(title, year, imdb, tvdb, season, episode, tvshowtitle, premiered)
            # select: '1' directory listing, '0'/'1' dialog, else auto-play.
            select = control.setting('hosts.mode') if select == None else select
            title = tvshowtitle if not tvshowtitle == None else title
            # PseudoTV live-channel mode: resolve directly, no UI.
            if control.window.getProperty('PseudoTVRunning') == 'True':
                return control.resolve(int(sys.argv[1]), True, control.item(path=str(self.sourcesDirect(items))))
            if len(items) > 0:
                if select == '1' and 'plugin' in control.infoLabel('Container.PluginName'):
                    # Hand the source list to addItem() via window properties
                    # and re-open the container as a directory of sources.
                    control.window.clearProperty(self.itemProperty)
                    control.window.setProperty(self.itemProperty, json.dumps(items))
                    control.window.clearProperty(self.metaProperty)
                    control.window.setProperty(self.metaProperty, meta)
                    control.sleep(200)
                    return control.execute('Container.Update(%s?action=addItem&title=%s)' % (sys.argv[0], urllib.quote_plus(title)))
                elif select == '0' or select == '1':
                    url = self.sourcesDialog(items)
                else:
                    url = self.sourcesDirect(items)
            if url == None:
                return self.errorForSources()
            try: meta = json.loads(meta)
            except: pass
            from resources.lib.modules.player import player
            player().run(title, year, season, episode, imdb, tvdb, url, meta)
        except:
            pass
    def addItem(self, title):
        """Render the previously scraped sources (stored in window
        properties by play()/getSources()) as a Kodi directory listing,
        one playable item per source.
        """
        control.playlist.clear()
        items = control.window.getProperty(self.itemProperty)
        items = json.loads(items)
        if items == None or len(items) == 0: control.idle() ; sys.exit()
        meta = control.window.getProperty(self.metaProperty)
        meta = json.loads(meta)
        # (Kodi bug?) [name,role] is incredibly slow on this directory, [name] is barely tolerable, so just nuke it for speed!
        if 'cast' in meta: del(meta['cast'])
        sysaddon = sys.argv[0]
        syshandle = int(sys.argv[1])
        # Downloads context menu only when enabled and both paths configured.
        downloads = True if control.setting('downloads') == 'true' and not (control.setting('movie.download.path') == '' or control.setting('tv.download.path') == '') else False
        systitle = sysname = urllib.quote_plus(title)
        if 'tvshowtitle' in meta and 'season' in meta and 'episode' in meta:
            sysname += urllib.quote_plus(' S%02dE%02d' % (int(meta['season']), int(meta['episode'])))
        elif 'year' in meta:
            sysname += urllib.quote_plus(' (%s)' % meta['year'])
        # Artwork resolution: '0' acts as the "unset" sentinel; fall back
        # through alternate art keys, then addon defaults.
        poster = meta['poster3'] if 'poster3' in meta else '0'
        if poster == '0': poster = meta['poster'] if 'poster' in meta else '0'
        fanart = meta['fanart2'] if 'fanart2' in meta else '0'
        if fanart == '0': fanart = meta['fanart'] if 'fanart' in meta else '0'
        thumb = meta['thumb'] if 'thumb' in meta else '0'
        if thumb == '0': thumb = poster
        if thumb == '0': thumb = fanart
        banner = meta['banner'] if 'banner' in meta else '0'
        if banner == '0': banner = poster
        if poster == '0': poster = control.addonPoster()
        if banner == '0': banner = control.addonBanner()
        if not control.setting('fanart') == 'true': fanart = '0'
        if fanart == '0': fanart = control.addonFanart()
        if thumb == '0': thumb = control.addonFanart()
        sysimage = urllib.quote_plus(poster.encode('utf-8'))
        downloadMenu = control.lang(32403).encode('utf-8')
        for i in range(len(items)):
            try:
                label = items[i]['label']
                # Each list item carries its own single-source JSON payload.
                syssource = urllib.quote_plus(json.dumps([items[i]]))
                sysurl = '%s?action=playItem&title=%s&source=%s' % (sysaddon, systitle, syssource)
                cm = []
                if downloads == True:
                    cm.append((downloadMenu, 'RunPlugin(%s?action=download&name=%s&image=%s&source=%s)' % (sysaddon, sysname, sysimage, syssource)))
                item = control.item(label=label)
                item.setArt({'icon': thumb, 'thumb': thumb, 'poster': poster, 'banner': banner})
                item.setProperty('Fanart_Image', fanart)
                video_streaminfo = {'codec': 'h264'}
                item.addStreamInfo('video', video_streaminfo)
                item.addContextMenuItems(cm)
                item.setInfo(type='Video', infoLabels = meta)
                control.addItem(handle=syshandle, url=sysurl, listitem=item, isFolder=False)
            except:
                pass
        control.content(syshandle, 'files')
        control.directory(syshandle, cacheToDisc=True)
    def playItem(self, title, source):
        """Resolve and play one source picked from the addItem() directory.

        Besides the clicked *source*, neighbouring directory entries are
        collected (via ListItem FolderPath introspection) so that up to 40
        candidates can be tried in order until one resolves.
        """
        try:
            meta = control.window.getProperty(self.metaProperty)
            meta = json.loads(meta)
            year = meta['year'] if 'year' in meta else None
            season = meta['season'] if 'season' in meta else None
            episode = meta['episode'] if 'episode' in meta else None
            imdb = meta['imdb'] if 'imdb' in meta else None
            tvdb = meta['tvdb'] if 'tvdb' in meta else None
            # Gather fallback sources from the items after (next) and before
            # (prev) the selected one in the visible directory; 'total'
            # de-duplicates FolderPath strings.
            next = [] ; prev = [] ; total = []
            for i in range(1,1000):
                try:
                    u = control.infoLabel('ListItem(%s).FolderPath' % str(i))
                    if u in total: raise Exception()
                    total.append(u)
                    u = dict(urlparse.parse_qsl(u.replace('?','')))
                    u = json.loads(u['source'])[0]
                    next.append(u)
                except:
                    break
            for i in range(-1000,0)[::-1]:
                try:
                    u = control.infoLabel('ListItem(%s).FolderPath' % str(i))
                    if u in total: raise Exception()
                    total.append(u)
                    u = dict(urlparse.parse_qsl(u.replace('?','')))
                    u = json.loads(u['source'])[0]
                    prev.append(u)
                except:
                    break
            items = json.loads(source)
            # Selected source first, then following items, then preceding.
            items = [i for i in items+next+prev][:40]
            header = control.addonInfo('name')
            header2 = header.upper()
            progressDialog = control.progressDialog if control.setting('progress.dialog') == '0' else control.progressDialogBG
            progressDialog.create(header, '')
            progressDialog.update(0)
            # 'block' remembers a host that timed out so later entries from
            # the same host are skipped.
            block = None
            for i in range(len(items)):
                try:
                    try:
                        if progressDialog.iscanceled(): break
                        progressDialog.update(int((100 / float(len(items))) * i), str(items[i]['label']), str(' '))
                    except:
                        progressDialog.update(int((100 / float(len(items))) * i), str(header2), str(items[i]['label']))
                    if items[i]['source'] == block: raise Exception()
                    # Resolve in a worker thread; sourcesResolve stores the
                    # result in self.url (defined elsewhere in this class).
                    w = workers.Thread(self.sourcesResolve, items[i])
                    w.start()
                    # Hosts in hostcapDict get extra resolve time (captcha?).
                    offset = 60 * 2 if items[i].get('source') in self.hostcapDict else 0
                    m = ''
                    # Poll the resolver; the m-flag tracks whether a captcha
                    # keyboard / yes-no dialog was ever shown by the resolver.
                    for x in range(3600):
                        try:
                            if xbmc.abortRequested == True: return sys.exit()
                            if progressDialog.iscanceled(): return progressDialog.close()
                        except:
                            pass
                        k = control.condVisibility('Window.IsActive(virtualkeyboard)')
                        if k: m += '1'; m = m[-1]
                        if (w.is_alive() == False or x > 30 + offset) and not k: break
                        k = control.condVisibility('Window.IsActive(yesnoDialog)')
                        if k: m += '1'; m = m[-1]
                        if (w.is_alive() == False or x > 30 + offset) and not k: break
                        time.sleep(0.5)
                    # Grace period (15s) if a dialog was involved.
                    for x in range(30):
                        try:
                            if xbmc.abortRequested == True: return sys.exit()
                            if progressDialog.iscanceled(): return progressDialog.close()
                        except:
                            pass
                        if m == '': break
                        if w.is_alive() == False: break
                        time.sleep(0.5)
                    if w.is_alive() == True: block = items[i]['source']
                    if self.url == None: raise Exception()
                    try: progressDialog.close()
                    except: pass
                    control.sleep(200)
                    control.execute('Dialog.Close(virtualkeyboard)')
                    control.execute('Dialog.Close(yesnoDialog)')
                    from resources.lib.modules.player import player
                    player().run(title, year, season, episode, imdb, tvdb, self.url, meta)
                    return self.url
                except:
                    pass
            try: progressDialog.close()
            except: pass
            self.errorForSources()
        except:
            pass
def getSources(self, title, year, imdb, tvdb, season, episode, tvshowtitle, premiered, quality='HD', timeout=30):
progressDialog = control.progressDialog if control.setting('progress.dialog') == '0' else control.progressDialogBG
progressDialog.create(control.addonInfo('name'), '')
progressDialog.update(0)
self.prepareSources()
sourceDict = self.sourceDict
progressDialog.update(0, control.lang(32600).encode('utf-8'))
content = 'movie' if tvshowtitle == None else 'episode'
if content == 'movie':
sourceDict = [(i[0], i[1], getattr(i[1], 'movie', None)) for i in sourceDict]
genres = trakt.getGenre('movie', 'imdb', imdb)
else:
sourceDict = [(i[0], i[1], getattr(i[1], 'tvshow', None)) for i in sourceDict]
genres = trakt.getGenre('show', 'tvdb', tvdb)
sourceDict = [(i[0], i[1], i[2]) for i in sourceDict if not hasattr(i[1], 'genre_filter') or not i[1].genre_filter or any(x in i[1].genre_filter for x in genres)]
sourceDict = [(i[0], i[1]) for i in sourceDict if not i[2] == None]
language = self.getLanguage()
sourceDict = [(i[0], i[1], i[1].language) for i in sourceDict]
sourceDict = [(i[0], i[1]) for i in sourceDict if any(x in i[2] for x in language)]
try: sourceDict = [(i[0], i[1], control.setting('provider.' + i[0])) for i in sourceDict]
except: sourceDict = [(i[0], i[1], 'true') for i in sourceDict]
sourceDict = [(i[0], i[1]) for i in sourceDict if not i[2] == 'false']
sourceDict = [(i[0], i[1], i[1].priority) for i in sourceDict]
random.shuffle(sourceDict)
sourceDict = sorted(sourceDict, key=lambda i: i[2])
threads = []
if content == 'movie':
title = self.getTitle(title)
localtitle = self.getLocalTitle(title, imdb, tvdb, content)
aliases = self.getAliasTitles(imdb, localtitle, content)
for i in sourceDict: threads.append(workers.Thread(self.getMovieSource, title, localtitle, aliases, year, imdb, i[0], i[1]))
else:
tvshowtitle = self.getTitle(tvshowtitle)
localtvshowtitle = self.getLocalTitle(tvshowtitle, imdb, tvdb, content)
aliases = self.getAliasTitles(imdb, localtvshowtitle, content)
#Disabled on 11/11/17 due to hang. Should be checked in the future and possible enabled again.
#season, episode = thexem.get_scene_episode_number(tvdb, season, episode)
for i in sourceDict: threads.append(workers.Thread(self.getEpisodeSource, title, year, imdb, tvdb, season, episode, tvshowtitle, localtvshowtitle, aliases, premiered, i[0], i[1]))
s = [i[0] + (i[1],) for i in zip(sourceDict, threads)]
s = [(i[3].getName(), i[0], i[2]) for i in s]
mainsourceDict = [i[0] for i in s if i[2] == 0]
sourcelabelDict = dict([(i[0], i[1].upper()) for i in s])
[i.start() for i in threads]
string1 = control.lang(32404).encode('utf-8')
string2 = control.lang(32405).encode('utf-8')
string3 = control.lang(32406).encode('utf-8')
string4 = control.lang(32601).encode('utf-8')
string5 = control.lang(32602).encode('utf-8')
string6 = control.lang(32606).encode('utf-8')
string7 = control.lang(32607).encode('utf-8')
try: timeout = int(control.setting('scrapers.timeout.1'))
except: pass
quality = control.setting('hosts.quality')
if quality == '': quality = '0'
line1 = line2 = line3 = ""
source_4k = d_source_4k = 0
source_1080 = d_source_1080 = 0
source_720 = d_source_720 = 0
source_sd = d_source_sd = 0
total = d_total = 0
debrid_list = debrid.debrid_resolvers
debrid_status = debrid.status()
total_format = '[COLOR %s][B]%s[/B][/COLOR]'
pdiag_format = ' 4K: %s | 1080p: %s | 720p: %s | SD: %s | %s: %s'.split('|')
pdiag_bg_format = '4K:%s(%s)|1080p:%s(%s)|720p:%s(%s)|SD:%s(%s)|T:%s(%s)'.split('|')
for i in range(0, 4 * timeout):
try:
if xbmc.abortRequested == True: return sys.exit()
try:
if progressDialog.iscanceled(): break
except:
pass
if len(self.sources) > 0:
if quality in ['0']:
source_4k = len([e for e in self.sources if e['quality'] == '4K' and e['debridonly'] == False])
source_1080 = len([e for e in self.sources if e['quality'] in ['1440p','1080p'] and e['debridonly'] == False])
source_720 = len([e for e in self.sources if e['quality'] in ['720p','HD'] and e['debridonly'] == False])
source_sd = len([e for e in self.sources if e['quality'] == 'SD' and e['debridonly'] == False])
elif quality in ['1']:
source_1080 = len([e for e in self.sources if e['quality'] in ['1440p','1080p'] and e['debridonly'] == False])
source_720 = len([e for e in self.sources if e['quality'] in ['720p','HD'] and e['debridonly'] == False])
source_sd = len([e for e in self.sources if e['quality'] == 'SD' and e['debridonly'] == False])
elif quality in ['2']:
source_1080 = len([e for e in self.sources if e['quality'] in ['1080p'] and e['debridonly'] == False])
source_720 = len([e for e in self.sources if e['quality'] in ['720p','HD'] and e['debridonly'] == False])
source_sd = len([e for e in self.sources if e['quality'] == 'SD' and e['debridonly'] == False])
elif quality in ['3']:
source_720 = len([e for e in self.sources if e['quality'] in ['720p','HD'] and e['debridonly'] == False])
source_sd = len([e for e in self.sources if e['quality'] == 'SD' and e['debridonly'] == False])
else:
source_sd = len([e for e in self.sources if e['quality'] == 'SD' and e['debridonly'] == False])
total = source_4k + source_1080 + source_720 + source_sd
if debrid_status:
if quality in ['0']:
for d in debrid_list:
d_source_4k = len([e for e in self.sources if e['quality'] == '4K' and d.valid_url('', e['source'])])
d_source_1080 = len([e for e in self.sources if e['quality'] in ['1440p','1080p'] and d.valid_url('', e['source'])])
d_source_720 = len([e for e in self.sources if e['quality'] in ['720p','HD'] and d.valid_url('', e['source'])])
d_source_sd = len([e for e in self.sources if e['quality'] == 'SD' and d.valid_url('', e['source'])])
elif quality in ['1']:
for d in debrid_list:
d_source_1080 = len([e for e in self.sources if e['quality'] in ['1440p','1080p'] and d.valid_url('', e['source'])])
d_source_720 = len([e for e in self.sources if e['quality'] in ['720p','HD'] and d.valid_url('', e['source'])])
d_source_sd = len([e for e in self.sources if e['quality'] == 'SD' and d.valid_url('', e['source'])])
elif quality in ['2']:
for d in debrid_list:
d_source_1080 = len([e for e in self.sources if e['quality'] in ['1080p'] and d.valid_url('', e['source'])])
d_source_720 = len([e for e in self.sources if e['quality'] in ['720p','HD'] and d.valid_url('', e['source'])])
d_source_sd = len([e for e in self.sources if e['quality'] == 'SD' and d.valid_url('', e['source'])])
elif quality in ['3']:
for d in debrid_list:
d_source_720 = len([e for e in self.sources if e['quality'] in ['720p','HD'] and d.valid_url('', e['source'])])
d_source_sd = len([e for e in self.sources if e['quality'] == 'SD' and d.valid_url('', e['source'])])
else:
for d in debrid_list:
d_source_sd = len([e for e in self.sources if e['quality'] == 'SD' and d.valid_url('', e['source'])])
d_total = d_source_4k + d_source_1080 + d_source_720 + d_source_sd
if debrid_status:
d_4k_label = total_format % ('red', d_source_4k) if d_source_4k == 0 else total_format % ('lime', d_source_4k)
d_1080_label = total_format % ('red', d_source_1080) if d_source_1080 == 0 else total_format % ('lime', d_source_1080)
d_720_label = total_format % ('red', d_source_720) if d_source_720 == 0 else total_format % ('lime', d_source_720)
d_sd_label = total_format % ('red', d_source_sd) if d_source_sd == 0 else total_format % ('lime', d_source_sd)
d_total_label = total_format % ('red', d_total) if d_total == 0 else total_format % ('lime', d_total)
source_4k_label = total_format % ('red', source_4k) if source_4k == 0 else total_format % ('lime', source_4k)
source_1080_label = total_format % ('red', source_1080) if source_1080 == 0 else total_format % ('lime', source_1080)
source_720_label = total_format % ('red', source_720) if source_720 == 0 else total_format % ('lime', source_720)
source_sd_label = total_format % ('red', source_sd) if source_sd == 0 else total_format % ('lime', source_sd)
source_total_label = total_format % ('red', total) if total == 0 else total_format % ('lime', total)
if (i / 2) < timeout:
try:
mainleft = [sourcelabelDict[x.getName()] for x in threads if x.is_alive() == True and x.getName() in mainsourceDict]
info = [sourcelabelDict[x.getName()] for x in threads if x.is_alive() == True]
if i >= timeout and len(mainleft) == 0 and len(self.sources) >= 100 * len(info): break # improve responsiveness
if debrid_status:
if quality in ['0']:
if not progressDialog == control.progressDialogBG:
line1 = ('%s:' + '|'.join(pdiag_format)) % (string6, d_4k_label, d_1080_label, d_720_label, d_sd_label, str(string4), d_total_label)
line2 = ('%s:' + '|'.join(pdiag_format)) % (string7, source_4k_label, source_1080_label, source_720_label, source_sd_label, str(string4), source_total_label)
print line1, line2
else:
line1 = '|'.join(pdiag_bg_format[:-1]) % (source_4k_label, d_4k_label, source_1080_label, d_1080_label, source_720_label, d_720_label, source_sd_label, d_sd_label)
elif quality in ['1']:
if not progressDialog == control.progressDialogBG:
line1 = ('%s:' + '|'.join(pdiag_format[1:])) % (string6, d_1080_label, d_720_label, d_sd_label, str(string4), d_total_label)
line2 = ('%s:' + '|'.join(pdiag_format[1:])) % (string7, source_1080_label, source_720_label, source_sd_label, str(string4), source_total_label)
else:
line1 = '|'.join(pdiag_bg_format[1:]) % (source_1080_label, d_1080_label, source_720_label, d_720_label, source_sd_label, d_sd_label, source_total_label, d_total_label)
elif quality in ['2']:
if not progressDialog == control.progressDialogBG:
line1 = ('%s:' + '|'.join(pdiag_format[1:])) % (string6, d_1080_label, d_720_label, d_sd_label, str(string4), d_total_label)
line2 = ('%s:' + '|'.join(pdiag_format[1:])) % (string7, source_1080_label, source_720_label, source_sd_label, str(string4), source_total_label)
else:
line1 = '|'.join(pdiag_bg_format[1:]) % (source_1080_label, d_1080_label, source_720_label, d_720_label, source_sd_label, d_sd_label, source_total_label, d_total_label)
elif quality in ['3']:
if not progressDialog == control.progressDialogBG:
line1 = ('%s:' + '|'.join(pdiag_format[2:])) % (string6, d_720_label, d_sd_label, str(string4), d_total_label)
line2 = ('%s:' + '|'.join(pdiag_format[2:])) % (string7, source_720_label, source_sd_label, str(string4), source_total_label)
else:
line1 = '|'.join(pdiag_bg_format[2:]) % (source_720_label, d_720_label, source_sd_label, d_sd_label, source_total_label, d_total_label)
else:
if not progressDialog == control.progressDialogBG:
line1 = ('%s:' + '|'.join(pdiag_format[3:])) % (string6, d_sd_label, str(string4), d_total_label)
line2 = ('%s:' + '|'.join(pdiag_format[3:])) % (string7, source_sd_label, str(string4), source_total_label)
else:
line1 = '|'.join(pdiag_bg_format[3:]) % (source_sd_label, d_sd_label, source_total_label, d_total_label)
else:
if quality in ['0']:
line1 = '|'.join(pdiag_format) % (source_4k_label, source_1080_label, source_720_label, source_sd_label, str(string4), source_total_label)
elif quality in ['1']:
line1 = '|'.join(pdiag_format[1:]) % (source_1080_label, source_720_label, source_sd_label, str(string4), source_total_label)
elif quality in ['2']:
line1 = '|'.join(pdiag_format[1:]) % (source_1080_label, source_720_label, source_sd_label, str(string4), source_total_label)
elif quality in ['3']:
line1 = '|'.join(pdiag_format[2:]) % (source_720_label, source_sd_label, str(string4), source_total_label)
else:
line1 = '|'.join(pdiag_format[3:]) % (source_sd_label, str(string4), source_total_label)
if debrid_status:
if len(info) > 6: line3 = string3 % (str(len(info)))
elif len(info) > 0: line3 = string3 % (', '.join(info))
else: break
percent = int(100 * float(i) / (2 * timeout) + 0.5)
if not progressDialog == control.progressDialogBG: progressDialog.update(max(1, percent), line1, line2, line3)
else: progressDialog.update(max(1, percent), line1, line3)
else:
if len(info) > 6: line2 = string3 % (str(len(info)))
elif len(info) > 0: line2 = string3 % (', '.join(info))
else: break
percent = int(100 * float(i) / (2 * timeout) + 0.5)
progressDialog.update(max(1, percent), line1, line2)
except Exception as e:
log_utils.log('Exception Raised: %s' % str(e), log_utils.LOGERROR)
else:
try:
mainleft = [sourcelabelDict[x.getName()] for x in threads if x.is_alive() == True and x.getName() in mainsourceDict]
info = mainleft
if debrid_status:
if len(info) > 6: line3 = 'Waiting for: %s' % (str(len(info)))
elif len(info) > 0: line3 = 'Waiting for: %s' % (', '.join(info))
else: break
percent = int(100 * float(i) / (2 * timeout) + 0.5) % 100
if not progressDialog == control.progressDialogBG: progressDialog.update(max(1, percent), line1, line2, line3)
else: progressDialog.update(max(1, percent), line1, line3)
else:
if len(info) > 6: line2 = 'Waiting for: %s' % (str(len(info)))
elif len(info) > 0: line2 = 'Waiting for: %s' % (', '.join(info))
else: break
percent = int(100 * float(i) / (2 * timeout) + 0.5) % 100
progressDialog.update(max(1, percent), line1, line2)
except:
break
time.sleep(0.5)
except:
pass
if control.addonInfo('id') == 'plugin.video.bennu':
try:
if progressDialog: progressDialog.update(100, control.lang(30726).encode('utf-8'), control.lang(30731).encode('utf-8'))
items = self.sourcesFilter()
if quality == 'RD': items = [i for i in items if i['debrid'] != '']
elif quality == 'SD': items = [i for i in items if i['quality'] == 'SD' and i['debrid'] == '']
elif quality == 'HD': items = [i for i in items if i['quality'] != 'SD']
if control.setting('bennu.dev.log') == 'true':
log_utils.log('Sources Returned: %s' % str(items), log_utils.LOGNOTICE)
try: progressDialog.close()
except: pass
if quality == 'AUTO':
u = self.sourcesDirect(items)
return u
else:
meta = '{"title": "%s", "year": "%s", "imdb": "%s"}' % (title, year, imdb)
'''control.window.clearProperty("plugin.video.bennu.container.items")
control.window.setProperty("plugin.video.bennu.container.items", json.dumps(items))
control.window.clearProperty("plugin.video.bennu.container.meta")
control.window.setProperty("plugin.video.bennu.container.meta", meta)'''
control.window.clearProperty(self.itemProperty)
control.window.setProperty(self.itemProperty, json.dumps(items))
control.window.clearProperty(self.metaProperty)
control.window.setProperty(self.metaProperty, meta)
control.sleep(200)
control.execute('Container.Update(%s?action=addItem&title=%s)' % (sys.argv[0], urllib.quote_plus(title)))
return "DIR"
except:
try: progressDialog.close()
except: pass
return
else:
try: progressDialog.close()
except: pass
self.sourcesFilter()
return self.sources
def prepareSources(self):
try:
control.makeFile(control.dataPath)
self.sourceFile = control.providercacheFile
dbcon = database.connect(self.sourceFile)
dbcur = dbcon.cursor()
dbcur.execute("CREATE TABLE IF NOT EXISTS rel_url (""source TEXT, ""imdb_id TEXT, ""season TEXT, ""episode TEXT, ""rel_url TEXT, ""UNIQUE(source, imdb_id, season, episode)"");")
dbcur.execute("CREATE TABLE IF NOT EXISTS rel_src (""source TEXT, ""imdb_id TEXT, ""season TEXT, ""episode TEXT, ""hosts TEXT, ""added TEXT, ""UNIQUE(source, imdb_id, season, episode)"");")
except:
pass
def getMovieSource(self, title, localtitle, aliases, year, imdb, source, call):
try:
dbcon = database.connect(self.sourceFile)
dbcur = dbcon.cursor()
except:
pass
''' Fix to stop items passed with a 0 IMDB id pulling old unrelated sources from the database. '''
if imdb == '0':
try:
dbcur.execute("DELETE FROM rel_src WHERE source = '%s' AND imdb_id = '%s' AND season = '%s' AND episode = '%s'" % (source, imdb, '', ''))
dbcur.execute("DELETE FROM rel_url WHERE source = '%s' AND imdb_id = '%s' AND season = '%s' AND episode = '%s'" % (source, imdb, '', ''))
dbcon.commit()
except:
pass
''' END '''
try:
sources = []
dbcur.execute("SELECT * FROM rel_src WHERE source = '%s' AND imdb_id = '%s' AND season = '%s' AND episode = '%s'" % (source, imdb, '', ''))
match = dbcur.fetchone()
t1 = int(re.sub('[^0-9]', '', str(match[5])))
t2 = int(datetime.datetime.now().strftime("%Y%m%d%H%M"))
update = abs(t2 - t1) > 60
if update == False:
sources = eval(match[4].encode('utf-8'))
return self.sources.extend(sources)
except:
pass
try:
url = None
dbcur.execute("SELECT * FROM rel_url WHERE source = '%s' AND imdb_id = '%s' AND season = '%s' AND episode = '%s'" % (source, imdb, '', ''))
url = dbcur.fetchone()
url = eval(url[4].encode('utf-8'))
except:
pass
try:
if url == None: url = call.movie(imdb, title, localtitle, aliases, year)
if url == None: raise Exception()
dbcur.execute("DELETE FROM rel_url WHERE source = '%s' AND imdb_id = '%s' AND season = '%s' AND episode = '%s'" % (source, imdb, '', ''))
dbcur.execute("INSERT INTO rel_url Values (?, ?, ?, ?, ?)", (source, imdb, '', '', repr(url)))
dbcon.commit()
except:
pass
try:
sources = []
sources = call.sources(url, self.hostDict, self.hostprDict)
if sources == None or sources == []: raise Exception()
sources = [json.loads(t) for t in set(json.dumps(d, sort_keys=True) for d in sources)]
for i in sources: i.update({'provider': source})
self.sources.extend(sources)
dbcur.execute("DELETE FROM rel_src WHERE source = '%s' AND imdb_id = '%s' AND season = '%s' AND episode = '%s'" % (source, imdb, '', ''))
dbcur.execute("INSERT INTO rel_src Values (?, ?, ?, ?, ?, ?)", (source, imdb, '', '', repr(sources), datetime.datetime.now().strftime("%Y-%m-%d %H:%M")))
dbcon.commit()
except:
pass
def getEpisodeSource(self, title, year, imdb, tvdb, season, episode, tvshowtitle, localtvshowtitle, aliases, premiered, source, call):
try:
dbcon = database.connect(self.sourceFile)
dbcur = dbcon.cursor()
except:
pass
try:
sources = []
dbcur.execute("SELECT * FROM rel_src WHERE source = '%s' AND imdb_id = '%s' AND season = '%s' AND episode = '%s'" % (source, imdb, season, episode))
match = dbcur.fetchone()
t1 = int(re.sub('[^0-9]', '', str(match[5])))
t2 = int(datetime.datetime.now().strftime("%Y%m%d%H%M"))
update = abs(t2 - t1) > 60
if update == False:
sources = eval(match[4].encode('utf-8'))
return self.sources.extend(sources)
except:
pass
try:
url = None
dbcur.execute("SELECT * FROM rel_url WHERE source = '%s' AND imdb_id = '%s' AND season = '%s' AND episode = '%s'" % (source, imdb, '', ''))
url = dbcur.fetchone()
url = eval(url[4].encode('utf-8'))
except:
pass
try:
if url == None: url = call.tvshow(imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year)
if url == None: raise Exception()
dbcur.execute("DELETE FROM rel_url WHERE source = '%s' AND imdb_id = '%s' AND season = '%s' AND episode = '%s'" % (source, imdb, '', ''))
dbcur.execute("INSERT INTO rel_url Values (?, ?, ?, ?, ?)", (source, imdb, '', '', repr(url)))
dbcon.commit()
except:
pass
try:
ep_url = None
dbcur.execute("SELECT * FROM rel_url WHERE source = '%s' AND imdb_id = '%s' AND season = '%s' AND episode = '%s'" % (source, imdb, season, episode))
ep_url = dbcur.fetchone()
ep_url = eval(ep_url[4].encode('utf-8'))
except:
pass
try:
if url == None: raise Exception()
if ep_url == None: ep_url = call.episode(url, imdb, tvdb, title, premiered, season, episode)
if ep_url == None: raise Exception()
dbcur.execute("DELETE FROM rel_url WHERE source = '%s' AND imdb_id = '%s' AND season = '%s' AND episode = '%s'" % (source, imdb, season, episode))
dbcur.execute("INSERT INTO rel_url Values (?, ?, ?, ?, ?)", (source, imdb, season, episode, repr(ep_url)))
dbcon.commit()
except:
pass
try:
sources = []
sources = call.sources(ep_url, self.hostDict, self.hostprDict)
if sources == None or sources == []: raise Exception()
sources = [json.loads(t) for t in set(json.dumps(d, sort_keys=True) for d in sources)]
for i in sources: i.update({'provider': source})
self.sources.extend(sources)
dbcur.execute("DELETE FROM rel_src WHERE source = '%s' AND imdb_id = '%s' AND season = '%s' AND episode = '%s'" % (source, imdb, season, episode))
dbcur.execute("INSERT INTO rel_src Values (?, ?, ?, ?, ?, ?)", (source, imdb, season, episode, repr(sources), datetime.datetime.now().strftime("%Y-%m-%d %H:%M")))
dbcon.commit()
except:
pass
def alterSources(self, url, meta):
try:
if control.setting('hosts.mode') == '2': url += '&select=1'
else: url += '&select=2'
control.execute('RunPlugin(%s)' % url)
except:
pass
def clearSources(self):
try:
control.idle()
yes = control.yesnoDialog(control.lang(32407).encode('utf-8'), '', '')
if not yes: return
control.makeFile(control.dataPath)
dbcon = database.connect(control.providercacheFile)
dbcur = dbcon.cursor()
dbcur.execute("DROP TABLE IF EXISTS rel_src")
dbcur.execute("DROP TABLE IF EXISTS rel_url")
dbcur.execute("VACUUM")
dbcon.commit()
control.infoDialog(control.lang(32408).encode('utf-8'), sound=True, icon='INFO')
except:
pass
def sourcesFilter(self):
    """Sort, filter, de-duplicate and label ``self.sources`` in place.

    Applies the user's provider-sort / quality / captcha settings,
    tags entries usable through debrid resolvers, orders by quality,
    builds the display label for each entry and trims the list to at
    most 2000 items. Returns the resulting ``self.sources`` list.
    """
    provider = control.setting('hosts.sort.provider')
    if provider == '': provider = 'false'
    quality = control.setting('hosts.quality')
    if quality == '': quality = '0'
    captcha = control.setting('hosts.captcha')
    if captcha == '': captcha = 'true'
    HEVC = control.setting('HEVC')
    # Shuffle first so equal-quality entries are not biased by scrape order.
    random.shuffle(self.sources)
    if provider == 'true':
        self.sources = sorted(self.sources, key=lambda k: k['provider'])
    # Downgrade entries flagged 'checkquality' on hosts not known to serve HQ.
    for i in self.sources:
        if 'checkquality' in i and i['checkquality'] == True:
            if not i['source'].lower() in self.hosthqDict and i['quality'] not in ['SD', 'SCR', 'CAM']: i.update({'quality': 'SD'})
    # Local (on-disk) sources are pulled out and re-attached at the front later.
    local = [i for i in self.sources if 'local' in i and i['local'] == True]
    for i in local: i.update({'language': self._getPrimaryLang() or 'en'})
    self.sources = [i for i in self.sources if not i in local]
    # Direct links before hoster links.
    filter = []
    filter += [i for i in self.sources if i['direct'] == True]
    filter += [i for i in self.sources if i['direct'] == False]
    self.sources = filter
    # Duplicate each source once per debrid resolver that can handle its host.
    filter = []
    for d in debrid.debrid_resolvers:
        valid_hoster = set([i['source'] for i in self.sources])
        valid_hoster = [i for i in valid_hoster if d.valid_url('', i)]
        filter += [dict(i.items() + [('debrid', d.name)]) for i in self.sources if i['source'] in valid_hoster]
    # Keep non-premium, non-debrid-only entries as plain (undecorated) sources.
    filter += [i for i in self.sources if not i['source'].lower() in self.hostprDict and i['debridonly'] == False]
    self.sources = filter
    # Normalize legacy 'HD' quality tag to '720p'.
    for i in range(len(self.sources)):
        q = self.sources[i]['quality']
        if q == 'HD': self.sources[i].update({'quality': '720p'})
    # Rebuild the list ordered by quality tier (debrid first within each tier),
    # honouring the user's maximum-quality setting ('0' = allow everything).
    filter = []
    filter += local
    if quality in ['0']: filter += [i for i in self.sources if i['quality'] == '4K' and 'debrid' in i]
    if quality in ['0']: filter += [i for i in self.sources if i['quality'] == '4K' and not 'debrid' in i and 'memberonly' in i]
    if quality in ['0']: filter += [i for i in self.sources if i['quality'] == '4K' and not 'debrid' in i and not 'memberonly' in i]
    if quality in ['0', '1']: filter += [i for i in self.sources if i['quality'] == '1440p' and 'debrid' in i]
    if quality in ['0', '1']: filter += [i for i in self.sources if i['quality'] == '1440p' and not 'debrid' in i and 'memberonly' in i]
    if quality in ['0', '1']: filter += [i for i in self.sources if i['quality'] == '1440p' and not 'debrid' in i and not 'memberonly' in i]
    if quality in ['0', '1', '2']: filter += [i for i in self.sources if i['quality'] == '1080p' and 'debrid' in i]
    if quality in ['0', '1', '2']: filter += [i for i in self.sources if i['quality'] == '1080p' and not 'debrid' in i and 'memberonly' in i]
    if quality in ['0', '1', '2']: filter += [i for i in self.sources if i['quality'] == '1080p' and not 'debrid' in i and not 'memberonly' in i]
    if quality in ['0', '1', '2', '3']: filter += [i for i in self.sources if i['quality'] == '720p' and 'debrid' in i]
    if quality in ['0', '1', '2', '3']: filter += [i for i in self.sources if i['quality'] == '720p' and not 'debrid' in i and 'memberonly' in i]
    if quality in ['0', '1', '2', '3']: filter += [i for i in self.sources if i['quality'] == '720p' and not 'debrid' in i and not 'memberonly' in i]
    filter += [i for i in self.sources if i['quality'] in ['SD', 'SCR', 'CAM']]
    self.sources = filter
    # Drop captcha-protected hosts (unless debrid bypasses them) when disabled.
    if not captcha == 'true':
        filter = [i for i in self.sources if i['source'].lower() in self.hostcapDict and not 'debrid' in i]
        self.sources = [i for i in self.sources if not i in filter]
    # Always drop blocklisted hosts without debrid.
    filter = [i for i in self.sources if i['source'].lower() in self.hostblockDict and not 'debrid' in i]
    self.sources = [i for i in self.sources if not i in filter]
    # multi == True when more than one distinct language is present.
    multi = [i['language'] for i in self.sources]
    multi = [x for y,x in enumerate(multi) if x not in multi[:y]]
    multi = True if len(multi) > 1 else False
    if multi == True:
        # Non-English sources listed ahead of English ones.
        self.sources = [i for i in self.sources if not i['language'] == 'en'] + [i for i in self.sources if i['language'] == 'en']
    self.sources = self.sources[:2000]
    extra_info = control.setting('sources.extrainfo')
    prem_identify = control.setting('prem.identify')
    if prem_identify == '': prem_identify = 'blue'
    prem_identify = self.getPremColor(prem_identify)
    # Build the display label for every remaining source.
    for i in range(len(self.sources)):
        if extra_info == 'true': t = source_utils.getFileType(self.sources[i]['url'])
        else: t = None
        u = self.sources[i]['url']
        p = self.sources[i]['provider']
        q = self.sources[i]['quality']
        s = self.sources[i]['source']
        s = s.rsplit('.', 1)[0]
        l = self.sources[i]['language']
        try: f = (' | '.join(['[I]%s [/I]' % info.strip() for info in self.sources[i]['info'].split('|')]))
        except: f = ''
        # Missing 'debrid' key is normalized to '' on the entry itself.
        try: d = self.sources[i]['debrid']
        except: d = self.sources[i]['debrid'] = ''
        if d.lower() == 'real-debrid': d = 'RD'
        if not d == '': label = '%02d | [B]%s | %s[/B] | ' % (int(i+1), d, p)
        else: label = '%02d | [B]%s[/B] | ' % (int(i+1), p)
        if multi == True and not l == 'en': label += '[B]%s[/B] | ' % l
        if t:
            if q in ['4K', '1440p', '1080p', '720p']: label += '%s | [B][I]%s [/I][/B] | [I]%s[/I] | %s' % (s, q, t, f)
            elif q == 'SD': label += '%s | %s | [I]%s[/I]' % (s, f, t)
            else: label += '%s | %s | [I]%s [/I] | [I]%s[/I]' % (s, f, q, t)
        else:
            if q in ['4K', '1440p', '1080p', '720p']: label += '%s | [B][I]%s [/I][/B] | %s' % (s, q, f)
            elif q == 'SD': label += '%s | %s' % (s, f)
            else: label += '%s | %s | [I]%s [/I]' % (s, f, q)
        # Strip empty/placeholder label segments.
        label = label.replace('| 0 |', '|').replace(' | [I]0 [/I]', '')
        label = re.sub('\[I\]\s+\[/I\]', ' ', label)
        label = re.sub('\|\s+\|', '|', label)
        label = re.sub('\|(?:\s+|)$', '', label)
        if d:
            if not prem_identify == 'nocolor':
                self.sources[i]['label'] = ('[COLOR %s]' % (prem_identify)) + label.upper() + '[/COLOR]'
            else: self.sources[i]['label'] = label.upper()
        else: self.sources[i]['label'] = label.upper()
    # Optionally hide HEVC-encoded sources.
    try:
        if not HEVC == 'true': self.sources = [i for i in self.sources if not 'HEVC' in i['label']]
    except: pass
    self.sources = [i for i in self.sources if 'label' in i]
    return self.sources
def sourcesResolve(self, item, info=False):
    """Resolve a single source *item* to a playable URL.

    Stores the result on ``self.url`` and returns it. Returns ``None``
    on any failure; when *info* is True a failure also shows the
    "no sources" dialog via ``errorForSources``.
    """
    try:
        self.url = None
        u = url = item['url']
        d = item['debrid'] ; direct = item['direct']
        local = item.get('local', False)
        provider = item['provider']
        # Look up the provider module that produced this item.
        call = [i[1] for i in self.sourceDict if i[0] == provider][0]
        u = url = call.resolve(url)
        if url == None or (not '://' in str(url) and not local): raise Exception()
        if not local:
            # 'stack:' URLs bundle multiple parts joined by ' , '.
            url = url[8:] if url.startswith('stack:') else url
            urls = []
            for part in url.split(' , '):
                u = part
                if not d == '':
                    # Route through the debrid service attached to this item.
                    part = debrid.resolver(part, d)
                elif not direct == True:
                    # Non-direct links go through urlresolver.
                    hmf = urlresolver.HostedMediaFile(url=u, include_disabled=True, include_universal=False)
                    if hmf.valid_url() == True: part = hmf.resolve()
                urls.append(part)
            url = 'stack://' + ' , '.join(urls) if len(urls) > 1 else urls[0]
            if url == False or url == None: raise Exception()
            # Reject archive links: file extension taken from the path portion.
            ext = url.split('?')[0].split('&')[0].split('|')[0].rsplit('.')[-1].replace('/', '').lower()
            if ext == 'rar': raise Exception()
            # Optional request headers may be appended after a '|' separator.
            try: headers = url.rsplit('|', 1)[1]
            except: headers = ''
            headers = urllib.quote_plus(headers).replace('%3D', '=') if ' ' in headers else headers
            headers = dict(urlparse.parse_qsl(headers))
            # Probe the URL so dead links fail here instead of at playback.
            if url.startswith('http') and '.m3u8' in url:
                result = client.request(url.split('|')[0], headers=headers, output='geturl', timeout='20')
                if result == None: raise Exception()
            elif url.startswith('http'):
                result = client.request(url.split('|')[0], headers=headers, output='chunk', timeout='20')
                if result == None: raise Exception()
        self.url = url
        return url
    except:
        if info == True: self.errorForSources()
        return
def sourcesDialog(self, items):
    """Let the user pick a source, then resolve it with fallback.

    Shows a selection dialog; starting from the chosen entry, tries
    each source in turn (chosen item, then the ones after it, then the
    ones before it, capped at 40) in a worker thread until one
    resolves. Returns the resolved URL, 'close://' if the dialog was
    dismissed, or None.
    """
    try:
        labels = [i['label'] for i in items]
        select = control.selectDialog(labels)
        if select == -1: return 'close://'
        # Fallback order: selected item, items after it, then items before it reversed.
        next = [y for x,y in enumerate(items) if x >= select]
        prev = [y for x,y in enumerate(items) if x < select][::-1]
        items = [items[select]]
        items = [i for i in items+next+prev][:40]
        header = control.addonInfo('name')
        header2 = header.upper()
        progressDialog = control.progressDialog if control.setting('progress.dialog') == '0' else control.progressDialogBG
        progressDialog.create(header, '')
        progressDialog.update(0)
        # 'block' remembers a host whose resolve thread hung, so further
        # items from the same host are skipped.
        block = None
        for i in range(len(items)):
            try:
                if items[i]['source'] == block: raise Exception()
                # Resolve in a thread so the dialog stays responsive / cancelable.
                w = workers.Thread(self.sourcesResolve, items[i])
                w.start()
                try:
                    if progressDialog.iscanceled(): break
                    progressDialog.update(int((100 / float(len(items))) * i), str(items[i]['label']), str(' '))
                except:
                    progressDialog.update(int((100 / float(len(items))) * i), str(header2), str(items[i]['label']))
                # m is non-empty while a keyboard/yesno dialog (e.g. a captcha
                # prompt) is visible; the wait then extends past the 15s cap.
                m = ''
                for x in range(3600):
                    try:
                        if xbmc.abortRequested == True: return sys.exit()
                        if progressDialog.iscanceled(): return progressDialog.close()
                    except:
                        pass
                    k = control.condVisibility('Window.IsActive(virtualkeyboard)')
                    if k: m += '1'; m = m[-1]
                    if (w.is_alive() == False or x > 30) and not k: break
                    k = control.condVisibility('Window.IsActive(yesnoDialog)')
                    if k: m += '1'; m = m[-1]
                    if (w.is_alive() == False or x > 30) and not k: break
                    time.sleep(0.5)
                # Grace period (up to 15s) after an interactive dialog was seen.
                for x in range(30):
                    try:
                        if xbmc.abortRequested == True: return sys.exit()
                        if progressDialog.iscanceled(): return progressDialog.close()
                    except:
                        pass
                    if m == '': break
                    if w.is_alive() == False: break
                    time.sleep(0.5)
                if w.is_alive() == True: block = items[i]['source']
                if self.url == None: raise Exception()
                self.selectedSource = items[i]['label']
                try: progressDialog.close()
                except: pass
                # Dismiss any dialogs the resolver may have left open.
                control.execute('Dialog.Close(virtualkeyboard)')
                control.execute('Dialog.Close(yesnoDialog)')
                return self.url
            except:
                # This item failed; fall through to the next candidate.
                pass
        try: progressDialog.close()
        except: pass
    except Exception as e:
        try: progressDialog.close()
        except: pass
        log_utils.log('Error %s' % str(e), log_utils.LOGNOTICE)
def sourcesDirect(self, items):
    """Autoplay path: resolve *items* in order without user interaction.

    Filters out captcha-protected and blocklisted hosts (unless covered
    by debrid), non-autoplay entries, and optionally HD qualities, then
    returns the first URL that resolves (or None).
    """
    filter = [i for i in items if i['source'].lower() in self.hostcapDict and i['debrid'] == '']
    items = [i for i in items if not i in filter]
    filter = [i for i in items if i['source'].lower() in self.hostblockDict and i['debrid'] == '']
    items = [i for i in items if not i in filter]
    # Entries may opt out of autoplay via an explicit 'autoplay': False flag.
    items = [i for i in items if ('autoplay' in i and i['autoplay'] == True) or not 'autoplay' in i]
    if control.setting('autoplay.sd') == 'true':
        items = [i for i in items if not i['quality'] in ['4K', '1440p', '1080p', 'HD']]
    u = None
    header = control.addonInfo('name')
    header2 = header.upper()
    try:
        control.sleep(1000)
        progressDialog = control.progressDialog if control.setting('progress.dialog') == '0' else control.progressDialogBG
        progressDialog.create(header, '')
        progressDialog.update(0)
    except:
        pass
    for i in range(len(items)):
        try:
            if progressDialog.iscanceled(): break
            progressDialog.update(int((100 / float(len(items))) * i), str(items[i]['label']), str(' '))
        except:
            progressDialog.update(int((100 / float(len(items))) * i), str(header2), str(items[i]['label']))
        try:
            if xbmc.abortRequested == True: return sys.exit()
            url = self.sourcesResolve(items[i])
            if u == None: u = url
            if not url == None: break
        except:
            pass
    try: progressDialog.close()
    except: pass
    return u
def errorForSources(self):
    """Show the localized 'no source found' info dialog (lang id 32401)."""
    control.infoDialog(control.lang(32401).encode('utf-8'), sound=False, icon='INFO')
def getLanguage(self):
    """Map the 'providers.lang' setting to a list of language codes.

    Unknown or unset values fall back to ``['en']``.
    """
    selected = control.setting('providers.lang')
    codes = {'English': ['en'], 'German': ['de'], 'German+English': ['de','en'], 'French': ['fr'], 'French+English': ['fr', 'en'], 'Portuguese': ['pt'], 'Portuguese+English': ['pt', 'en'], 'Polish': ['pl'], 'Polish+English': ['pl', 'en'], 'Korean': ['ko'], 'Korean+English': ['ko', 'en'], 'Russian': ['ru'], 'Russian+English': ['ru', 'en'], 'Spanish': ['es'], 'Spanish+English': ['es', 'en'], 'Greek': ['gr'], 'Italian': ['it'], 'Italian+English': ['it', 'en'], 'Greek+English': ['gr', 'en']}
    return codes.get(selected, ['en'])
def getLocalTitle(self, title, imdb, tvdb, content):
    """Return *title* translated into the user's primary language.

    Falls back to the original *title* when no primary language is set
    or no translation is found.
    """
    primary = self._getPrimaryLang()
    if not primary:
        return title
    if content == 'movie':
        translated = trakt.getMovieTranslation(imdb, primary)
    else:
        translated = tvmaze.tvMaze().getTVShowTranslation(tvdb, primary)
    return translated or title
def getAliasTitles(self, imdb, localtitle, content):
    """Fetch alternative titles for *imdb* from trakt.

    Keeps aliases whose country is the user's primary language, 'us' or
    blank, excluding ones equal to *localtitle*. Returns [] on failure.
    """
    lang = self._getPrimaryLang()
    try:
        if content == 'movie':
            aliases = trakt.getMovieAliases(imdb)
        else:
            aliases = trakt.getTVShowAliases(imdb)
        return [a for a in aliases
                if a.get('country', '').lower() in [lang, '', 'us']
                and a.get('title', '').lower() != localtitle.lower()]
    except:
        return []
def _getPrimaryLang(self):
    """Return the primary two-letter code for 'providers.lang', or None if unknown."""
    codes = {'English': 'en', 'German': 'de', 'German+English': 'de', 'French': 'fr', 'French+English': 'fr', 'Portuguese': 'pt', 'Portuguese+English': 'pt', 'Polish': 'pl', 'Polish+English': 'pl', 'Korean': 'ko', 'Korean+English': 'ko', 'Russian': 'ru', 'Russian+English': 'ru', 'Spanish': 'es', 'Spanish+English': 'es', 'Italian': 'it', 'Italian+English': 'it', 'Greek': 'gr', 'Greek+English': 'gr'}
    return codes.get(control.setting('providers.lang'))
def getTitle(self, title):
    """Return *title* normalized via cleantitle for provider matching."""
    return cleantitle.normalize(title)
def getConstants(self):
    """Initialise window-property names and hoster lookup tables.

    Populates ``self.sourceDict`` with the provider modules and the
    host lists used for filtering (premium, captcha-protected, HQ,
    blocked).
    """
    self.itemProperty = 'plugin.video.covenant.container.items'
    self.metaProperty = 'plugin.video.covenant.container.meta'
    from resources.lib.sources import sources
    self.sourceDict = sources()
    try:
        # Flatten urlresolver's resolver domain lists into one deduplicated,
        # lowercase list (wildcard resolvers are excluded).
        self.hostDict = urlresolver.relevant_resolvers(order_matters=True)
        self.hostDict = [i.domains for i in self.hostDict if not '*' in i.domains]
        self.hostDict = [i.lower() for i in reduce(lambda x, y: x+y, self.hostDict)]
        self.hostDict = [x for y,x in enumerate(self.hostDict) if x not in self.hostDict[:y]]
    except:
        self.hostDict = []
    # Premium (debrid-only) file hosters.
    self.hostprDict = ['1fichier.com', 'oboom.com', 'rapidgator.net', 'rg.to', 'uploaded.net', 'uploaded.to', 'ul.to', 'filefactory.com', 'nitroflare.com', 'turbobit.net', 'uploadrocket.net']
    # Hosters that may present captchas.
    self.hostcapDict = ['hugefiles.net', 'kingfiles.net', 'openload.io', 'openload.co', 'oload.tv', 'thevideo.me', 'vidup.me', 'streamin.to', 'torba.se']
    # Hosters trusted to serve genuinely high-quality streams.
    self.hosthqDict = ['gvideo', 'google.com', 'openload.io', 'openload.co', 'oload.tv', 'thevideo.me', 'rapidvideo.com', 'raptu.com', 'filez.tv', 'uptobox.com', 'uptobox.com', 'uptostream.com', 'xvidstage.com', 'streamango.com']
    # Hosters to always exclude (none by default).
    self.hostblockDict = []
def getPremColor(self, n):
    """Translate the 'prem.identify' setting index *n* to a color name.

    '9' means no coloring ('nocolor'); any unrecognized value falls
    back to 'blue'.
    """
    colors = {
        '0': 'blue',
        '1': 'red',
        '2': 'yellow',
        '3': 'deeppink',
        '4': 'cyan',
        '5': 'lawngreen',
        '6': 'gold',
        '7': 'magenta',
        '8': 'yellowgreen',
        '9': 'nocolor',
    }
    # Bug fix: the original fallback branch wrote ``n == 'blue'`` (a no-op
    # comparison instead of an assignment), so unknown values were returned
    # unchanged rather than defaulting to 'blue'.
    return colors.get(n, 'blue')
| apache-2.0 |
GhostshipSoftware/avaloria | contrib/extended_room.py | 2 | 17621 | """
Extended Room
Evennia Contribution - Griatch 2012
This is an extended Room typeclass for Evennia. It is supported
by an extended Look command and an extended @desc command, also
in this module.
Features:
1) Time-changing description slots
This allows to change the full description text the room shows
depending on larger time variations. Four seasons - spring, summer,
autumn and winter are used by default). The season is calculated
on-demand (no Script or timer needed) and updates the full text block.
There is also a general description which is used as fallback if
one or more of the seasonal descriptions are not set when their
time comes.
An updated @desc command allows for setting seasonal descriptions.
The room uses the src.utils.gametime.GameTime global script. This is
started by default, but if you have deactivated it, you need to
supply your own time keeping mechanism.
2) In-description changing tags
Within each seasonal (or general) description text, you can also embed
time-of-day dependent sections. Text inside such a tag will only show
during that particular time of day. The tags looks like <timeslot> ...
</timeslot>. By default there are four timeslots per day - morning,
afternoon, evening and night.
3) Details
The Extended Room can be "detailed" with special keywords. This makes
use of a special Look command. Details are "virtual" targets to look
at, without there having to be a database object created for it. The
Details are simply stored in a dictionary on the room and if the look
command cannot find an object match for a "look <target>" command it
will also look through the available details at the current location
if applicable. An extended @desc command is used to set details.
4) Extra commands
CmdExtendedLook - look command supporting room details
CmdExtendedDesc - @desc command allowing to add seasonal descs and details,
as well as listing them
CmdGameTime - A simple "time" command, displaying the current
time and season.
Installation/testing:
1) Add CmdExtendedLook, CmdExtendedDesc and CmdGameTime to the default cmdset
(see wiki how to do this).
2) @dig a room of type contrib.extended_room.ExtendedRoom (or make it the
default room type)
3) Use @desc and @detail to customize the room, then play around!
"""
import re
from django.conf import settings
from ev import Room
from ev import gametime
from ev import default_cmds
from ev import utils
# error return function, needed by Extended Look command
_AT_SEARCH_RESULT = utils.variable_from_module(*settings.SEARCH_AT_RESULT.rsplit('.', 1))

# regexes for in-desc replacements: each captures the text between a
# matched pair of <timeslot>...</timeslot> tags
RE_MORNING = re.compile(r"<morning>(.*?)</morning>", re.IGNORECASE)
RE_AFTERNOON = re.compile(r"<afternoon>(.*?)</afternoon>", re.IGNORECASE)
RE_EVENING = re.compile(r"<evening>(.*?)</evening>", re.IGNORECASE)
RE_NIGHT = re.compile(r"<night>(.*?)</night>", re.IGNORECASE)

# this map is just a faster way to select the right regexes (the first
# regex in each tuple will be parsed, the following will always be weeded out)
REGEXMAP = {"morning": (RE_MORNING, RE_AFTERNOON, RE_EVENING, RE_NIGHT),
            "afternoon": (RE_AFTERNOON, RE_MORNING, RE_EVENING, RE_NIGHT),
            "evening": (RE_EVENING, RE_MORNING, RE_AFTERNOON, RE_NIGHT),
            "night": (RE_NIGHT, RE_MORNING, RE_AFTERNOON, RE_EVENING)}

# set up the seasons and time slots. This assumes gametime started at the
# beginning of the year (so month 1 is equivalent to January), and that
# one CAN divide the game's year into four seasons in the first place ...
MONTHS_PER_YEAR = settings.TIME_MONTH_PER_YEAR
# season cutoffs as fractions of a year: spring/summer, summer/autumn, autumn/winter
SEASONAL_BOUNDARIES = (3 / 12.0, 6 / 12.0, 9 / 12.0)
HOURS_PER_DAY = settings.TIME_HOUR_PER_DAY
# timeslot cutoffs as fractions of a day: night, morning, afternoon (evening after)
DAY_BOUNDARIES = (0, 6 / 24.0, 12 / 24.0, 18 / 24.0)
# implements the Extended Room
class ExtendedRoom(Room):
    """
    This room implements a more advanced look functionality depending on
    time. It also allows for "details", together with a slightly modified
    look command.
    """
    def at_object_creation(self):
        "Called when room is first created only."
        # One description slot per season; empty string means "not set".
        self.db.spring_desc = ""
        self.db.summer_desc = ""
        self.db.autumn_desc = ""
        self.db.winter_desc = ""
        # the general desc is used as a fallback if a seasonal one is not set
        self.db.general_desc = ""
        # will be set dynamically. Can contain raw timeslot codes
        self.db.raw_desc = ""
        # this will be set dynamically at first look. Parsed for timeslot codes
        self.db.desc = ""
        # these will be filled later (non-persistent cache of last-seen slots)
        self.ndb.last_season = None
        self.ndb.last_timeslot = None
        # detail storage: maps lowercase detail key -> description text
        self.db.details = {}

    def get_time_and_season(self):
        """
        Calculate the current time and season ids.

        Returns a (season, timeslot) pair of strings, e.g.
        ("spring", "morning").
        """
        # get the current time as parts of year and parts of day
        # returns a tuple (years,months,weeks,days,hours,minutes,sec)
        time = gametime.gametime(format=True)
        month, hour = time[1], time[4]
        # fractions of a year / of a day, compared against the boundary tables
        season = float(month) / MONTHS_PER_YEAR
        timeslot = float(hour) / HOURS_PER_DAY
        # figure out which slots these represent
        if SEASONAL_BOUNDARIES[0] <= season < SEASONAL_BOUNDARIES[1]:
            curr_season = "spring"
        elif SEASONAL_BOUNDARIES[1] <= season < SEASONAL_BOUNDARIES[2]:
            curr_season = "summer"
        elif SEASONAL_BOUNDARIES[2] <= season < 1.0 + SEASONAL_BOUNDARIES[0]:
            # upper bound past 1.0 covers the last month(s) of the year
            curr_season = "autumn"
        else:
            curr_season = "winter"
        if DAY_BOUNDARIES[0] <= timeslot < DAY_BOUNDARIES[1]:
            curr_timeslot = "night"
        elif DAY_BOUNDARIES[1] <= timeslot < DAY_BOUNDARIES[2]:
            curr_timeslot = "morning"
        elif DAY_BOUNDARIES[2] <= timeslot < DAY_BOUNDARIES[3]:
            curr_timeslot = "afternoon"
        else:
            curr_timeslot = "evening"
        return curr_season, curr_timeslot

    def replace_timeslots(self, raw_desc, curr_time):
        """
        Filter so that only time markers <timeslot>...</timeslot> of the
        correct timeslot remains in the description.
        """
        if raw_desc:
            regextuple = REGEXMAP[curr_time]
            # keep the matching timeslot's text, strip the other three entirely
            raw_desc = regextuple[0].sub(r"\1", raw_desc)
            raw_desc = regextuple[1].sub("", raw_desc)
            raw_desc = regextuple[2].sub("", raw_desc)
            return regextuple[3].sub("", raw_desc)
        return raw_desc

    def return_detail(self, key):
        """
        This will attempt to match a "detail" to look for in the room. A detail
        is a way to offer more things to look at in a room without having to
        add new objects. For this to work, we require a custom look command that
        allows for "look <detail>" - the look command should defer to this
        method on the current location (if it exists) before giving up on
        finding the target.

        Details are not season-sensitive, but are parsed for timeslot markers.
        """
        try:
            detail = self.db.details.get(key.lower(), None)
        except AttributeError:
            # this happens if no attribute details is set at all
            return None
        if detail:
            season, timeslot = self.get_time_and_season()
            detail = self.replace_timeslots(detail, timeslot)
            return detail
        return None

    def return_appearance(self, looker):
        "This is called when e.g. the look command wants to retrieve the description of this object."
        raw_desc = self.db.raw_desc or ""
        update = False
        # get current time and season
        curr_season, curr_timeslot = self.get_time_and_season()
        # compare with previously stored slots
        last_season = self.ndb.last_season
        last_timeslot = self.ndb.last_timeslot
        if curr_season != last_season:
            # season changed. Load new desc, or a fallback.
            if curr_season == 'spring':
                new_raw_desc = self.db.spring_desc
            elif curr_season == 'summer':
                new_raw_desc = self.db.summer_desc
            elif curr_season == 'autumn':
                new_raw_desc = self.db.autumn_desc
            else:
                new_raw_desc = self.db.winter_desc
            if new_raw_desc:
                raw_desc = new_raw_desc
            else:
                # no seasonal desc set. Use fallback
                raw_desc = self.db.general_desc or self.db.desc
            self.db.raw_desc = raw_desc
            self.ndb.last_season = curr_season
            update = True
        if curr_timeslot != last_timeslot:
            # timeslot changed. Set update flag.
            self.ndb.last_timeslot = curr_timeslot
            update = True
        if update:
            # if anything changed we have to re-parse
            # the raw_desc for time markers
            # and re-save the description again.
            self.db.desc = self.replace_timeslots(self.db.raw_desc, curr_timeslot)
        # run the normal return_appearance method, now that desc is updated.
        return super(ExtendedRoom, self).return_appearance(looker)
# Custom Look command supporting Room details. Add this to
# the Default cmdset to use.
class CmdExtendedLook(default_cmds.CmdLook):
    """
    look

    Usage:
      look
      look <obj>
      look <room detail>
      look *<player>

    Observes your location, details at your location or objects in your vicinity.
    """
    def func(self):
        """
        Handle the looking - add fallback to details.
        """
        caller = self.caller
        args = self.args
        if args:
            # quiet=True: suppress the automatic error echo so we can try
            # room details before reporting a failed search.
            looking_at_obj = caller.search(args, use_nicks=True, quiet=True)
            if not looking_at_obj:
                # no object found. Check if there is a matching
                # detail at location.
                location = caller.location
                if location and hasattr(location, "return_detail") and callable(location.return_detail):
                    detail = location.return_detail(args)
                    if detail:
                        # we found a detail instead. Show that.
                        caller.msg(detail)
                        return
                # no detail found. Trigger delayed error messages
                _AT_SEARCH_RESULT(caller, args, looking_at_obj, False)
                return
            else:
                # we need to extract the match manually.
                looking_at_obj = utils.make_iter(looking_at_obj)[0]
        else:
            looking_at_obj = caller.location
            if not looking_at_obj:
                caller.msg("You have no location to look at!")
                return
        if not hasattr(looking_at_obj, 'return_appearance'):
            # this is likely due to us having a player instead
            looking_at_obj = looking_at_obj.character
        if not looking_at_obj.access(caller, "view"):
            caller.msg("Could not find '%s'." % args)
            return
        # get object's appearance
        caller.msg(looking_at_obj.return_appearance(caller))
        # the object's at_desc() method.
        looking_at_obj.at_desc(looker=caller)
# Custom build commands for setting seasonal descriptions
# and detailing extended rooms.
class CmdExtendedDesc(default_cmds.CmdDesc):
    """
    @desc - describe an object or room

    Usage:
      @desc[/switch] [<obj> =] <description>
      @detail[/del] [<key> = <description>]

    Switches for @desc:
      spring  - set description for <season> in current room
      summer
      autumn
      winter

    Switch for @detail:
      del   - delete a named detail

    Sets the "desc" attribute on an object. If an object is not given,
    describe the current room.

    The alias @detail allows to assign a "detail" (a non-object
    target for the look command) to the current room (only).

    You can also embed special time markers in your room description, like this:

        <night>In the darkness, the forest looks foreboding.</night>. Text
        marked this way will only display when the server is truly at the given
        time slot. The available times
        are night, morning, afternoon and evening.

    Note that @detail, seasons and time-of-day slots only works on rooms in this
    version of the @desc command.
    """
    aliases = ["@describe", "@detail"]

    def reset_times(self, obj):
        "By deleting the caches we force a re-load."
        obj.ndb.last_season = None
        obj.ndb.last_timeslot = None

    def func(self):
        "Define extended command"
        caller = self.caller
        location = caller.location
        if self.cmdstring == '@detail':
            # switch to detailing mode. This operates only on current location
            if not location:
                caller.msg("No location to detail!")
                return
            if not self.rhs:
                # no '=' used - list content of given detail
                if self.args in location.db.details:
                    string = "{wDetail '%s' on %s:\n{n" % (self.args, location)
                    string += location.db.details[self.args]
                    caller.msg(string)
                    return
                if not self.args:
                    # No args given. Return all details on location
                    string = "{wDetails on %s{n:\n" % location
                    string += "\n".join(" {w%s{n: %s" % (key, utils.crop(text)) for key, text in location.db.details.items())
                    caller.msg(string)
                    return
            if self.switches and self.switches[0] in 'del':
                # removing a detail.
                if self.lhs in location.db.details:
                    # Bug fix: the original did `del location.db.detail`, which
                    # removed a (nonexistent) Attribute instead of the dict
                    # entry, so details were never actually deleted.
                    del location.db.details[self.lhs]
                caller.msg("Detail %s deleted, if it existed." % self.lhs)
                self.reset_times(location)
                return
            # setting a detail
            location.db.details[self.lhs] = self.rhs
            caller.msg("Set Detail %s to '%s'." % (self.lhs, self.rhs))
            self.reset_times(location)
            return
        else:
            # we are doing a @desc call
            if not self.args:
                if location:
                    # list the available description slots
                    string = "{wDescriptions on %s{n:\n" % location.key
                    string += " {wspring:{n %s\n" % location.db.spring_desc
                    string += " {wsummer:{n %s\n" % location.db.summer_desc
                    string += " {wautumn:{n %s\n" % location.db.autumn_desc
                    string += " {wwinter:{n %s\n" % location.db.winter_desc
                    string += " {wgeneral:{n %s" % location.db.general_desc
                    caller.msg(string)
                    return
            if self.switches and self.switches[0] in ("spring",
                                                      "summer",
                                                      "autumn",
                                                      "winter"):
                # a seasonal switch was given
                if self.rhs:
                    caller.msg("Seasonal descs only works with rooms, not objects.")
                    return
                switch = self.switches[0]
                if not location:
                    caller.msg("No location was found!")
                    return
                if switch == 'spring':
                    location.db.spring_desc = self.args
                elif switch == 'summer':
                    location.db.summer_desc = self.args
                elif switch == 'autumn':
                    location.db.autumn_desc = self.args
                elif switch == 'winter':
                    location.db.winter_desc = self.args
                # clear flag to force an update
                self.reset_times(location)
                caller.msg("Seasonal description was set on %s." % location.key)
            else:
                # Not seasonal desc set, maybe this is not an extended room
                if self.rhs:
                    text = self.rhs
                    obj = caller.search(self.lhs)
                    if not obj:
                        return
                else:
                    text = self.args
                    obj = location
                # Bug fix: the original assigned `self.rhs` here, which is None
                # in the no-'=' branch above and would wipe the description.
                obj.db.desc = text  # a compatibility fallback
                if utils.inherits_from(obj, ExtendedRoom):
                    # this is an extendedroom, we need to reset
                    # times and set general_desc
                    obj.db.general_desc = text
                    self.reset_times(obj)
                    caller.msg("General description was set on %s." % obj.key)
                else:
                    caller.msg("The description was set on %s." % obj.key)
# Simple command to view the current time and season
class CmdGameTime(default_cmds.MuxCommand):
    """
    Check the game time

    Usage:
      time

    Shows the current in-game time and season.
    """
    key = "time"
    locks = "cmd:all()"
    help_category = "General"

    def func(self):
        "Reads time info from current room"
        location = self.caller.location
        if not location or not hasattr(location, "get_time_and_season"):
            self.caller.msg("No location available - you are outside time.")
            return
        season, timeslot = location.get_time_and_season()
        # pick the correct indefinite article for the season name
        prep = "an" if season == "autumn" else "a"
        self.caller.msg("It's %s %s day, in the %s." % (prep, season, timeslot))
| bsd-3-clause |
piffey/ansible | lib/ansible/modules/cloud/cloudstack/cs_sshkeypair.py | 50 | 8797 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <mail@renemoser.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_sshkeypair
short_description: Manages SSH keys on Apache CloudStack based clouds.
description:
- Create, register and remove SSH keys.
- If no key was found and no public key was provided and a new SSH
private/public key pair will be created and the private key will be returned.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
name:
description:
- Name of public key.
required: true
domain:
description:
- Domain the public key is related to.
account:
description:
- Account the public key is related to.
project:
description:
- Name of the project the public key to be registered in.
state:
description:
- State of the public key.
default: 'present'
choices: [ 'present', 'absent' ]
public_key:
description:
- String of the public key.
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# create a new private / public key pair:
- cs_sshkeypair:
name: linus@example.com
delegate_to: localhost
register: key
- debug:
msg: 'Private key is {{ key.private_key }}'
# remove a public key by its name:
- cs_sshkeypair:
name: linus@example.com
state: absent
delegate_to: localhost
# register your existing local public key:
- cs_sshkeypair:
name: linus@example.com
public_key: "{{ lookup('file', '~/.ssh/id_rsa.pub') }}"
delegate_to: localhost
'''
RETURN = '''
---
id:
description: UUID of the SSH public key.
returned: success
type: string
sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
name:
description: Name of the SSH public key.
returned: success
type: string
sample: linus@example.com
fingerprint:
description: Fingerprint of the SSH public key.
returned: success
type: string
sample: "86:5e:a3:e8:bd:95:7b:07:7c:c2:5c:f7:ad:8b:09:28"
private_key:
description: Private key of generated SSH keypair.
returned: changed
type: string
sample: "-----BEGIN RSA PRIVATE KEY-----\nMII...8tO\n-----END RSA PRIVATE KEY-----\n"
'''
# Optional third-party dependency: 'sshpubkeys' is needed for fingerprint
# calculation when registering an existing public key. The flag lets the
# module fail with a clear message later instead of at import time.
try:
    import sshpubkeys
    HAS_LIB_SSHPUBKEYS = True
except ImportError:
    HAS_LIB_SSHPUBKEYS = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
cs_required_together,
cs_argument_spec
)
class AnsibleCloudStackSshKey(AnsibleCloudStack):
def __init__(self, module):
    """Set up result-key mapping and the SSH key cache."""
    super(AnsibleCloudStackSshKey, self).__init__(module)
    # Map CloudStack API result keys to the module's return value names.
    self.returns = {
        'privatekey': 'private_key',
        'fingerprint': 'fingerprint',
    }
    # Cached lookup result used by get_ssh_key().
    self.ssh_key = None
def register_ssh_key(self, public_key):
ssh_key = self.get_ssh_key()
args = self._get_common_args()
name = self.module.params.get('name')
res = None
if not ssh_key:
self.result['changed'] = True
args['publickey'] = public_key
if not self.module.check_mode:
args['name'] = name
res = self.query_api('registerSSHKeyPair', **args)
else:
fingerprint = self._get_ssh_fingerprint(public_key)
if ssh_key['fingerprint'] != fingerprint:
self.result['changed'] = True
if not self.module.check_mode:
# delete the ssh key with matching name but wrong fingerprint
args['name'] = name
self.query_api('deleteSSHKeyPair', **args)
elif ssh_key['name'].lower() != name.lower():
self.result['changed'] = True
if not self.module.check_mode:
# delete the ssh key with matching fingerprint but wrong name
args['name'] = ssh_key['name']
self.query_api('deleteSSHKeyPair', **args)
# First match for key retrievment will be the fingerprint.
# We need to make another lookup if there is a key with identical name.
self.ssh_key = None
ssh_key = self.get_ssh_key()
if ssh_key and ssh_key['fingerprint'] != fingerprint:
args['name'] = name
self.query_api('deleteSSHKeyPair', **args)
if not self.module.check_mode and self.result['changed']:
args['publickey'] = public_key
args['name'] = name
res = self.query_api('registerSSHKeyPair', **args)
if res and 'keypair' in res:
ssh_key = res['keypair']
return ssh_key
def create_ssh_key(self):
ssh_key = self.get_ssh_key()
if not ssh_key:
self.result['changed'] = True
args = self._get_common_args()
args['name'] = self.module.params.get('name')
if not self.module.check_mode:
res = self.query_api('createSSHKeyPair', **args)
ssh_key = res['keypair']
return ssh_key
def remove_ssh_key(self, name=None):
ssh_key = self.get_ssh_key()
if ssh_key:
self.result['changed'] = True
args = self._get_common_args()
args['name'] = name or self.module.params.get('name')
if not self.module.check_mode:
self.query_api('deleteSSHKeyPair', **args)
return ssh_key
def _get_common_args(self):
return {
'domainid': self.get_domain('id'),
'account': self.get_account('name'),
'projectid': self.get_project('id')
}
def get_ssh_key(self):
if not self.ssh_key:
public_key = self.module.params.get('public_key')
if public_key:
# Query by fingerprint of the public key
args_fingerprint = self._get_common_args()
args_fingerprint['fingerprint'] = self._get_ssh_fingerprint(public_key)
ssh_keys = self.query_api('listSSHKeyPairs', **args_fingerprint)
if ssh_keys and 'sshkeypair' in ssh_keys:
self.ssh_key = ssh_keys['sshkeypair'][0]
# When key has not been found by fingerprint, use the name
if not self.ssh_key:
args_name = self._get_common_args()
args_name['name'] = self.module.params.get('name')
ssh_keys = self.query_api('listSSHKeyPairs', **args_name)
if ssh_keys and 'sshkeypair' in ssh_keys:
self.ssh_key = ssh_keys['sshkeypair'][0]
return self.ssh_key
def _get_ssh_fingerprint(self, public_key):
key = sshpubkeys.SSHKey(public_key)
if hasattr(key, 'hash_md5'):
return key.hash_md5().replace(to_native('MD5:'), to_native(''))
return key.hash()
def main():
    """Module entry point: build the argument spec, dispatch on state,
    and exit with the assembled result dict."""
    spec = cs_argument_spec()
    spec.update(dict(
        name=dict(required=True),
        public_key=dict(),
        domain=dict(),
        account=dict(),
        project=dict(),
        state=dict(choices=['present', 'absent'], default='present'),
    ))
    module = AnsibleModule(
        argument_spec=spec,
        required_together=cs_required_together(),
        supports_check_mode=True
    )
    # Fail early if the optional fingerprint library is unavailable.
    if not HAS_LIB_SSHPUBKEYS:
        module.fail_json(msg="python library sshpubkeys required: pip install sshpubkeys")
    handler = AnsibleCloudStackSshKey(module)
    if module.params.get('state') == 'absent':
        ssh_key = handler.remove_ssh_key()
    else:
        supplied_key = module.params.get('public_key')
        if supplied_key:
            # Register the caller-provided public key.
            ssh_key = handler.register_ssh_key(supplied_key)
        else:
            # No key supplied: have CloudStack generate one.
            ssh_key = handler.create_ssh_key()
    result = handler.get_result(ssh_key)
    module.exit_json(**result)
if __name__ == '__main__':
    main()
| gpl-3.0 |
ojengwa/oh-mainline | vendor/packages/typecheck/tests/support.py | 16 | 8420 | import types
import unittest
import sys
import os.path
import time
from unittest import _strclass
def run_all_tests(test_mod=None, tests=None):
    """Run the given suite (or every test discovered in test_mod) with the
    TODO-aware runner at verbosity 2."""
    suite = tests
    if suite is None:
        suite = unittest.TestLoader().loadTestsFromModule(test_mod)
    runner = TodoTextRunner(verbosity=2)
    runner.run(suite)
def adjust_path():
    """Prepend the parent of the script's directory to sys.path so the
    package under test is importable from the tests directory."""
    package_root = os.path.dirname(sys.path[0])
    sys.path = [package_root] + sys.path
class _Todo_Exception(Exception):
def __init__(self, message):
Exception.__init__(self, message)
self.message = message
class Todo_Failed(_Todo_Exception):
    """Raised when a @TODO-decorated test's body raised: still failing."""
    pass
class Todo_Passed(_Todo_Exception):
    """Raised when a @TODO-decorated test unexpectedly succeeded."""
    pass
def TODO(message="TODO"):
    """Decorator factory marking a test as not-yet-done work.

    The wrapped test never succeeds normally: if its body raises, the
    exception is converted to Todo_Failed (the work is still outstanding);
    if the body completes, Todo_Passed is raised so the TODO marker can be
    removed. TodoCase.run() routes both to the TodoResult buckets.
    """
    def decorator(func):
        def __todo_func(*args, **kwargs):
            try:
                func(*args, **kwargs)
            # Fixed: 'except Exception, e' is Python-2-only syntax and bound
            # an unused name; the bare form works on both Python 2 and 3.
            except Exception:
                raise Todo_Failed(message)
            raise Todo_Passed(message)
        # Preserve the wrapped function's identity for test reporting.
        __todo_func.__name__ = func.__name__
        __todo_func.__doc__ = func.__doc__
        __todo_func.__module__ = func.__module__
        return __todo_func
    return decorator
class TodoResult(unittest.TestResult):
    """A TestResult that additionally records TODO outcomes.

    @TODO-wrapped tests land in one of two extra buckets: todo_failed
    (body still raising) or todo_passed (body unexpectedly succeeding).
    A run counts as successful only when both buckets are empty.
    """

    def __init__(self):
        unittest.TestResult.__init__(self)
        # Lists of (test, formatted_traceback) pairs.
        self.todo_failed = []
        self.todo_passed = []

    def addTodoFailed(self, test, err):
        """Record a @TODO test whose body raised."""
        details = self._exc_info_to_string(err, test)
        self.todo_failed.append((test, details))

    def addTodoPassed(self, test, err):
        """Record a @TODO test that unexpectedly succeeded."""
        details = self._exc_info_to_string(err, test)
        self.todo_passed.append((test, details))

    def wasSuccessful(self):
        """Successful only if the base class agrees and nothing is TODO."""
        if not unittest.TestResult.wasSuccessful(self):
            return False
        return not self.stillTodo()

    def stillTodo(self):
        """Truthy when any TODO outcome was recorded."""
        return self.todo_failed or self.todo_passed
class TodoTextResult(unittest._TextTestResult, TodoResult):
    """Text-mode variant of TodoResult: records TODO outcomes and echoes
    them to the stream, honouring the runner's verbosity settings."""

    def __init__(self, *vargs, **kwargs):
        TodoResult.__init__(self)
        unittest._TextTestResult.__init__(self, *vargs, **kwargs)

    def _announce(self, long_label, short_label):
        # Mirror _TextTestResult's verbosity handling for TODO outcomes:
        # a full line in verbose mode, a two-letter code in dot mode.
        if self.showAll:
            self.stream.writeln(long_label)
        elif self.dots:
            self.stream.write(short_label)

    def addTodoFailed(self, test, err):
        TodoResult.addTodoFailed(self, test, err)
        self._announce("TODO FAIL", 'TF')

    def addTodoPassed(self, test, err):
        TodoResult.addTodoPassed(self, test, err)
        self._announce("TODO PASS", 'TP')

    def printErrors(self):
        # TODO details first, then the standard failure/error listings.
        self.printErrorList('TODO(PASS)', self.todo_passed)
        self.printErrorList('TODO(FAIL)', self.todo_failed)
        unittest._TextTestResult.printErrors(self)
class TodoCase(unittest.TestCase):
    """TestCase whose run() understands the Todo_* exceptions raised by the
    @TODO decorator and routes them to a TodoResult's addTodoFailed /
    addTodoPassed hooks instead of counting them as plain errors.
    """
    def __init__(self, methodName='runTest'):
        """ Create an instance of the class that will use the named test
            method when executed. Raises a ValueError if the instance does
            not have a method with the specified name.
        """
        unittest.TestCase.__init__(self, methodName)
        try:
            # Keep private (name-mangled) copies of the method name and its
            # docstring; the base class stores its own under other names.
            self.__testMethodName = methodName
            testMethod = getattr(self, methodName)
            self.__testMethodDoc = testMethod.__doc__
        except AttributeError:
            # Fixed: the old 'raise ValueError, "..."' statement form is
            # Python-2-only syntax; the call form works on both 2 and 3.
            raise ValueError("no such test method in %s: %s" %
                             (self.__class__, methodName))
    def shortDescription(self):
        """Returns a one-line description of the test, or None if no
        description has been provided.
        The default implementation of this method returns the first line of
        the specified test method's docstring.
        """
        doc = self.__testMethodDoc
        return doc and doc.split("\n")[0].strip() or None
    def __str__(self):
        return "%s (%s)" % (self.__testMethodName, _strclass(self.__class__))
    def __repr__(self):
        return "<%s testMethod=%s>" % \
               (_strclass(self.__class__), self.__testMethodName)
    def __exc_info(self):
        """Return a version of sys.exc_info() with the traceback frame
           minimised; usually the top level of the traceback frame is not
           needed.
        """
        exctype, excvalue, tb = sys.exc_info()
        if sys.platform[:4] == 'java': ## tracebacks look different in Jython
            return (exctype, excvalue, tb)
        # NOTE(review): both branches return the full traceback; the
        # minimisation described above is not actually performed here.
        return (exctype, excvalue, tb)
    def run(self, result):
        """Run the test, dispatching Todo_Failed/Todo_Passed to the
        TodoResult hooks; otherwise mirrors unittest.TestCase.run()."""
        result.startTest(self)
        testMethod = getattr(self, self.__testMethodName)
        try:
            try:
                self.setUp()
            except KeyboardInterrupt:
                raise
            except:
                result.addError(self, self.__exc_info())
                return
            ok = False
            try:
                testMethod()
                ok = True
            except Todo_Failed:
                # TODO test still failing: record, but not as an error.
                result.addTodoFailed(self, self.__exc_info())
            except Todo_Passed:
                # TODO test unexpectedly passing: flag it for cleanup.
                result.addTodoPassed(self, self.__exc_info())
            except self.failureException:
                result.addFailure(self, self.__exc_info())
            except KeyboardInterrupt:
                raise
            except:
                result.addError(self, self.__exc_info())
            try:
                self.tearDown()
            except KeyboardInterrupt:
                raise
            except:
                result.addError(self, self.__exc_info())
                ok = False
            if ok: result.addSuccess(self)
        finally:
            result.stopTest(self)
class TodoTextRunner(unittest.TextTestRunner):
    """TextTestRunner that produces a TodoTextResult and summarizes TODO
    counts alongside the usual failure/error counts."""

    def run(self, test):
        """Run the given test case or suite and print the summary."""
        result = TodoTextResult(self.stream, self.descriptions, self.verbosity)
        started = time.time()
        test.run(result)
        elapsed = time.time() - started
        result.printErrors()
        self.stream.writeln(result.separator2)
        total = result.testsRun
        plural = "s" if total != 1 else ""
        self.stream.writeln("Ran %d test%s in %.3fs" % (total, plural, elapsed))
        self.stream.writeln()
        if result.wasSuccessful():
            self.stream.writeln("OK")
        else:
            # Outstanding TODOs demote "FAILED" to the softer "TODO" label.
            if result.stillTodo():
                self.stream.write("TODO (")
            else:
                self.stream.write("FAILED (")
            counters = []
            for attr in ("failures", "errors", "todo_passed", "todo_failed"):
                counters.append("%s=%d" % (attr, len(getattr(result, attr))))
            self.stream.write(", ".join(counters))
            self.stream.writeln(")")
        return result
# Alias so the suite's test modules pick up the TODO-aware case class in
# place of the standard unittest.TestCase.
TestCase = TodoCase #unittest.TestCase
### The following are some convenience functions used throughout the test
### suite
def test_equality(eq_tests, ne_tests, repeats=10):
eq_error = "Problem with __eq__ with %s and %s"
ne_error = "Problem with __ne__ with %s and %s"
# We run this multiple times to try and shake out any errors
# related to differences in set/dict/etc ordering
for _ in xrange(0, repeats):
for (left, right) in eq_tests:
try:
assert left == right
except AssertionError:
raise AssertionError(eq_error % (left, right))
try:
assert not left != right
except AssertionError:
raise AssertionError(ne_error % (left, right))
for (left, right) in ne_tests:
try:
assert left != right
except AssertionError:
raise AssertionError(ne_error % (left, right))
try:
assert not left == right
except AssertionError:
raise AssertionError(eq_error % (left, right))
def test_hash(eq_tests, ne_tests, repeats=10):
hash_error = "Problem with hash() with %s and %s"
# We run this multiple times to try and shake out any errors
# related to differences in set/dict/etc ordering
for _ in xrange(0, repeats):
for (left, right) in eq_tests:
try:
assert hash(left) == hash(right)
except AssertionError:
raise AssertionError(hash_error % (left, right))
for (left, right) in ne_tests:
try:
assert hash(left) != hash(right)
except AssertionError:
raise AssertionError(hash_error % (left, right))
| agpl-3.0 |
zephyrplugins/zephyr | zephyr.plugin.jython/jython2.5.2rc3/Lib/modjy/modjy_publish.py | 109 | 6547 | ###
#
# Copyright Alan Kennedy.
#
# You may contact the copyright holder at this uri:
#
# http://www.xhaus.com/contact/modjy
#
# The licence under which this code is released is the Apache License v2.0.
#
# The terms and conditions of this license are listed in a file contained
# in the distribution that also contained this file, under the name
# LICENSE.txt.
#
# You may also read a copy of the license at the following web address.
#
# http://modjy.xhaus.com/LICENSE.txt
#
###
import sys
import synchronize
from java.io import File
from modjy_exceptions import *
class modjy_publisher:
    """Locates, loads and caches the WSGI application callable.

    NOTE(review): written as a mixin -- it relies on attributes supplied
    by the composing servlet class (self.params, self.log,
    self.servlet_context, self.expand_relative_path, self.raise_exc);
    confirm against the class that inherits this.
    """
    def init_publisher(self):
        """Resolve the application directory and add it to sys.path."""
        self.cache = None
        if self.params['app_directory']:
            self.app_directory = self.expand_relative_path(self.params['app_directory'])
        else:
            # Fall back to the web application's root as reported by the container.
            self.app_directory = self.servlet_context.getRealPath('/')
        self.params['app_directory'] = self.app_directory
        if self.app_directory is not None and not self.app_directory in sys.path:
            sys.path.append(self.app_directory)
    def map_uri(self, req, environ):
        """Return (source_file_path, callable_name) for this request.

        When 'callable_query_name' is configured, a query-string parameter
        of that name overrides 'app_callable_name'; when it is not
        configured at all, the callable name is forced to ''.
        """
        source_uri = '%s%s%s' % (self.app_directory, File.separator, self.params['app_filename'])
        callable_name = self.params['app_callable_name']
        if self.params['callable_query_name']:
            query_string = req.getQueryString()
            if query_string:
                # Scan the raw query string by hand for the configured parameter.
                for name_val in query_string.split('&'):
                    if name_val.find('=') != -1:
                        name, value = name_val.split('=', 1)
                    else:
                        name, value = name_val, ''
                    if name == self.params['callable_query_name']:
                        callable_name = value
        else:
            callable_name = ''
        return source_uri, callable_name
    def get_app_object(self, req, environ):
        """Populate the CGI path variables in environ and return the app callable.

        Dispatches to the importable mechanism when 'app_import_name' is
        set, otherwise to the old-style source-file mechanism. Serialized
        below via synchronize.make_synchronized since both mechanisms read
        and write self.cache.
        """
        environ["SCRIPT_NAME"] = "%s%s" % (req.getContextPath(), req.getServletPath())
        path_info = req.getPathInfo() or ""
        environ["PATH_INFO"] = path_info
        environ["PATH_TRANSLATED"] = File(self.app_directory, path_info).getPath()
        if self.params['app_import_name']:
            return self.get_app_object_importable(self.params['app_import_name'])
        else:
            if self.cache is None:
                self.cache = {}
            return self.get_app_object_old_style(req, environ)
    get_app_object = synchronize.make_synchronized(get_app_object)
    def get_app_object_importable(self, importable_name):
        """Return the application callable named by 'app_import_name'.

        The cache holds a single (application, instantiable, method_name)
        tuple. For instantiable names ('pkg.mod.Class()') the instance is
        cached only when 'cache_callables' is enabled; otherwise a fresh
        instance is created per request. When a method name was given, the
        bound method is returned rather than the object itself.
        """
        self.log.debug("Attempting to import application callable '%s'\n" % (importable_name, ))
        # Under the importable mechanism, the cache contains a single object
        if self.cache is None:
            application, instantiable, method_name = self.load_importable(importable_name.strip())
            if instantiable and self.params['cache_callables']:
                application = application()
            self.cache = application, instantiable, method_name
        application, instantiable, method_name = self.cache
        self.log.debug("Application is " + str(application))
        if instantiable and not self.params['cache_callables']:
            application = application()
            self.log.debug("Instantiated application is " + str(application))
        if method_name is not None:
            if not hasattr(application, method_name):
                self.log.fatal("Attribute error application callable '%s' as no method '%s'" % (application, method_name))
                self.raise_exc(ApplicationNotFound, "Attribute error application callable '%s' as no method '%s'" % (application, method_name))
            application = getattr(application, method_name)
            self.log.debug("Application method is " + str(application))
        return application
    def load_importable(self, name):
        """Import *name* and return (object, instantiable, method_name).

        Accepted forms: 'pkg.mod.obj', 'pkg.mod.Class()' and
        'pkg.mod.Class().method'. Import/attribute failures are reported
        through raise_exc(ApplicationNotFound).
        """
        try:
            instantiable = False ; method_name = None
            importable_name = name
            if name.find('()') != -1:
                instantiable = True
                importable_name, method_name = name.split('()')
                if method_name.startswith('.'):
                    method_name = method_name[1:]
                if not method_name:
                    method_name = None
            module_path, from_name = importable_name.rsplit('.', 1)
            imported = __import__(module_path, globals(), locals(), [from_name])
            imported = getattr(imported, from_name)
            return imported, instantiable, method_name
        except (ImportError, AttributeError), aix:
            self.log.fatal("Import error import application callable '%s': %s\n" % (name, str(aix)))
            self.raise_exc(ApplicationNotFound, "Failed to import app callable '%s': %s" % (name, str(aix)))
    def get_app_object_old_style(self, req, environ):
        """Return the callable loaded from the configured source file.

        Uses a cache keyed by (source_filename, callable_name); the entry
        is (re)loaded when caching is disabled, on a cache miss, or --
        when 'reload_on_mod' is set -- whenever the source file's
        modification time is newer than the cached entry's.
        """
        source_uri, callable_name = self.map_uri(req, environ)
        source_filename = source_uri
        if not self.params['cache_callables']:
            self.log.debug("Caching of callables disabled")
            return self.load_object(source_filename, callable_name)
        if not self.cache.has_key( (source_filename, callable_name) ):
            self.log.debug("Callable object not in cache: %s#%s" % (source_filename, callable_name) )
            return self.load_object(source_filename, callable_name)
        app_callable, last_mod = self.cache.get( (source_filename, callable_name) )
        self.log.debug("Callable object was in cache: %s#%s" % (source_filename, callable_name) )
        if self.params['reload_on_mod']:
            f = File(source_filename)
            if f.lastModified() > last_mod:
                self.log.info("Source file '%s' has been modified: reloading" % source_filename)
                return self.load_object(source_filename, callable_name)
        return app_callable
    def load_object(self, path, callable_name):
        """Exec the source file and cache (callable, last-modified-time).

        Failures are reported through raise_exc: a missing file maps to
        ApplicationNotFound, a missing or broken callable to NoCallable.
        """
        try:
            # Fresh namespace per load so reloads do not see stale globals.
            app_ns = {} ; execfile(path, app_ns)
            app_callable = app_ns[callable_name]
            f = File(path)
            self.cache[ (path, callable_name) ] = (app_callable, f.lastModified())
            return app_callable
        except IOError, ioe:
            self.raise_exc(ApplicationNotFound, "Application filename not found: %s" % path)
        except KeyError, k:
            self.raise_exc(NoCallable, "No callable named '%s' in %s" % (callable_name, path))
        except Exception, x:
            self.raise_exc(NoCallable, "Error loading jython callable '%s': %s" % (callable_name, str(x)) )
| epl-1.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.