Dataset rows with columns: repo_name | path | copies | size | content | license. Each row appears below as a header line followed by the file's content.
--- repo: shikhar413/openmc | path: tests/regression_tests/filter_energyfun/test.py | copies: 8 | size: 1854 | license: mit ---

import openmc
import pytest

from tests.testing_harness import PyAPITestHarness


@pytest.fixture
def model():
    model = openmc.model.Model()

    m = openmc.Material()
    m.set_density('g/cm3', 10.0)
    m.add_nuclide('Am241', 1.0)
    model.materials.append(m)

    s = openmc.Sphere(r=100.0, boundary_type='vacuum')
    c = openmc.Cell(fill=m, region=-s)
    model.geometry = openmc.Geometry([c])

    model.settings.batches = 5
    model.settings.inactive = 0
    model.settings.particles = 1000

    # Define Am242m / Am242 branching ratio from ENDF/B-VII.1 data.
    x = [1e-5, 3.69e-1, 1e3, 1e5, 6e5, 1e6, 2e6, 4e6, 3e7]
    y = [0.1, 0.1, 0.1333, 0.158, 0.18467, 0.25618, 0.4297, 0.48, 0.48]

    # Make an EnergyFunctionFilter directly from the x and y lists.
    filt1 = openmc.EnergyFunctionFilter(x, y)

    # Also make a filter with the .from_tabulated1d constructor. Make sure
    # the filters are identical.
    tab1d = openmc.data.Tabulated1D(x, y)
    filt2 = openmc.EnergyFunctionFilter.from_tabulated1d(tab1d)
    assert filt1 == filt2, 'Error with the .from_tabulated1d constructor'

    # Make tallies
    tallies = [openmc.Tally(), openmc.Tally()]
    for t in tallies:
        t.scores = ['(n,gamma)']
        t.nuclides = ['Am241']
    tallies[1].filters = [filt1]
    model.tallies.extend(tallies)

    return model


class FilterEnergyFunHarness(PyAPITestHarness):
    def _get_results(self):
        # Read the statepoint file.
        sp = openmc.StatePoint(self._sp_name)

        # Use tally arithmetic to compute the branching ratio.
        br_tally = sp.tallies[2] / sp.tallies[1]

        # Output the tally in a Pandas DataFrame.
        return br_tally.get_pandas_dataframe().to_string() + '\n'


def test_filter_energyfun(model):
    harness = FilterEnergyFunHarness('statepoint.5.h5', model)
    harness.main()
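
The EnergyFunctionFilter above multiplies tally scores by a function of incident energy built from the (x, y) table. The following sketch (not part of the row above; plain NumPy, assuming linear-linear interpolation between the tabulated points) shows the values that function takes:

import numpy as np

energies = [1e-5, 3.69e-1, 1e3, 1e5, 6e5, 1e6, 2e6, 4e6, 3e7]  # eV
ratios = [0.1, 0.1, 0.1333, 0.158, 0.18467, 0.25618, 0.4297, 0.48, 0.48]

# Evaluate the Am242m / Am242 branching ratio at a few incident energies.
for e in [1.0, 1e4, 5e5, 3e6]:
    print('E = %.1e eV -> branching ratio ~ %.4f'
          % (e, np.interp(e, energies, ratios)))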
--- repo: drbild/boto | path: boto/sdb/db/property.py | copies: 153 | size: 25086 | license: mit ---

# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.

import datetime
from boto.sdb.db.key import Key
from boto.utils import Password
from boto.sdb.db.query import Query
import re
import boto
import boto.s3.key
from boto.sdb.db.blob import Blob
from boto.compat import six, long_type


class Property(object):

    data_type = str
    type_name = ''
    name = ''
    verbose_name = ''

    def __init__(self, verbose_name=None, name=None, default=None,
                 required=False, validator=None, choices=None, unique=False):
        self.verbose_name = verbose_name
        self.name = name
        self.default = default
        self.required = required
        self.validator = validator
        self.choices = choices
        if self.name:
            self.slot_name = '_' + self.name
        else:
            self.slot_name = '_'
        self.unique = unique

    def __get__(self, obj, objtype):
        if obj:
            obj.load()
            return getattr(obj, self.slot_name)
        else:
            return None

    def __set__(self, obj, value):
        self.validate(value)
        # Fire off any on_set functions
        try:
            if obj._loaded and hasattr(obj, "on_set_%s" % self.name):
                fnc = getattr(obj, "on_set_%s" % self.name)
                value = fnc(value)
        except Exception:
            boto.log.exception("Exception running on_set_%s" % self.name)
        setattr(obj, self.slot_name, value)

    def __property_config__(self, model_class, property_name):
        self.model_class = model_class
        self.name = property_name
        self.slot_name = '_' + self.name

    def default_validator(self, value):
        if isinstance(value, six.string_types) or value == self.default_value():
            return
        if not isinstance(value, self.data_type):
            raise TypeError('Validation Error, %s.%s expecting %s, got %s' % (self.model_class.__name__, self.name, self.data_type, type(value)))

    def default_value(self):
        return self.default

    def validate(self, value):
        if self.required and value is None:
            raise ValueError('%s is a required property' % self.name)
        if self.choices and value and value not in self.choices:
            raise ValueError('%s not a valid choice for %s.%s' % (value, self.model_class.__name__, self.name))
        if self.validator:
            self.validator(value)
        else:
            self.default_validator(value)
        return value

    def empty(self, value):
        return not value

    def get_value_for_datastore(self, model_instance):
        return getattr(model_instance, self.name)

    def make_value_from_datastore(self, value):
        return value

    def get_choices(self):
        if callable(self.choices):
            return self.choices()
        return self.choices


def validate_string(value):
    if value is None:
        return
    elif isinstance(value, six.string_types):
        if len(value) > 1024:
            raise ValueError('Length of value greater than maxlength')
    else:
        raise TypeError('Expecting String, got %s' % type(value))


class StringProperty(Property):

    type_name = 'String'

    def __init__(self, verbose_name=None, name=None, default='',
                 required=False, validator=validate_string,
                 choices=None, unique=False):
        super(StringProperty, self).__init__(verbose_name, name, default, required,
                                             validator, choices, unique)


class TextProperty(Property):

    type_name = 'Text'

    def __init__(self, verbose_name=None, name=None, default='',
                 required=False, validator=None, choices=None,
                 unique=False, max_length=None):
        super(TextProperty, self).__init__(verbose_name, name, default, required,
                                           validator, choices, unique)
        self.max_length = max_length

    def validate(self, value):
        value = super(TextProperty, self).validate(value)
        if not isinstance(value, six.string_types):
            raise TypeError('Expecting Text, got %s' % type(value))
        if self.max_length and len(value) > self.max_length:
            raise ValueError('Length of value greater than maxlength %s' % self.max_length)
class PasswordProperty(StringProperty):
    """
    Hashed property whose original value cannot be
    retrieved, but can still be compared.

    Works by storing a hash of the original value instead
    of the original value. Once that's done, all that
    can be retrieved is the hash.

    The comparison

        obj.password == 'foo'

    generates a hash of 'foo' and compares it to the
    stored hash.

    The underlying data type for hashing, storing, and comparing
    is boto.utils.Password. The default hash function is
    defined there (currently sha512 in most cases, md5
    where sha512 is not available).

    It's unlikely you'll ever need to use a different hash
    function, but if you do, you can control the behavior
    in one of two ways:

      1) Specifying hashfunc in the PasswordProperty constructor

         import hashlib

         class MyModel(Model):
             password = PasswordProperty(hashfunc=hashlib.sha224)

      2) Subclassing Password and PasswordProperty

         class SHA224Password(Password):
             hashfunc = hashlib.sha224

         class SHA224PasswordProperty(PasswordProperty):
             data_type = SHA224Password
             type_name = "SHA224Password"

         class MyModel(Model):
             password = SHA224PasswordProperty()
    """
    data_type = Password
    type_name = 'Password'

    def __init__(self, verbose_name=None, name=None, default='', required=False,
                 validator=None, choices=None, unique=False, hashfunc=None):
        """
        The hashfunc parameter overrides the default hashfunc in boto.utils.Password.

        The remaining parameters are passed through to StringProperty.__init__"""
        super(PasswordProperty, self).__init__(verbose_name, name, default, required,
                                               validator, choices, unique)
        self.hashfunc = hashfunc

    def make_value_from_datastore(self, value):
        p = self.data_type(value, hashfunc=self.hashfunc)
        return p

    def get_value_for_datastore(self, model_instance):
        value = super(PasswordProperty, self).get_value_for_datastore(model_instance)
        if value and len(value):
            return str(value)
        else:
            return None

    def __set__(self, obj, value):
        if not isinstance(value, self.data_type):
            p = self.data_type(hashfunc=self.hashfunc)
            p.set(value)
            value = p
        super(PasswordProperty, self).__set__(obj, value)

    def __get__(self, obj, objtype):
        return self.data_type(super(PasswordProperty, self).__get__(obj, objtype), hashfunc=self.hashfunc)

    def validate(self, value):
        value = super(PasswordProperty, self).validate(value)
        if isinstance(value, self.data_type):
            if len(value) > 1024:
                raise ValueError('Length of value greater than maxlength')
        else:
            raise TypeError('Expecting %s, got %s' % (type(self.data_type), type(value)))
class BlobProperty(Property):
    data_type = Blob
    type_name = "blob"

    def __set__(self, obj, value):
        if value != self.default_value():
            if not isinstance(value, Blob):
                oldb = self.__get__(obj, type(obj))
                id = None
                if oldb:
                    id = oldb.id
                b = Blob(value=value, id=id)
                value = b
        super(BlobProperty, self).__set__(obj, value)


class S3KeyProperty(Property):

    data_type = boto.s3.key.Key
    type_name = 'S3Key'
    validate_regex = r"^s3:\/\/([^\/]*)\/(.*)$"

    def __init__(self, verbose_name=None, name=None, default=None,
                 required=False, validator=None, choices=None, unique=False):
        super(S3KeyProperty, self).__init__(verbose_name, name, default, required,
                                            validator, choices, unique)

    def validate(self, value):
        value = super(S3KeyProperty, self).validate(value)
        if value == self.default_value() or value == str(self.default_value()):
            return self.default_value()
        if isinstance(value, self.data_type):
            return
        match = re.match(self.validate_regex, value)
        if match:
            return
        raise TypeError('Validation Error, expecting %s, got %s' % (self.data_type, type(value)))

    def __get__(self, obj, objtype):
        value = super(S3KeyProperty, self).__get__(obj, objtype)
        if value:
            if isinstance(value, self.data_type):
                return value
            match = re.match(self.validate_regex, value)
            if match:
                s3 = obj._manager.get_s3_connection()
                bucket = s3.get_bucket(match.group(1), validate=False)
                k = bucket.get_key(match.group(2))
                if not k:
                    k = bucket.new_key(match.group(2))
                    k.set_contents_from_string("")
                return k
        else:
            return value

    def get_value_for_datastore(self, model_instance):
        value = super(S3KeyProperty, self).get_value_for_datastore(model_instance)
        if value:
            return "s3://%s/%s" % (value.bucket.name, value.name)
        else:
            return None
class IntegerProperty(Property):

    data_type = int
    type_name = 'Integer'

    def __init__(self, verbose_name=None, name=None, default=0, required=False,
                 validator=None, choices=None, unique=False, max=2147483647, min=-2147483648):
        super(IntegerProperty, self).__init__(verbose_name, name, default, required, validator, choices, unique)
        self.max = max
        self.min = min

    def validate(self, value):
        value = int(value)
        value = super(IntegerProperty, self).validate(value)
        if value > self.max:
            raise ValueError('Maximum value is %d' % self.max)
        if value < self.min:
            raise ValueError('Minimum value is %d' % self.min)
        return value

    def empty(self, value):
        return value is None

    def __set__(self, obj, value):
        if value == "" or value is None:
            value = 0
        return super(IntegerProperty, self).__set__(obj, value)


class LongProperty(Property):

    data_type = long_type
    type_name = 'Long'

    def __init__(self, verbose_name=None, name=None, default=0, required=False,
                 validator=None, choices=None, unique=False):
        super(LongProperty, self).__init__(verbose_name, name, default, required, validator, choices, unique)

    def validate(self, value):
        value = long_type(value)
        value = super(LongProperty, self).validate(value)
        min = -9223372036854775808
        max = 9223372036854775807
        if value > max:
            raise ValueError('Maximum value is %d' % max)
        if value < min:
            raise ValueError('Minimum value is %d' % min)
        return value

    def empty(self, value):
        return value is None


class BooleanProperty(Property):

    data_type = bool
    type_name = 'Boolean'

    def __init__(self, verbose_name=None, name=None, default=False, required=False,
                 validator=None, choices=None, unique=False):
        super(BooleanProperty, self).__init__(verbose_name, name, default, required, validator, choices, unique)

    def empty(self, value):
        return value is None


class FloatProperty(Property):

    data_type = float
    type_name = 'Float'

    def __init__(self, verbose_name=None, name=None, default=0.0, required=False,
                 validator=None, choices=None, unique=False):
        super(FloatProperty, self).__init__(verbose_name, name, default, required, validator, choices, unique)

    def validate(self, value):
        value = float(value)
        value = super(FloatProperty, self).validate(value)
        return value

    def empty(self, value):
        return value is None


class DateTimeProperty(Property):
    """This class handles both the datetime.datetime object
    and the datetime.date object. It can return either one,
    depending on the value stored in the database"""

    data_type = datetime.datetime
    type_name = 'DateTime'

    def __init__(self, verbose_name=None, auto_now=False, auto_now_add=False, name=None,
                 default=None, required=False, validator=None, choices=None, unique=False):
        super(DateTimeProperty, self).__init__(verbose_name, name, default, required, validator, choices, unique)
        self.auto_now = auto_now
        self.auto_now_add = auto_now_add

    def default_value(self):
        if self.auto_now or self.auto_now_add:
            return self.now()
        return super(DateTimeProperty, self).default_value()

    def validate(self, value):
        if value is None:
            return
        if isinstance(value, datetime.date):
            return value
        return super(DateTimeProperty, self).validate(value)

    def get_value_for_datastore(self, model_instance):
        if self.auto_now:
            setattr(model_instance, self.name, self.now())
        return super(DateTimeProperty, self).get_value_for_datastore(model_instance)

    def now(self):
        return datetime.datetime.utcnow()


class DateProperty(Property):

    data_type = datetime.date
    type_name = 'Date'

    def __init__(self, verbose_name=None, auto_now=False, auto_now_add=False, name=None,
                 default=None, required=False, validator=None, choices=None, unique=False):
        super(DateProperty, self).__init__(verbose_name, name, default, required, validator, choices, unique)
        self.auto_now = auto_now
        self.auto_now_add = auto_now_add

    def default_value(self):
        if self.auto_now or self.auto_now_add:
            return self.now()
        return super(DateProperty, self).default_value()

    def validate(self, value):
        value = super(DateProperty, self).validate(value)
        if value is None:
            return
        if not isinstance(value, self.data_type):
            raise TypeError('Validation Error, expecting %s, got %s' % (self.data_type, type(value)))

    def get_value_for_datastore(self, model_instance):
        if self.auto_now:
            setattr(model_instance, self.name, self.now())
        val = super(DateProperty, self).get_value_for_datastore(model_instance)
        if isinstance(val, datetime.datetime):
            val = val.date()
        return val

    def now(self):
        return datetime.date.today()


class TimeProperty(Property):

    data_type = datetime.time
    type_name = 'Time'

    def __init__(self, verbose_name=None, name=None,
                 default=None, required=False, validator=None, choices=None, unique=False):
        super(TimeProperty, self).__init__(verbose_name, name, default, required, validator, choices, unique)

    def validate(self, value):
        value = super(TimeProperty, self).validate(value)
        if value is None:
            return
        if not isinstance(value, self.data_type):
            raise TypeError('Validation Error, expecting %s, got %s' % (self.data_type, type(value)))
class ReferenceProperty(Property):

    data_type = Key
    type_name = 'Reference'

    def __init__(self, reference_class=None, collection_name=None,
                 verbose_name=None, name=None, default=None, required=False, validator=None, choices=None, unique=False):
        super(ReferenceProperty, self).__init__(verbose_name, name, default, required, validator, choices, unique)
        self.reference_class = reference_class
        self.collection_name = collection_name

    def __get__(self, obj, objtype):
        if obj:
            value = getattr(obj, self.slot_name)
            if value == self.default_value():
                return value
            # If the value is still the UUID for the referenced object, we need
            # to create the object now that the attribute has actually been
            # accessed. This lazy instantiation saves unnecessary roundtrips to
            # SimpleDB
            if isinstance(value, six.string_types):
                value = self.reference_class(value)
                setattr(obj, self.name, value)
            return value

    def __set__(self, obj, value):
        """Don't allow this object to be associated to itself.
        This causes bad things to happen."""
        if value is not None and (obj.id == value or (hasattr(value, "id") and obj.id == value.id)):
            raise ValueError("Cannot associate an object with itself!")
        return super(ReferenceProperty, self).__set__(obj, value)

    def __property_config__(self, model_class, property_name):
        super(ReferenceProperty, self).__property_config__(model_class, property_name)
        if self.collection_name is None:
            self.collection_name = '%s_%s_set' % (model_class.__name__.lower(), self.name)
        if hasattr(self.reference_class, self.collection_name):
            raise ValueError('duplicate property: %s' % self.collection_name)
        setattr(self.reference_class, self.collection_name,
                _ReverseReferenceProperty(model_class, property_name, self.collection_name))

    def check_uuid(self, value):
        # This does a bit of hand waving to "type check" the string
        t = value.split('-')
        if len(t) != 5:
            raise ValueError

    def check_instance(self, value):
        try:
            obj_lineage = value.get_lineage()
            cls_lineage = self.reference_class.get_lineage()
            if obj_lineage.startswith(cls_lineage):
                return
            raise TypeError('%s not instance of %s' % (obj_lineage, cls_lineage))
        except:
            raise ValueError('%s is not a Model' % value)

    def validate(self, value):
        if self.validator:
            self.validator(value)
        if self.required and value is None:
            raise ValueError('%s is a required property' % self.name)
        if value == self.default_value():
            return
        if not isinstance(value, six.string_types):
            self.check_instance(value)
class _ReverseReferenceProperty(Property):

    data_type = Query
    type_name = 'query'

    def __init__(self, model, prop, name):
        self.__model = model
        self.__property = prop
        self.collection_name = prop
        self.name = name
        self.item_type = model

    def __get__(self, model_instance, model_class):
        """Fetches collection of model instances of this collection property."""
        if model_instance is not None:
            query = Query(self.__model)
            if isinstance(self.__property, list):
                props = []
                for prop in self.__property:
                    props.append("%s =" % prop)
                return query.filter(props, model_instance)
            else:
                return query.filter(self.__property + ' =', model_instance)
        else:
            return self

    def __set__(self, model_instance, value):
        """Not possible to set a new collection."""
        raise ValueError('Virtual property is read-only')


class CalculatedProperty(Property):

    def __init__(self, verbose_name=None, name=None, default=None,
                 required=False, validator=None, choices=None,
                 calculated_type=int, unique=False, use_method=False):
        super(CalculatedProperty, self).__init__(verbose_name, name, default, required,
                                                 validator, choices, unique)
        self.calculated_type = calculated_type
        self.use_method = use_method

    def __get__(self, obj, objtype):
        value = self.default_value()
        if obj:
            try:
                value = getattr(obj, self.slot_name)
                if self.use_method:
                    value = value()
            except AttributeError:
                pass
        return value

    def __set__(self, obj, value):
        """Not possible to set a new AutoID."""
        pass

    def _set_direct(self, obj, value):
        if not self.use_method:
            setattr(obj, self.slot_name, value)

    def get_value_for_datastore(self, model_instance):
        if self.calculated_type in [str, int, bool]:
            value = self.__get__(model_instance, model_instance.__class__)
            return value
        else:
            return None


class ListProperty(Property):

    data_type = list
    type_name = 'List'

    def __init__(self, item_type, verbose_name=None, name=None, default=None, **kwds):
        if default is None:
            default = []
        self.item_type = item_type
        super(ListProperty, self).__init__(verbose_name, name, default=default, required=True, **kwds)

    def validate(self, value):
        if self.validator:
            self.validator(value)
        if value is not None:
            if not isinstance(value, list):
                value = [value]
            if self.item_type in six.integer_types:
                item_type = six.integer_types
            elif self.item_type in six.string_types:
                item_type = six.string_types
            else:
                item_type = self.item_type
            for item in value:
                if not isinstance(item, item_type):
                    if item_type == six.integer_types:
                        raise ValueError('Items in the %s list must all be integers.' % self.name)
                    else:
                        raise ValueError('Items in the %s list must all be %s instances' %
                                         (self.name, self.item_type.__name__))
        return value

    def empty(self, value):
        return value is None

    def default_value(self):
        return list(super(ListProperty, self).default_value())

    def __set__(self, obj, value):
        """Override the set method to allow setting the property to an
        instance of the item_type instead of requiring a list to be passed in"""
        if self.item_type in six.integer_types:
            item_type = six.integer_types
        elif self.item_type in six.string_types:
            item_type = six.string_types
        else:
            item_type = self.item_type
        if isinstance(value, item_type):
            value = [value]
        elif value is None:  # Override to allow setting this to "None" to remove everything
            value = []
        return super(ListProperty, self).__set__(obj, value)


class MapProperty(Property):

    data_type = dict
    type_name = 'Map'

    def __init__(self, item_type=str, verbose_name=None, name=None, default=None, **kwds):
        if default is None:
            default = {}
        self.item_type = item_type
        super(MapProperty, self).__init__(verbose_name, name, default=default, required=True, **kwds)

    def validate(self, value):
        value = super(MapProperty, self).validate(value)
        if value is not None:
            if not isinstance(value, dict):
                raise ValueError('Value must be of type dict')
            if self.item_type in six.integer_types:
                item_type = six.integer_types
            elif self.item_type in six.string_types:
                item_type = six.string_types
            else:
                item_type = self.item_type
            for key in value:
                if not isinstance(value[key], item_type):
                    if item_type == six.integer_types:
                        raise ValueError('Values in the %s Map must all be integers.' % self.name)
                    else:
                        raise ValueError('Values in the %s Map must all be %s instances' %
                                         (self.name, self.item_type.__name__))
        return value

    def empty(self, value):
        return value is None

    def default_value(self):
        return {}
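
A minimal usage sketch of the Property classes above, assuming boto.sdb.db.model.Model as the base class; actually persisting objects would additionally require AWS SimpleDB credentials:

from boto.sdb.db.model import Model
from boto.sdb.db.property import (StringProperty, IntegerProperty,
                                  BooleanProperty, DateTimeProperty)

class Employee(Model):
    # Every assignment goes through Property.__set__, which calls validate().
    name = StringProperty(required=True)
    age = IntegerProperty(min=0, max=150)
    active = BooleanProperty(default=True)
    hired = DateTimeProperty(auto_now_add=True)

e = Employee()
e.name = 'Pat'   # checked by validate_string (a string of <= 1024 chars)
e.age = 42       # range-checked against min/max by IntegerProperty.validate
# e.age = 200 would raise ValueError('Maximum value is 150')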
--- repo: goroboro/TVHeadFish | path: qml/pages/helper/tsreader.py | copies: 7 | size: 4957 | license: apache-2.0 ---

from dvb_charset_tables import conv_8859_table


def str2hex(s, n=None):
    r = ''
    i = 0
    for c in s:
        r = r + ('%02X ' % ord(c))
        i = i + 1
        if n is not None and i % n == 0:
            r = r + '\n'
    return r


def dvb_convert_date(data):
    return 0


convert_iso_8859 = [
    -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, -1, 11, 12, 13
]


def encode_utf8(c):
    if c <= 0x7f:
        return chr(c)
    elif c <= 0x7ff:
        return unichr((c >> 6 & 0x1f) | 0xc0) + unichr((c & 0x3f) | 0x80)
    return ''


def conv_8859(tnum, data):
    r = u''
    print 'TBL %d' % tnum
    tbl = conv_8859_table[tnum]
    for c in data:
        if ord(c) <= 0x7f:
            r = r + c
        elif ord(c) <= 0x9f:
            r = r + ' '
        else:
            uc = tbl[ord(c) - 0xa0]
            if uc:
                r = r + encode_utf8(uc)
    return r


def dvb_convert_string(data, conv):
    print 'convert(%r)' % (conv,)
    print repr(data)
    if not conv: return data
    return conv_8859(conv, data)


class TsPacket:

    def __init__(self, data):
        #print 'TS Packet:'
        #print str2hex(data, 16)
        hdr = map(ord, data[:4])
        if hdr[0] != 0x47:
            raise Exception('not valid TS packet')
        self.tx_err = (hdr[1] & 0x80) == 0x80
        self.pl_init = (hdr[1] & 0x40) == 0x40
        self.tx_prio = (hdr[1] & 0x20) == 0x20
        self.pid = ((hdr[1] & 0x1F) << 8) + hdr[2]
        self.tx_scr = (hdr[3] & 0xC0) >> 6
        self.adapt = (hdr[3] & 0x30) >> 4
        self.cont = (hdr[3] & 0x0F)
        self.data = data[4:]


class TsSection:

    def __init__(self, pid, data):
        hdr = map(ord, data[:3])
        self.pid = pid
        self.tid = hdr[0]
        self.iscrc = (hdr[1] & 0x80) == 0x80
        self.len = ((hdr[1] & 0x0F) << 8) + hdr[2]
        self.data = data[3:]
        #print 'TS Section:'
        #print hdr
        #print self.tid, self.len, len(data)

    def process(self):
        print 'TS Section:'
        print self.tid, self.len, len(self.data)
        #print str2hex(self.data, 16)
        #print self.data

        # Strip header
        hdr = map(ord, self.data[:11])
        plen = self.len - 11
        data = self.data[11:]
        #print str2hex(data, 16)

        # Process each event
        while plen:
            r = self.process_event(data, plen)
            if r < 0: break
            plen = plen - r
            data = data[r:]

    def get_string(self, data, dlen, charset):
        #print 'get_string():'
        #print str2hex(data, 16)
        l = ord(data[0])
        if not l: return (None, 0)
        #print l, dlen
        if l + 1 > dlen: return (None, -1)
        c = ord(data[1])
        print c
        conv = None
        if c == 0: return (None, -1)
        elif c <= 0xb:
            conv = convert_iso_8859[c + 4]
            data = data[1:]
            dlen = dlen - 1
        elif c <= 0xf: return (None, -1)
        elif c == 0x10: conv = 0
        elif c <= 0x14: return (None, -1)
        elif c == 0x15: conv = 0
        elif c <= 0x1f: return (None, -1)
        else:
            conv = 'default'
        s = dvb_convert_string(data[1:1+l], conv)
        return (s, l+1)

    def short_event(self, data, dlen):
        if dlen < 5: return None
        lang = data[:3]
        (title, l) = self.get_string(data[3:], dlen-3, None)
        if l < 0: return None
        (sumry, l) = self.get_string(data[3+l:], dlen-3-l, None)
        return (title, sumry)

    def process_event(self, data, elen):
        if (elen < 12): return -1

        # Get lengths
        hdr = map(ord, data[:12])
        dllen = ((hdr[10] & 0x0F) << 8) + hdr[11]
        data = data[12:]
        elen = elen - 12
        if elen < dllen: return -1
        ret = 12 + dllen

        # Header info
        eid = (hdr[0] << 8) + hdr[1]
        start = dvb_convert_date(hdr[2:])
        print 'process event (%d):' % dllen
        print '  EID     : %d' % eid
        print '  START   : %d' % start

        while dllen > 2:
            dtag = ord(data[0])
            dlen = ord(data[1])
            print 'dtag = 0x%02x, dlen = %d' % (dtag, dlen)
            dllen = dllen - 2
            data = data[2:]
            if dllen < dlen: return ret
            if dtag == 0x4d:
                (title, summary) = self.short_event(data, dlen)
                print '  TITLE   : %s' % title
                print '  SUMMARY : %s' % summary
            dllen = dllen - dlen
            data = data[dlen:]
        return ret


if __name__ == '__main__':
    import os, sys
    fp = open(sys.argv[1])
    cur = nxt = None
    while True:
        pkt = TsPacket(fp.read(188))

        # Restrict to EIT
        if pkt.pid != 0x12: continue

        # Start/End
        if pkt.pl_init:
            ptr = ord(pkt.data[0])
            if ptr == 0x00:
                cur = TsSection(pkt.pid, pkt.data[1:])
            else:
                if cur:
                    cur.data = cur.data + pkt.data[:1+ptr]
                nxt = TsSection(pkt.pid, pkt.data[1+ptr:])

        # Middle
        elif cur:
            cur.data = cur.data + pkt.data

        # Complete?
        if cur:
            if len(cur.data) >= cur.len:
                print 'Process Section:'
                #try:
                cur.process()
                #except: pass
                cur = None
                print
                sys.exit(0)
            else:
                print 'waiting for %d bytes' % (cur.len - len(cur.data))

        # Next
        if nxt: cur = nxt
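
A standalone sketch of the MPEG-TS header bit-twiddling that TsPacket.__init__ performs above, on a handcrafted 4-byte header (sync byte 0x47, PID 0x12, i.e. the EIT PID the __main__ loop filters on):

hdr = [0x47, 0x40, 0x12, 0x10]
assert hdr[0] == 0x47                    # sync byte
pl_init = (hdr[1] & 0x40) == 0x40        # payload_unit_start_indicator
pid = ((hdr[1] & 0x1F) << 8) + hdr[2]    # 13-bit packet identifier
cont = hdr[3] & 0x0F                     # 4-bit continuity counter
print('pid=0x%04x start=%s cc=%d' % (pid, pl_init, cont))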
--- repo: shashank971/edx-platform | path: openedx/core/djangoapps/course_groups/partition_scheme.py | copies: 105 | size: 4510 | license: agpl-3.0 ---

"""
Provides a UserPartition driver for cohorts.
"""
import logging

from courseware.masquerade import (  # pylint: disable=import-error
    get_course_masquerade,
    get_masquerading_group_info,
    is_masquerading_as_specific_student,
)
from xmodule.partitions.partitions import NoSuchUserPartitionGroupError

from .cohorts import get_cohort, get_group_info_for_cohort


log = logging.getLogger(__name__)


class CohortPartitionScheme(object):
    """
    This scheme uses lms cohorts (CourseUserGroups) and cohort-partition
    mappings (CourseUserGroupPartitionGroup) to map lms users into Partition
    Groups.
    """

    # pylint: disable=unused-argument
    @classmethod
    def get_group_for_user(cls, course_key, user, user_partition, track_function=None, use_cached=True):
        """
        Returns the Group from the specified user partition to which the user
        is assigned, via their cohort membership and any mappings from cohorts
        to partitions / groups that might exist.

        If the user has not yet been assigned to a cohort, an assignment *might*
        be created on-the-fly, as determined by the course's cohort config.
        Any such side-effects will be triggered inside the call to
        cohorts.get_cohort().

        If the user has no cohort mapping, or there is no (valid) cohort ->
        partition group mapping found, the function returns None.
        """
        # First, check if we have to deal with masquerading.
        # If the current user is masquerading as a specific student, use the
        # same logic as normal to return that student's group. If the current
        # user is masquerading as a generic student in a specific group, then
        # return that group.
        if get_course_masquerade(user, course_key) and not is_masquerading_as_specific_student(user, course_key):
            group_id, user_partition_id = get_masquerading_group_info(user, course_key)
            if user_partition_id == user_partition.id and group_id is not None:
                try:
                    return user_partition.get_group(group_id)
                except NoSuchUserPartitionGroupError:
                    return None
            # The user is masquerading as a generic student. We can't show any particular group.
            return None

        cohort = get_cohort(user, course_key, use_cached=use_cached)
        if cohort is None:
            # student doesn't have a cohort
            return None

        group_id, partition_id = get_group_info_for_cohort(cohort, use_cached=use_cached)
        if partition_id is None:
            # cohort isn't mapped to any partition group.
            return None

        if partition_id != user_partition.id:
            # if we have a match but the partition doesn't match the requested
            # one it means the mapping is invalid. the previous state of the
            # partition configuration may have been modified.
            log.warn(
                "partition mismatch in CohortPartitionScheme: %r",
                {
                    "requested_partition_id": user_partition.id,
                    "found_partition_id": partition_id,
                    "found_group_id": group_id,
                    "cohort_id": cohort.id,
                }
            )
            # fail silently
            return None

        try:
            return user_partition.get_group(group_id)
        except NoSuchUserPartitionGroupError:
            # if we have a match but the group doesn't exist in the partition,
            # it means the mapping is invalid. the previous state of the
            # partition configuration may have been modified.
            log.warn(
                "group not found in CohortPartitionScheme: %r",
                {
                    "requested_partition_id": user_partition.id,
                    "requested_group_id": group_id,
                    "cohort_id": cohort.id,
                },
                exc_info=True
            )
            # fail silently
            return None


def get_cohorted_user_partition(course):
    """
    Returns the first user partition from the specified course which uses the CohortPartitionScheme,
    or None if one is not found. Note that it is currently recommended that each course have only
    one cohorted user partition.
    """
    for user_partition in course.user_partitions:
        if user_partition.scheme == CohortPartitionScheme:
            return user_partition
    return None
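
A condensed sketch (hypothetical stubs, not edx-platform code) of the lookup order that CohortPartitionScheme.get_group_for_user implements once masquerading is ruled out:

def resolve_group(user, course_key, user_partition, get_cohort, get_mapping):
    cohort = get_cohort(user, course_key)
    if cohort is None:
        return None                          # student has no cohort
    group_id, partition_id = get_mapping(cohort)
    if partition_id is None or partition_id != user_partition.id:
        return None                          # unmapped or stale mapping: fail silently
    try:
        return user_partition.get_group(group_id)
    except Exception:                        # stands in for NoSuchUserPartitionGroupError
        return None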
--- repo: pcm17/tensorflow | path: tensorflow/contrib/distributions/python/ops/inverse_gamma.py | copies: 1 | size: 10539 | license: apache-2.0 ---

# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The InverseGamma distribution class."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops


__all__ = [
    "InverseGamma",
    "InverseGammaWithSoftplusConcentrationRate",
]
class InverseGamma(distribution.Distribution):
  """InverseGamma distribution.

  The `InverseGamma` distribution is defined over positive real numbers using
  parameters `concentration` (aka "alpha") and `rate` (aka "beta").

  #### Mathematical Details

  The probability density function (pdf) is,

  ```none
  pdf(x; alpha, beta, x > 0) = x**(-alpha - 1) exp(-beta / x) / Z
  Z = Gamma(alpha) beta**-alpha
  ```

  where:

  * `concentration = alpha`,
  * `rate = beta`,
  * `Z` is the normalizing constant, and,
  * `Gamma` is the [gamma function](
    https://en.wikipedia.org/wiki/Gamma_function).

  The cumulative distribution function (cdf) is,

  ```none
  cdf(x; alpha, beta, x > 0) = GammaInc(alpha, beta / x) / Gamma(alpha)
  ```

  where `GammaInc` is the [upper incomplete Gamma function](
  https://en.wikipedia.org/wiki/Incomplete_gamma_function).

  The parameters can be intuited via their relationship to mean and stddev,

  ```none
  concentration = alpha = (mean / stddev)**2
  rate = beta = mean / stddev**2
  ```

  Distribution parameters are automatically broadcast in all functions; see
  examples for details.

  WARNING: This distribution may draw 0-valued samples for small concentration
  values. See note in `tf.random_gamma` docstring.

  #### Examples

  ```python
  dist = InverseGamma(concentration=3.0, rate=2.0)
  dist2 = InverseGamma(concentration=[3.0, 4.0], rate=[2.0, 3.0])
  ```

  """
  def __init__(self,
               concentration,
               rate,
               validate_args=False,
               allow_nan_stats=True,
               name="InverseGamma"):
    """Construct InverseGamma with `concentration` and `rate` parameters.

    The parameters `concentration` and `rate` must be shaped in a way that
    supports broadcasting (e.g. `concentration + rate` is a valid operation).

    Args:
      concentration: Floating point tensor, the concentration params of the
        distribution(s). Must contain only positive values.
      rate: Floating point tensor, the inverse scale params of the
        distribution(s). Must contain only positive values.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
        (e.g., mean, mode, variance) use the value "`NaN`" to indicate the
        result is undefined. When `False`, an exception is raised if one or
        more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.

    Raises:
      TypeError: if `concentration` and `rate` are different dtypes.
    """
    parameters = locals()
    with ops.name_scope(name, values=[concentration, rate]) as ns:
      with ops.control_dependencies([
          check_ops.assert_positive(concentration),
          check_ops.assert_positive(rate),
      ] if validate_args else []):
        self._concentration = array_ops.identity(
            concentration, name="concentration")
        self._rate = array_ops.identity(rate, name="rate")
        contrib_tensor_util.assert_same_float_dtype(
            [self._concentration, self._rate])
    super(InverseGamma, self).__init__(
        dtype=self._concentration.dtype,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        reparameterization_type=distribution.NOT_REPARAMETERIZED,
        parameters=parameters,
        graph_parents=[self._concentration,
                       self._rate],
        name=ns)

  @staticmethod
  def _param_shapes(sample_shape):
    return dict(
        zip(("concentration", "rate"), ([ops.convert_to_tensor(
            sample_shape, dtype=dtypes.int32)] * 2)))

  @property
  def concentration(self):
    """Concentration parameter."""
    return self._concentration

  @property
  def rate(self):
    """Rate parameter."""
    return self._rate

  def _batch_shape_tensor(self):
    return array_ops.broadcast_dynamic_shape(
        array_ops.shape(self.concentration),
        array_ops.shape(self.rate))

  def _batch_shape(self):
    return array_ops.broadcast_static_shape(
        self.concentration.get_shape(),
        self.rate.get_shape())

  def _event_shape_tensor(self):
    return constant_op.constant([], dtype=dtypes.int32)

  def _event_shape(self):
    return tensor_shape.scalar()

  @distribution_util.AppendDocstring(
      """Note: See `tf.random_gamma` docstring for sampling details and
      caveats.""")
  def _sample_n(self, n, seed=None):
    return 1. / random_ops.random_gamma(
        shape=[n],
        alpha=self.concentration,
        beta=self.rate,
        dtype=self.dtype,
        seed=seed)

  def _log_prob(self, x):
    return self._log_unnormalized_prob(x) - self._log_normalization()

  def _prob(self, x):
    return math_ops.exp(self._log_prob(x))

  def _log_cdf(self, x):
    return math_ops.log(self._cdf(x))

  def _cdf(self, x):
    x = self._maybe_assert_valid_sample(x)
    # Note that igammac returns the upper regularized incomplete gamma
    # function Q(a, x), which is what we want for the CDF.
    return math_ops.igammac(self.concentration, self.rate / x)

  def _log_unnormalized_prob(self, x):
    x = self._maybe_assert_valid_sample(x)
    return -(1. + self.concentration) * math_ops.log(x) - self.rate / x

  def _log_normalization(self):
    return (math_ops.lgamma(self.concentration)
            - self.concentration * math_ops.log(self.rate))

  def _entropy(self):
    return (self.concentration
            + math_ops.log(self.rate)
            + math_ops.lgamma(self.concentration)
            - ((1. + self.concentration) *
               math_ops.digamma(self.concentration)))

  @distribution_util.AppendDocstring(
      """The mean of an inverse gamma distribution is
      `rate / (concentration - 1)`, when `concentration > 1`, and `NaN`
      otherwise. If `self.allow_nan_stats` is `False`, an exception will be
      raised rather than returning `NaN`""")
  def _mean(self):
    mean = self.rate / (self.concentration - 1.)
    if self.allow_nan_stats:
      nan = array_ops.fill(
          self.batch_shape_tensor(),
          np.array(np.nan, dtype=self.dtype.as_numpy_dtype()),
          name="nan")
      return array_ops.where(self.concentration > 1., mean, nan)
    else:
      return control_flow_ops.with_dependencies([
          check_ops.assert_less(
              array_ops.ones([], self.dtype), self.concentration,
              message="mean undefined when any concentration <= 1"),
      ], mean)

  @distribution_util.AppendDocstring(
      """Variance for inverse gamma is defined only for `concentration > 2`. If
      `self.allow_nan_stats` is `False`, an exception will be raised rather
      than returning `NaN`.""")
  def _variance(self):
    var = (math_ops.square(self.rate)
           / math_ops.square(self.concentration - 1.)
           / (self.concentration - 2.))
    if self.allow_nan_stats:
      nan = array_ops.fill(
          self.batch_shape_tensor(),
          np.array(np.nan, dtype=self.dtype.as_numpy_dtype()),
          name="nan")
      return array_ops.where(self.concentration > 2., var, nan)
    else:
      return control_flow_ops.with_dependencies([
          check_ops.assert_less(
              constant_op.constant(2., dtype=self.dtype),
              self.concentration,
              message="variance undefined when any concentration <= 2"),
      ], var)

  @distribution_util.AppendDocstring(
      """The mode of an inverse gamma distribution is `rate / (concentration +
      1)`.""")
  def _mode(self):
    return self.rate / (1. + self.concentration)

  def _maybe_assert_valid_sample(self, x):
    contrib_tensor_util.assert_same_float_dtype(
        tensors=[x], dtype=self.dtype)
    if not self.validate_args:
      return x
    return control_flow_ops.with_dependencies([
        check_ops.assert_positive(x),
    ], x)


class InverseGammaWithSoftplusConcentrationRate(InverseGamma):
  """`InverseGamma` with softplus of `concentration` and `rate`."""

  def __init__(self,
               concentration,
               rate,
               validate_args=False,
               allow_nan_stats=True,
               name="InverseGammaWithSoftplusConcentrationRate"):
    parameters = locals()
    with ops.name_scope(name, values=[concentration, rate]) as ns:
      super(InverseGammaWithSoftplusConcentrationRate, self).__init__(
          concentration=nn.softplus(concentration,
                                    name="softplus_concentration"),
          rate=nn.softplus(rate, name="softplus_rate"),
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          name=ns)
    self._parameters = parameters
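
A quick numerical sanity check (NumPy only, independent of TensorFlow) of the moment formulas encoded in _mean and _variance above: for concentration alpha and rate beta, mean = beta / (alpha - 1) and variance = beta**2 / ((alpha - 1)**2 * (alpha - 2)). An inverse-gamma draw is the reciprocal of a gamma draw with scale 1/beta, mirroring _sample_n:

import numpy as np

alpha, beta = 5.0, 2.0
rng = np.random.default_rng(0)
samples = 1.0 / rng.gamma(shape=alpha, scale=1.0 / beta, size=1_000_000)
print(samples.mean(), beta / (alpha - 1.0))                          # both ~0.5
print(samples.var(), beta**2 / ((alpha - 1.0)**2 * (alpha - 2.0)))   # both ~0.0833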
--- repo: larrybradley/astropy | path: astropy/modeling/tests/test_models.py | copies: 4 | size: 29753 ---

# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for model evaluation.
Compare the results of some models with other programs.
"""
# pylint: disable=invalid-name, no-member
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_equal

from astropy import units as u
from astropy.modeling import fitting, models
from astropy.modeling.models import Gaussian2D
from astropy.modeling.core import FittableModel
from astropy.modeling.parameters import Parameter
from astropy.modeling.polynomial import PolynomialBase
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils import NumpyRNGContext
from astropy.utils.compat.optional_deps import HAS_SCIPY  # noqa

from .example_models import models_1D, models_2D


@pytest.mark.skipif('not HAS_SCIPY')
def test_custom_model(amplitude=4, frequency=1):

    def sine_model(x, amplitude=4, frequency=1):
        """
        Model function
        """
        return amplitude * np.sin(2 * np.pi * frequency * x)

    def sine_deriv(x, amplitude=4, frequency=1):
        """
        Jacobian of model function, e.g. derivative of the function with
        respect to the *parameters*
        """
        da = np.sin(2 * np.pi * frequency * x)
        df = 2 * np.pi * x * amplitude * np.cos(2 * np.pi * frequency * x)
        return np.vstack((da, df))

    SineModel = models.custom_model(sine_model, fit_deriv=sine_deriv)

    x = np.linspace(0, 4, 50)
    sin_model = SineModel()

    sin_model.evaluate(x, 5., 2.)
    sin_model.fit_deriv(x, 5., 2.)

    np.random.seed(0)
    data = sin_model(x) + np.random.rand(len(x)) - 0.5

    fitter = fitting.LevMarLSQFitter()
    model = fitter(sin_model, x, data)

    assert np.all((np.array([model.amplitude.value, model.frequency.value]) -
                   np.array([amplitude, frequency])) < 0.001)


def test_custom_model_init():
    @models.custom_model
    def SineModel(x, amplitude=4, frequency=1):
        """Model function"""
        return amplitude * np.sin(2 * np.pi * frequency * x)

    sin_model = SineModel(amplitude=2., frequency=0.5)
    assert sin_model.amplitude == 2.
    assert sin_model.frequency == 0.5


def test_custom_model_defaults():
    @models.custom_model
    def SineModel(x, amplitude=4, frequency=1):
        """Model function"""
        return amplitude * np.sin(2 * np.pi * frequency * x)

    sin_model = SineModel()
    assert SineModel.amplitude.default == 4
    assert SineModel.frequency.default == 1
    assert sin_model.amplitude == 4
    assert sin_model.frequency == 1
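

# A usage sketch (not one of the original tests; inert unless called): the
# same models.custom_model decorator pattern as above, showing that the
# function's keyword defaults become Parameter defaults on the generated class.
def _custom_model_usage_sketch():
    @models.custom_model
    def Line(x, slope=1.0, intercept=0.0):
        return slope * x + intercept

    assert Line.intercept.default == 0.0
    m = Line(slope=2.0)
    assert m(3.0) == 6.0  # slope * x + intercept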
def test_inconsistent_input_shapes():
    g = Gaussian2D()
    x = np.arange(-1., 1, .2)
    y = x.copy()
    # check scalar input broadcasting works
    assert np.abs(g(x, 0) - g(x, 0 * x)).sum() == 0
    # but not array broadcasting
    x.shape = (10, 1)
    y.shape = (1, 10)
    result = g(x, y)
    assert result.shape == (10, 10)


def test_custom_model_bounding_box():
    """Test bounding box evaluation for a 3D model"""
    def ellipsoid(x, y, z, x0=13, y0=10, z0=8, a=4, b=3, c=2, amp=1):
        rsq = ((x - x0) / a) ** 2 + ((y - y0) / b) ** 2 + ((z - z0) / c) ** 2
        val = (rsq < 1) * amp
        return val

    class Ellipsoid3D(models.custom_model(ellipsoid)):
        @property
        def bounding_box(self):
            return ((self.z0 - self.c, self.z0 + self.c),
                    (self.y0 - self.b, self.y0 + self.b),
                    (self.x0 - self.a, self.x0 + self.a))

    model = Ellipsoid3D()
    bbox = model.bounding_box

    zlim, ylim, xlim = bbox
    dz, dy, dx = np.diff(bbox) / 2
    z1, y1, x1 = np.mgrid[slice(zlim[0], zlim[1] + 1),
                          slice(ylim[0], ylim[1] + 1),
                          slice(xlim[0], xlim[1] + 1)]
    z2, y2, x2 = np.mgrid[slice(zlim[0] - dz, zlim[1] + dz + 1),
                          slice(ylim[0] - dy, ylim[1] + dy + 1),
                          slice(xlim[0] - dx, xlim[1] + dx + 1)]

    arr = model(x2, y2, z2)
    sub_arr = model(x1, y1, z1)

    # check for flux agreement
    assert abs(arr.sum() - sub_arr.sum()) < arr.sum() * 1e-7
class Fittable2DModelTester:
    """
    Test class for all two dimensional parametric models.

    Test values have to be defined in example_models.py. It currently tests
    the model with different input types, evaluates the model at different
    positions, and ensures that it gives the correct values. It also tests
    whether the model works with non-linear fitters.

    This can be used as a base class for user defined model testing.
    """
    def setup_class(self):
        self.N = 100
        self.M = 100
        self.eval_error = 0.0001
        self.fit_error = 0.1
        self.x = 5.3
        self.y = 6.7
        self.x1 = np.arange(1, 10, .1)
        self.y1 = np.arange(1, 10, .1)
        self.y2, self.x2 = np.mgrid[:10, :8]

    def test_input2D(self, model_class, test_parameters):
        """Test model with different input types."""
        model = create_model(model_class, test_parameters)
        model(self.x, self.y)
        model(self.x1, self.y1)
        model(self.x2, self.y2)

    def test_eval2D(self, model_class, test_parameters):
        """Test model values at certain given points"""
        model = create_model(model_class, test_parameters)
        x = test_parameters['x_values']
        y = test_parameters['y_values']
        z = test_parameters['z_values']
        assert np.all(np.abs(model(x, y) - z) < self.eval_error)

    def test_bounding_box2D(self, model_class, test_parameters):
        """Test bounding box evaluation"""
        model = create_model(model_class, test_parameters)

        # testing setter
        model.bounding_box = ((-5, 5), (-5, 5))
        assert model.bounding_box == ((-5, 5), (-5, 5))

        model.bounding_box = None
        with pytest.raises(NotImplementedError):
            model.bounding_box

        # test the exception if dimensions don't match
        with pytest.raises(ValueError):
            model.bounding_box = (-5, 5)

        del model.bounding_box

        try:
            bbox = model.bounding_box
        except NotImplementedError:
            pytest.skip("Bounding_box is not defined for model.")

        ylim, xlim = bbox
        dy, dx = np.diff(bbox) / 2
        y1, x1 = np.mgrid[slice(ylim[0], ylim[1] + 1),
                          slice(xlim[0], xlim[1] + 1)]
        y2, x2 = np.mgrid[slice(ylim[0] - dy, ylim[1] + dy + 1),
                          slice(xlim[0] - dx, xlim[1] + dx + 1)]

        arr = model(x2, y2)
        sub_arr = model(x1, y1)

        # check for flux agreement
        assert abs(arr.sum() - sub_arr.sum()) < arr.sum() * 1e-7

    @pytest.mark.skipif('not HAS_SCIPY')
    def test_fitter2D(self, model_class, test_parameters):
        """Test if the parametric model works with the fitter."""
        x_lim = test_parameters['x_lim']
        y_lim = test_parameters['y_lim']

        parameters = test_parameters['parameters']
        model = create_model(model_class, test_parameters)

        if isinstance(parameters, dict):
            parameters = [parameters[name] for name in model.param_names]

        if "log_fit" in test_parameters:
            if test_parameters['log_fit']:
                x = np.logspace(x_lim[0], x_lim[1], self.N)
                y = np.logspace(y_lim[0], y_lim[1], self.N)
        else:
            x = np.linspace(x_lim[0], x_lim[1], self.N)
            y = np.linspace(y_lim[0], y_lim[1], self.N)
        xv, yv = np.meshgrid(x, y)

        np.random.seed(0)
        # add 10% noise to the amplitude
        noise = np.random.rand(self.N, self.N) - 0.5
        data = model(xv, yv) + 0.1 * parameters[0] * noise
        fitter = fitting.LevMarLSQFitter()
        new_model = fitter(model, xv, yv, data)

        params = [getattr(new_model, name) for name in new_model.param_names]
        fixed = [param.fixed for param in params]
        expected = np.array([val for val, fixed in zip(parameters, fixed)
                             if not fixed])
        fitted = np.array([param.value for param in params
                           if not param.fixed])
        assert_allclose(fitted, expected,
                        atol=self.fit_error)

    @pytest.mark.skipif('not HAS_SCIPY')
    def test_deriv_2D(self, model_class, test_parameters):
        """
        Test the derivative of a model by fitting with an estimated and
        analytical derivative.
        """
        x_lim = test_parameters['x_lim']
        y_lim = test_parameters['y_lim']

        if model_class.fit_deriv is None:
            pytest.skip("Derivative function is not defined for model.")
        if issubclass(model_class, PolynomialBase):
            pytest.skip("Skip testing derivative of polynomials.")

        if "log_fit" in test_parameters:
            if test_parameters['log_fit']:
                x = np.logspace(x_lim[0], x_lim[1], self.N)
                y = np.logspace(y_lim[0], y_lim[1], self.M)
        else:
            x = np.linspace(x_lim[0], x_lim[1], self.N)
            y = np.linspace(y_lim[0], y_lim[1], self.M)
        xv, yv = np.meshgrid(x, y)

        try:
            model_with_deriv = create_model(model_class, test_parameters,
                                            use_constraints=False,
                                            parameter_key='deriv_initial')
            model_no_deriv = create_model(model_class, test_parameters,
                                          use_constraints=False,
                                          parameter_key='deriv_initial')
            model = create_model(model_class, test_parameters,
                                 use_constraints=False,
                                 parameter_key='deriv_initial')
        except KeyError:
            model_with_deriv = create_model(model_class, test_parameters,
                                            use_constraints=False)
            model_no_deriv = create_model(model_class, test_parameters,
                                          use_constraints=False)
            model = create_model(model_class, test_parameters,
                                 use_constraints=False)

        # add 10% noise to the amplitude
        rsn = np.random.default_rng(0)
        amplitude = test_parameters['parameters'][0]
        n = 0.1 * amplitude * (rsn.random((self.M, self.N)) - 0.5)

        data = model(xv, yv) + n
        fitter_with_deriv = fitting.LevMarLSQFitter()
        new_model_with_deriv = fitter_with_deriv(model_with_deriv, xv, yv,
                                                 data)
        fitter_no_deriv = fitting.LevMarLSQFitter()
        new_model_no_deriv = fitter_no_deriv(model_no_deriv, xv, yv, data,
                                             estimate_jacobian=True)
        assert_allclose(new_model_with_deriv.parameters,
                        new_model_no_deriv.parameters,
                        rtol=0.1)
class Fittable1DModelTester:
    """
    Test class for all one dimensional parametric models.

    Test values have to be defined in example_models.py. It currently tests
    the model with different input types, evaluates the model at different
    positions, and ensures that it gives the correct values. It also tests
    whether the model works with non-linear fitters.

    This can be used as a base class for user defined model testing.
    """
    def setup_class(self):
        self.N = 100
        self.M = 100
        self.eval_error = 0.0001
        self.fit_error = 0.1
        self.x = 5.3
        self.y = 6.7
        self.x1 = np.arange(1, 10, .1)
        self.y1 = np.arange(1, 10, .1)
        self.y2, self.x2 = np.mgrid[:10, :8]

    @pytest.mark.filterwarnings(r'ignore:.*:RuntimeWarning')
    def test_input1D(self, model_class, test_parameters):
        """Test model with different input types."""
        model = create_model(model_class, test_parameters)
        model(self.x)
        model(self.x1)
        model(self.x2)

    def test_eval1D(self, model_class, test_parameters):
        """
        Test model values at certain given points
        """
        model = create_model(model_class, test_parameters)
        x = test_parameters['x_values']
        y = test_parameters['y_values']
        assert_allclose(model(x), y, atol=self.eval_error)

    def test_bounding_box1D(self, model_class, test_parameters):
        """Test bounding box evaluation"""
        model = create_model(model_class, test_parameters)

        # testing setter
        model.bounding_box = (-5, 5)
        model.bounding_box = None

        with pytest.raises(NotImplementedError):
            model.bounding_box

        del model.bounding_box

        # test exception if dimensions don't match
        with pytest.raises(ValueError):
            model.bounding_box = 5

        try:
            bbox = model.bounding_box
        except NotImplementedError:
            pytest.skip("Bounding_box is not defined for model.")

        if isinstance(model, models.Lorentz1D) or isinstance(model, models.Drude1D):
            rtol = 0.01  # 1% agreement is enough due to very extended wings
            ddx = 0.1  # Finer sampling to "integrate" flux for narrow peak
        else:
            rtol = 1e-7
            ddx = 1

        dx = np.diff(bbox) / 2
        x1 = np.mgrid[slice(bbox[0], bbox[1] + 1, ddx)]
        x2 = np.mgrid[slice(bbox[0] - dx, bbox[1] + dx + 1, ddx)]
        arr = model(x2)
        sub_arr = model(x1)

        # check for flux agreement
        assert abs(arr.sum() - sub_arr.sum()) < arr.sum() * rtol

    @pytest.mark.skipif('not HAS_SCIPY')
    def test_fitter1D(self, model_class, test_parameters):
        """
        Test if the parametric model works with the fitter.
        """
        x_lim = test_parameters['x_lim']
        parameters = test_parameters['parameters']
        model = create_model(model_class, test_parameters)

        if isinstance(parameters, dict):
            parameters = [parameters[name] for name in model.param_names]

        if "log_fit" in test_parameters:
            if test_parameters['log_fit']:
                x = np.logspace(x_lim[0], x_lim[1], self.N)
        else:
            x = np.linspace(x_lim[0], x_lim[1], self.N)

        np.random.seed(0)
        # add 10% noise to the amplitude
        relative_noise_amplitude = 0.01
        data = ((1 + relative_noise_amplitude * np.random.randn(len(x))) *
                model(x))
        fitter = fitting.LevMarLSQFitter()
        new_model = fitter(model, x, data)

        # Only check parameters that were free in the fit
        params = [getattr(new_model, name) for name in new_model.param_names]
        fixed = [param.fixed for param in params]
        expected = np.array([val for val, fixed in zip(parameters, fixed)
                             if not fixed])
        fitted = np.array([param.value for param in params
                           if not param.fixed])
        assert_allclose(fitted, expected, atol=self.fit_error)

    @pytest.mark.skipif('not HAS_SCIPY')
    @pytest.mark.filterwarnings(r'ignore:.*:RuntimeWarning')
    def test_deriv_1D(self, model_class, test_parameters):
        """
        Test the derivative of a model by comparing results with an estimated
        derivative.
        """
        x_lim = test_parameters['x_lim']

        if model_class.fit_deriv is None:
            pytest.skip("Derivative function is not defined for model.")
        if issubclass(model_class, PolynomialBase):
            pytest.skip("Skip testing derivative of polynomials.")

        if "log_fit" in test_parameters:
            if test_parameters['log_fit']:
                x = np.logspace(x_lim[0], x_lim[1], self.N)
        else:
            x = np.linspace(x_lim[0], x_lim[1], self.N)

        parameters = test_parameters['parameters']
        model_with_deriv = create_model(model_class, test_parameters,
                                        use_constraints=False)
        model_no_deriv = create_model(model_class, test_parameters,
                                      use_constraints=False)

        # NOTE: PR 10644 replaced deprecated usage of RandomState but could not
        # find a new seed that did not cause test failure, resorted to hardcoding.
        # add 10% noise to the amplitude
        rsn_rand_1234567890 = np.array([
            0.61879477, 0.59162363, 0.88868359, 0.89165480, 0.45756748,
            0.77818808, 0.26706377, 0.99610621, 0.54009489, 0.53752161,
            0.40099938, 0.70540579, 0.40518559, 0.94999075, 0.03075388,
            0.13602495, 0.08297726, 0.42352224, 0.23449723, 0.74743526,
            0.65177865, 0.68998682, 0.16413419, 0.87642114, 0.44733314,
            0.57871104, 0.52377835, 0.62689056, 0.34869427, 0.26209748,
            0.07498055, 0.17940570, 0.82999425, 0.98759822, 0.11326099,
            0.63846415, 0.73056694, 0.88321124, 0.52721004, 0.66487673,
            0.74209309, 0.94083846, 0.70123128, 0.29534353, 0.76134369,
            0.77593881, 0.36985514, 0.89519067, 0.33082813, 0.86108824,
            0.76897859, 0.61343376, 0.43870907, 0.91913538, 0.76958966,
            0.51063556, 0.04443249, 0.57463611, 0.31382006, 0.41221713,
            0.21531811, 0.03237521, 0.04166386, 0.73109303, 0.74556052,
            0.64716325, 0.77575353, 0.64599254, 0.16885816, 0.48485480,
            0.53844248, 0.99690349, 0.23657074, 0.04119088, 0.46501519,
            0.35739006, 0.23002665, 0.53420791, 0.71639475, 0.81857486,
            0.73994342, 0.07948837, 0.75688276, 0.13240193, 0.48465576,
            0.20624753, 0.02298276, 0.54257873, 0.68123230, 0.35887468,
            0.36296147, 0.67368397, 0.29505730, 0.66558885, 0.93652252,
            0.36755130, 0.91787687, 0.75922703, 0.48668067, 0.45967890])

        n = 0.1 * parameters[0] * (rsn_rand_1234567890 - 0.5)

        data = model_with_deriv(x) + n
        fitter_with_deriv = fitting.LevMarLSQFitter()
        new_model_with_deriv = fitter_with_deriv(model_with_deriv, x, data)
        fitter_no_deriv = fitting.LevMarLSQFitter()
        new_model_no_deriv = fitter_no_deriv(model_no_deriv, x, data,
                                             estimate_jacobian=True)
        assert_allclose(new_model_with_deriv.parameters,
                        new_model_no_deriv.parameters, atol=0.15)


def create_model(model_class, test_parameters, use_constraints=True,
                 parameter_key='parameters'):
    """Create instance of model class."""

    constraints = {}
    if issubclass(model_class, PolynomialBase):
        return model_class(**test_parameters[parameter_key])
    elif issubclass(model_class, FittableModel):
        if "requires_scipy" in test_parameters and not HAS_SCIPY:
            pytest.skip("SciPy not found")
        if use_constraints:
            if 'constraints' in test_parameters:
                constraints = test_parameters['constraints']
        return model_class(*test_parameters[parameter_key], **constraints)


@pytest.mark.filterwarnings(r'ignore:Model is linear in parameters.*')
@pytest.mark.filterwarnings(r'ignore:The fit may be unsuccessful.*')
@pytest.mark.parametrize(('model_class', 'test_parameters'),
                         sorted(models_1D.items(), key=lambda x: str(x[0])))
class TestFittable1DModels(Fittable1DModelTester):
    pass


@pytest.mark.filterwarnings(r'ignore:Model is linear in parameters.*')
@pytest.mark.parametrize(('model_class', 'test_parameters'),
                         sorted(models_2D.items(), key=lambda x: str(x[0])))
class TestFittable2DModels(Fittable2DModelTester):
    pass


def test_ShiftModel():
    # Shift by a scalar
    m = models.Shift(42)
    assert m(0) == 42
    assert_equal(m([1, 2]), [43, 44])

    # Shift by a list
    m = models.Shift([42, 43], n_models=2)
    assert_equal(m(0), [42, 43])
    assert_equal(m([1, 2], model_set_axis=False),
                 [[43, 44], [44, 45]])


def test_ScaleModel():
    # Scale by a scalar
    m = models.Scale(42)
    assert m(0) == 0
    assert_equal(m([1, 2]), [42, 84])

    # Scale by a list
    m = models.Scale([42, 43], n_models=2)
    assert_equal(m(0), [0, 0])
    assert_equal(m([1, 2], model_set_axis=False),
                 [[42, 84], [43, 86]])


def test_voigt_model():
    """
    Currently just tests that the model peaks at its origin.
    Regression test for https://github.com/astropy/astropy/issues/3942
    """
    m = models.Voigt1D(x_0=5, amplitude_L=10, fwhm_L=0.5, fwhm_G=0.9)
    x = np.arange(0, 10, 0.01)
    y = m(x)
    assert y[500] == y.max()  # y[500] is right at the center


def test_model_instance_repr():
    m = models.Gaussian1D(1.5, 2.5, 3.5)
    assert repr(m) == '<Gaussian1D(amplitude=1.5, mean=2.5, stddev=3.5)>'


@pytest.mark.skipif("not HAS_SCIPY")
def test_tabular_interp_1d():
    """
    Test Tabular1D model.
    """
    points = np.arange(0, 5)
    values = [1., 10, 2, 45, -3]
    LookupTable = models.tabular_model(1)
    model = LookupTable(points=points, lookup_table=values)
    xnew = [0., .7, 1.4, 2.1, 3.9]
    ans1 = [1., 7.3, 6.8, 6.3, 1.8]
    assert_allclose(model(xnew), ans1)

    # Test evaluate without passing `points`.
    model = LookupTable(lookup_table=values)
    assert_allclose(model(xnew), ans1)

    # Test bounds error.
    xextrap = [0., .7, 1.4, 2.1, 3.9, 4.1]
    with pytest.raises(ValueError):
        model(xextrap)

    # test extrapolation and fill value
    model = LookupTable(lookup_table=values, bounds_error=False,
                        fill_value=None)
    assert_allclose(model(xextrap),
                    [1., 7.3, 6.8, 6.3, 1.8, -7.8])

    # Test unit support
    xnew = xnew * u.nm
    ans1 = ans1 * u.nJy
    model = LookupTable(points=points*u.nm, lookup_table=values*u.nJy)
    assert_quantity_allclose(model(xnew), ans1)
    assert_quantity_allclose(model(xnew.to(u.nm)), ans1)
    assert model.bounding_box == (0 * u.nm, 4 * u.nm)

    # Test fill value unit conversion and unitless input on table with unit
    model = LookupTable([1, 2, 3], [10, 20, 30] * u.nJy, bounds_error=False,
                        fill_value=1e-33*(u.W / (u.m * u.m * u.Hz)))
    assert_quantity_allclose(model(np.arange(5)),
                             [100, 10, 20, 30, 100] * u.nJy)
@pytest.mark.skipif("not HAS_SCIPY")
def test_tabular_interp_2d():
table = np.array([
[-0.04614432, -0.02512547, -0.00619557, 0.0144165, 0.0297525],
[-0.04510594, -0.03183369, -0.01118008, 0.01201388, 0.02496205],
[-0.05464094, -0.02804499, -0.00960086, 0.01134333, 0.02284104],
[-0.04879338, -0.02539565, -0.00440462, 0.01795145, 0.02122417],
[-0.03637372, -0.01630025, -0.00157902, 0.01649774, 0.01952131]])
points = np.arange(0, 5)
points = (points, points)
xnew = np.array([0., .7, 1.4, 2.1, 3.9])
LookupTable = models.tabular_model(2)
model = LookupTable(points, table)
znew = model(xnew, xnew)
result = np.array(
[-0.04614432, -0.03450009, -0.02241028, -0.0069727, 0.01938675])
assert_allclose(znew, result, atol=1e-7)
# test 2D arrays as input
a = np.arange(12).reshape((3, 4))
y, x = np.mgrid[:3, :4]
t = models.Tabular2D(lookup_table=a)
r = t(y, x)
assert_allclose(a, r)
with pytest.raises(ValueError):
model = LookupTable(points=([1.2, 2.3], [1.2, 6.7], [3, 4]))
with pytest.raises(ValueError):
model = LookupTable(lookup_table=[1, 2, 3])
with pytest.raises(NotImplementedError):
model = LookupTable(n_models=2)
with pytest.raises(ValueError):
model = LookupTable(([1, 2], [3, 4]), [5, 6])
with pytest.raises(ValueError):
model = LookupTable(([1, 2] * u.m, [3, 4]), [[5, 6], [7, 8]])
with pytest.raises(ValueError):
model = LookupTable(points, table, bounds_error=False,
fill_value=1*u.Jy)
# Test unit support
points = points[0] * u.nm
points = (points, points)
xnew = xnew * u.nm
model = LookupTable(points, table * u.nJy)
result = result * u.nJy
assert_quantity_allclose(model(xnew, xnew), result, atol=1e-7*u.nJy)
xnew = xnew.to(u.m)
assert_quantity_allclose(model(xnew, xnew), result, atol=1e-7*u.nJy)
bbox = (0 * u.nm, 4 * u.nm)
bbox = (bbox, bbox)
assert model.bounding_box == bbox
@pytest.mark.skipif("not HAS_SCIPY")
def test_tabular_nd():
a = np.arange(24).reshape((2, 3, 4))
x, y, z = np.mgrid[:2, :3, :4]
tab = models.tabular_model(3)
t = tab(lookup_table=a)
result = t(x, y, z)
assert_allclose(a, result)
with pytest.raises(ValueError):
models.tabular_model(0)
def test_with_bounding_box():
"""
Test the option to evaluate a model respecting
its bounding_box.
"""
p = models.Polynomial2D(2) & models.Polynomial2D(2)
m = models.Mapping((0, 1, 0, 1)) | p
with NumpyRNGContext(1234567):
m.parameters = np.random.rand(12)
m.bounding_box = ((3, 9), (1, 8))
x, y = np.mgrid[:10, :10]
a, b = m(x, y)
aw, bw = m(x, y, with_bounding_box=True)
ind = (~np.isnan(aw)).nonzero()
assert_allclose(a[ind], aw[ind])
assert_allclose(b[ind], bw[ind])
aw, bw = m(x, y, with_bounding_box=True, fill_value=1000)
ind = (aw != 1000).nonzero()
assert_allclose(a[ind], aw[ind])
assert_allclose(b[ind], bw[ind])
# test the order of bbox is not reversed for 1D models
p = models.Polynomial1D(1, c0=12, c1=2.3)
p.bounding_box = (0, 5)
assert p(1) == p(1, with_bounding_box=True)
t3 = models.Shift(10) & models.Scale(2) & models.Shift(-1)
t3.bounding_box = ((4.3, 6.9), (6, 15), (-1, 10))
assert_allclose(t3([1, 1], [7, 7], [3, 5], with_bounding_box=True),
[[np.nan, 11], [np.nan, 14], [np.nan, 4]])
trans3 = models.Shift(10) & models.Scale(2) & models.Shift(-1)
trans3.bounding_box = ((4.3, 6.9), (6, 15), (-1, 10))
assert_allclose(trans3(1, 7, 5, with_bounding_box=True), [11, 14, 4])
@pytest.mark.skipif("not HAS_SCIPY")
def test_tabular_with_bounding_box():
points = np.arange(5)
values = np.array([1.5, 3.4, 6.7, 7, 32])
t = models.Tabular1D(points, values)
result = t(1, with_bounding_box=True)
assert result == 3.4
assert t.inverse(result, with_bounding_box=True) == 1.
@pytest.mark.skipif("not HAS_SCIPY")
def test_tabular_bounding_box_with_units():
points = np.arange(5)*u.pix
lt = np.arange(5)*u.AA
t = models.Tabular1D(points, lt)
result = t(1*u.pix, with_bounding_box=True)
assert result == 1.*u.AA
assert t.inverse(result, with_bounding_box=True) == 1*u.pix
@pytest.mark.skipif("not HAS_SCIPY")
def test_tabular1d_inverse():
"""Test that the Tabular1D inverse is defined"""
points = np.arange(5)
values = np.array([1.5, 3.4, 6.7, 7, 32])
t = models.Tabular1D(points, values)
result = t.inverse((3.4, 6.7))
assert_allclose(result, np.array((1., 2.)))
# Check that it works for descending values in lookup_table
t2 = models.Tabular1D(points, values[::-1])
assert_allclose(t2.inverse.points[0], t2.lookup_table[::-1])
result2 = t2.inverse((7, 6.7))
assert_allclose(result2, np.array((1., 2.)))
# Check that it errors on double-valued lookup_table
points = np.arange(5)
values = np.array([1.5, 3.4, 3.4, 32, 25])
t = models.Tabular1D(points, values)
with pytest.raises(NotImplementedError):
t.inverse((3.4, 7.))
# Check that Tabular2D.inverse raises an error
table = np.arange(5*5).reshape(5, 5)
points = np.arange(0, 5)
points = (points, points)
t3 = models.Tabular2D(points=points, lookup_table=table)
with pytest.raises(NotImplementedError):
t3.inverse((3, 3))
# Check that it uses the same kwargs as the original model
points = np.arange(5)
values = np.array([1.5, 3.4, 6.7, 7, 32])
t = models.Tabular1D(points, values)
with pytest.raises(ValueError):
t.inverse(100)
t = models.Tabular1D(points, values, bounds_error=False, fill_value=None)
result = t.inverse(100)
assert_allclose(t(result), 100)
@pytest.mark.skipif("not HAS_SCIPY")
def test_tabular_module_name():
"""
The module name must be set manually because
these classes are created dynamically.
"""
for model in [models.Tabular1D, models.Tabular2D]:
assert model.__module__ == "astropy.modeling.tabular"
class classmodel(FittableModel):
f = Parameter(default=1)
x = Parameter(default=0)
y = Parameter(default=2)
def __init__(self, f=f.default, x=x.default, y=y.default):
super().__init__(f, x, y)
def evaluate(self):
pass
class subclassmodel(classmodel):
f = Parameter(default=3, fixed=True)
x = Parameter(default=10)
y = Parameter(default=12)
h = Parameter(default=5)
def __init__(self, f=f.default, x=x.default, y=y.default, h=h.default):
super().__init__(f, x, y)
def evaluate(self):
pass
def test_parameter_inheritance():
b = subclassmodel()
assert b.param_names == ('f', 'x', 'y', 'h')
assert b.h == 5
assert b.f == 3
assert b.f.fixed == True # noqa: E712
def test_parameter_description():
model = models.Gaussian1D(1.5, 2.5, 3.5)
assert model.amplitude._description == "Amplitude (peak value) of the Gaussian"
assert model.mean._description == "Position of peak (Gaussian)"
model = models.Voigt1D(x_0=5, amplitude_L=10, fwhm_L=0.5, fwhm_G=0.9)
assert model.amplitude_L._description == "The Lorentzian amplitude"
assert model.fwhm_L._description == "The Lorentzian full width at half maximum"
assert model.fwhm_G._description == "The Gaussian full width at half maximum"
| bsd-3-clause |
WorldViews/Spirals | YEI/foo_api.py | 1 | 146347 | #!/usr/bin/env python2.7
from __future__ import print_function
""" This module is an API module for ThreeSpace devices.
The ThreeSpace API module is a collection of classes, functions, structures,
and static variables used exclusively for ThreeSpace devices. This module can
be used with a system running Python 2.5 and newer (including Python 3.x).
"""
__version__ = "2.0.2.3"
__authors__ = [
'"Chris George" <cgeorge@yeitechnology.com>',
'"Dan Morrison" <dmorrison@yeitechnology.com>',
]
import threading
import sys
import serial
import struct
import collections
import traceback
import time
import os
# choose an implementation, depending on OS
if os.name == 'nt': # sys.platform == 'win32':
from win32_threespace_utils import *
else:
from threespace_utils import *
print("WARNING: No additional utils are loaded!!!!!!")
### Globals ###
global_file_path = os.getcwd()
global_error = None
global_counter = 0
global_donglist = {}
global_sensorlist = {}
global_broadcaster = None
TSS_TIMESTAMP_SENSOR = 0
TSS_TIMESTAMP_SYSTEM = 1
TSS_TIMESTAMP_NONE = 2
TSS_JOYSTICK = 0
TSS_MOUSE = 2
TSS_BUTTON_LEFT = 0
TSS_BUTTON_RIGHT = 1
### Private ###
_baudrate = 115200
_allowed_baudrates = [1200, 2400, 4800, 9600, 19200, 28800, 38400, 57600, 115200, 230400, 460800, 921600]
_wireless_retries = 5
### Functions ###
if sys.version_info >= (3, 0):
def makeWriteArray(startbyte, index_byte=None, command_byte=None, data=None):
rtn_array = bytearray((startbyte,))
if index_byte is not None:
rtn_array.append(index_byte)
if command_byte is not None:
rtn_array.append(command_byte)
if data is not None:
rtn_array += data
rtn_array.append((sum(rtn_array) - startbyte) % 256) # checksum
_hexDump(rtn_array)
return rtn_array
else:
def makeWriteArray(startbyte, index_byte=None, command_byte=None, data=None):
rtn_array = chr(startbyte)
if index_byte is not None:
rtn_array += chr(index_byte)
if command_byte is not None:
rtn_array += chr(command_byte)
if data is not None:
rtn_array += data
rtn_array += chr((sum(bytearray(rtn_array)) - startbyte) % 256) # checksum
_hexDump(rtn_array)
return rtn_array
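# Illustrative sketch, not part of the original API: a hedged example of
# the packet layout makeWriteArray produces for a wired 0xf7 command --
# start byte, command byte, optional payload, then a mod-256 checksum
# computed over every byte after the start byte. 0xe6 is the
# 'getHardwareVersionString' command byte from the command dictionaries
# below.
def _example_make_write_array():
    packet = bytearray(makeWriteArray(0xf7, command_byte=0xe6))
    # The last byte is the checksum of everything after the start byte.
    assert packet[-1] == sum(packet[1:-1]) % 256
    return packet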
def _hexDump(serial_string, header='i'):
if "-d_hex" in sys.argv:
ba = bytearray(serial_string)
print('{0}('.format(header), end='')
for i in range(len(ba)):
if i == len(ba)-1:
print('0x{0:02x}'.format(ba[i]), end='')
else:
print('0x{0:02x},'.format(ba[i]), end='')
print(')')
def _print(string):
if "-d" in sys.argv:
print(string)
def _echoCallback(sensor, state):
_print('{0}:{1}'.format(sensor, state))
def _generateProtocolHeader(success_failure=False,
timestamp=False,
command_echo=False,
checksum=False,
logical_id=False,
serial_number=False,
data_length=False):
byte = 0
struct_str = '>'
idx_list = []
if success_failure:
byte += 0x1
struct_str += '?'
idx_list.append(0)
if timestamp:
byte += 0x2
struct_str += 'I'
idx_list.append(1)
if command_echo:
byte += 0x4
struct_str += 'B'
idx_list.append(2)
if checksum:
byte += 0x8
struct_str += 'B'
idx_list.append(3)
if logical_id:
byte += 0x10
struct_str += 'B'
idx_list.append(4)
if serial_number:
byte += 0x20
struct_str += 'I'
idx_list.append(5)
if data_length:
byte += 0x40
struct_str += 'B'
idx_list.append(6)
return (byte, struct.Struct(struct_str), idx_list)
def _generateSensorClass(sensor_inst, serial_port, allowed_device_types):
sensor_inst.compatibility = checkSoftwareVersionFromPort(serial_port)
sensor_inst.port_name = serial_port.name
sensor_inst.serial_port_settings = serial_port.getSettingsDict()
sensor_inst.serial_port = serial_port
hardware_version = convertString(sensor_inst.f7WriteRead('getHardwareVersionString'))
dev_type = hardware_version[4:-8].strip()
if dev_type not in allowed_device_types:
raise Exception("This is a %s device, not one of these devices %s!" % (dev_type, allowed_device_types))
sensor_inst.device_type = dev_type
serial_number = sensor_inst.f7WriteRead('getSerialNumber')
sensor_inst.serial_number = serial_number
if dev_type == "DNG":
if serial_number in global_donglist:
rtn_inst = global_donglist[serial_number]
rtn_inst.close()
rtn_inst.compatibility = sensor_inst.compatibility
rtn_inst.port_name = serial_port.name
rtn_inst.serial_port_settings = serial_port.getSettingsDict()
rtn_inst.serial_port = serial_port
return rtn_inst
global_donglist[serial_number] = sensor_inst
else:
if serial_number in global_sensorlist:
rtn_inst = global_sensorlist[serial_number]
rtn_inst.close()
rtn_inst.compatibility = sensor_inst.compatibility
rtn_inst.port_name = serial_port.name
rtn_inst.serial_port_settings = serial_port.getSettingsDict()
rtn_inst.serial_port = serial_port
if "BT" in dev_type:
rtn_inst.serial_port.timeout = 1.5
rtn_inst.serial_port.writeTimeout = 1.5
if "WL" in dev_type:
rtn_inst.switchToWiredMode()
return rtn_inst
if "BT" in dev_type:
sensor_inst.serial_port.timeout = 1.5
sensor_inst.serial_port.writeTimeout = 1.5
elif "WL" in dev_type:
sensor_inst.switchToWiredMode()
global_sensorlist[serial_number] = sensor_inst
return sensor_inst
def parseAxisDirections(axis_byte):
axis_order_num = axis_byte & 7
if axis_order_num == 0:
axis_order = "XYZ"
elif axis_order_num == 1:
axis_order = "XZY"
elif axis_order_num == 2:
axis_order = "YXZ"
elif axis_order_num == 3:
axis_order = "YZX"
elif axis_order_num == 4:
axis_order = "ZXY"
elif axis_order_num == 5:
axis_order = "ZYX"
else:
raise ValueError
neg_x = neg_y = neg_z = False
if (axis_byte & 32) > 0:
neg_x = True
if (axis_byte & 16) > 0:
neg_y = True
if (axis_byte & 8) > 0:
neg_z = True
return axis_order, neg_x, neg_y, neg_z
def generateAxisDirections(axis_order, neg_x=False, neg_y=False, neg_z=False):
axis_order = axis_order.upper()
if axis_order == "XYZ":
axis_byte = 0
elif axis_order == "XZY":
axis_byte = 1
elif axis_order == "YXZ":
axis_byte = 2
elif axis_order == "YZX":
axis_byte = 3
elif axis_order == "ZXY":
axis_byte = 4
elif axis_order == "ZYX":
axis_byte = 5
else:
raise ValueError
if neg_x:
axis_byte = axis_byte | 32
if neg_y:
axis_byte = axis_byte | 16
if neg_z:
axis_byte = axis_byte | 8
return axis_byte
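# Illustrative sketch, not part of the original API: a hedged round-trip
# through generateAxisDirections/parseAxisDirections showing the encoding
# used above -- the low three bits select the axis order, and the bits
# for 32, 16, and 8 negate X, Y, and Z respectively.
def _example_axis_directions_roundtrip():
    axis_byte = generateAxisDirections("YZX", neg_x=True, neg_z=True)
    assert axis_byte == 3 | 32 | 8  # order YZX, negated X and Z
    assert parseAxisDirections(axis_byte) == ("YZX", True, False, True)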
def getSystemWirelessRetries():
return _wireless_retries
def setSystemWirelessRetries(retries):
global _wireless_retries
_wireless_retries = retries
def getDefaultCreateDeviceBaudRate():
return _baudrate
def setDefaultCreateDeviceBaudRate(new_baudrate):
global _baudrate
if new_baudrate in _allowed_baudrates:
_baudrate = new_baudrate
def padProtocolHeader69(header_data, sys_timestamp):
fail_byte, cmd_echo, data_size = header_data
return (fail_byte, sys_timestamp, cmd_echo, None, None, None, data_size)
def padProtocolHeader71(header_data):
fail_byte, timestamp, cmd_echo, data_size = header_data
return (fail_byte, timestamp, cmd_echo, None, None, None, data_size)
def padProtocolHeader85(header_data, sys_timestamp):
fail_byte, cmd_echo, rtn_log_id, data_size = header_data
return (fail_byte, sys_timestamp, cmd_echo, None, rtn_log_id, None, data_size)
def padProtocolHeader87(header_data):
fail_byte, timestamp, cmd_echo, rtn_log_id, data_size = header_data
return (fail_byte, timestamp, cmd_echo, None, rtn_log_id, None, data_size)
### Classes ###
class Broadcaster(object):
def __init__(self):
self.retries = 10
def setRetries(self, retries=10):
self.retries = retries
def sequentialWriteRead(self, command, input_list=None, filter=None):
if filter is None:
filter = list(global_sensorlist.values())
val_list = {}
for i in range(self.retries):
for sensor in reversed(filter):
packet = sensor.writeRead(command, input_list)
if packet[0]: # fail_byte
continue
val_list[sensor.serial_number] = packet
filter.remove(sensor)
if not filter:
break
# _print("##Attempt: {0} complete".format(i))
else:
# _print("sensor failed to succeed")
for sensor in filter:
val_list[sensor.serial_number] = (True, None, None)
return val_list
def writeRead(self, command, input_list=None, filter=None):
q = TSCommandQueue()
if filter is None:
filter = list(global_sensorlist.values())
for sensor in filter:
q.queueWriteRead(sensor, sensor.serial_number, self.retries, command, input_list)
return q.proccessQueue()
def _broadcastMethod(self, filter, method, default=None, *args):
# _print(filter)
if filter is None:
filter = list(global_sensorlist.values())
val_list = {}
for i in range(self.retries):
for sensor in reversed(filter):
packet = getattr(sensor, method)(*args)
if packet is default: # fail_byte
continue
val_list[sensor.serial_number] = packet
filter.remove(sensor)
if not filter:
break
# _print("##Attempt: {0} complete".format(i))
else:
# _print("sensor failed to succeed")
for sensor in filter:
val_list[sensor.serial_number] = default
return val_list
def broadcastMethod(self, method, default=None, args=[], filter=None, callback_func=None):
q = TSCommandQueue()
if filter is None:
filter = list(global_sensorlist.values())
for sensor in filter:
q.queueMethod( getattr(sensor, method),
sensor,
self.retries,
default,
args,
callback_func)
return q.proccessQueue()
def setStreamingSlots(self, slot0='null',
slot1='null',
slot2='null',
slot3='null',
slot4='null',
slot5='null',
slot6='null',
slot7='null',
filter=None,
callback_func=None):
args = (slot0, slot1, slot2, slot3, slot4, slot5, slot6, slot7)
return self.broadcastMethod('setStreamingSlots', False, args, filter, callback_func)
def getStreamingSlots(self, filter=None, callback_func=None):
return self.broadcastMethod('getStreamingSlots', None, [], filter, callback_func)
def startStreaming(self, record_data=False, filter=None, callback_func=None):
return self.broadcastMethod('startStreaming', False, [record_data], filter, callback_func)
def stopStreaming(self, filter=None, callback_func=None):
return self.broadcastMethod('stopStreaming', False, [], filter, callback_func)
def setStreamingTiming(self, interval, duration, delay, delay_offset, filter=None, callback_func=None):
if filter is None:
filter = list(global_sensorlist.values())
else:
filter = list(filter)
val_list = {}
for sensor in reversed(filter):
success = False
for i in range(self.retries):
if sensor.setStreamingTiming(interval, duration, delay):
if callback_func is not None:
callback_func(sensor, True)
success = True
break
# _print("##Attempt: {0} complete".format(i))
if callback_func is not None:
callback_func(sensor, False)
else:
# _print("sensor failed to succeed")
pass
val_list[sensor] = success
filter.remove(sensor)
delay += delay_offset
return val_list
def startRecordingData(self, filter=None, callback_func=None):
if filter is None:
filter = list(global_sensorlist.values())
for sensor in filter:
sensor.record_data = True
if callback_func is not None:
callback_func(sensor, True)
def stopRecordingData(self, filter=None, callback_func=None):
if filter is None:
filter = list(global_sensorlist.values())
for sensor in filter:
sensor.record_data = False
if callback_func is not None:
callback_func(sensor, True)
def debugPrint(self, broadcast_dict):
for sensor, data in broadcast_dict.items():
_print('Sensor {0:08X}: {1}'.format(sensor, data))
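# Illustrative sketch, not part of the original API: a hedged example of
# broadcasting one command to every discovered sensor. It assumes sensor
# instances have already been created so global_sensorlist is populated;
# the command name comes from _TSSensor.command_dict below. The result is
# a dict keyed by serial number holding (fail_byte, timestamp, data)
# tuples, and debugPrint only prints when the script runs with -d.
def _example_broadcast_orientation():
    broadcaster = Broadcaster()
    results = broadcaster.writeRead('getTaredOrientationAsQuaternion')
    broadcaster.debugPrint(results)
    return results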
class TSCommandQueue(object):
def __init__(self):
self.queue = []
self.return_dict = {}
def queueWriteRead(self, sensor, rtn_key, retries, command, input_list=None):
self.queue.append(("queueWriteRead", sensor, (self.return_dict, rtn_key, retries, command, input_list)))
def queueMethod(self, method_obj, rtn_key, retries, default=None, input_list=None, callback_func=None):
self.queue.append(("queueMethod", (method_obj, rtn_key, retries, default, input_list, callback_func)))
def _queueMethod(self, method_obj, rtn_key, retries, default=None, input_list=None, callback_func=None):
try:
for i in range(retries):
packet = method_obj(*input_list)
if packet is default: # fail_byte
if callback_func is not None:
callback_func(rtn_key, False)
continue
if callback_func is not None:
callback_func(rtn_key, True)
self.return_dict[rtn_key] = packet
break
else:
self.return_dict[rtn_key] = default
except(KeyboardInterrupt):
print('\n! Received keyboard interrupt, quitting threads.\n')
raise KeyboardInterrupt # fix bug where a thread eats the interrupt
def createThreads(self):
thread_queue = []
for item in self.queue:
if item[0] == "queueWriteRead":
thread_queue.append(item[1].queueWriteRead(*item[2]))
elif item[0] == "queueMethod":
qThread = threading.Thread(target=self._queueMethod, args=item[1])
thread_queue.append(qThread)
return thread_queue
def proccessQueue(self, clear_queue=False):
thread_queue = self.createThreads()
[qThread.start() for qThread in thread_queue]
[qThread.join() for qThread in thread_queue]
if clear_queue:
self.queue = []
return self.return_dict
# Base class should not be used directly
class _TSBase(object):
command_dict = {
'checkLongCommands': (0x19, 1, '>B', 0, None, 1),
'startStreaming': (0x55, 0, None, 0, None, 1),
'stopStreaming': (0x56, 0, None, 0, None, 1),
'updateCurrentTimestamp': (0x5f, 0, None, 4, '>I', 1),
'setLEDMode': (0xc4, 0, None, 1, '>B', 1),
'getLEDMode': (0xc8, 1, '>B', 0, None, 1),
'_setWiredResponseHeaderBitfield': (0xdd, 0, None, 4, '>I', 1),
'_getWiredResponseHeaderBitfield': (0xde, 4, '>I', 0, None, 1),
'getFirmwareVersionString': (0xdf, 12, '>12s', 0, None, 1),
'commitSettings': (0xe1, 0, None, 0, None, 1),
'softwareReset': (0xe2, 0, None, 0, None, 1),
'getHardwareVersionString': (0xe6, 32, '>32s', 0, None, 1),
'getSerialNumber': (0xed, 4, '>I', 0, None, 1),
'setLEDColor': (0xee, 0, None, 12, '>fff', 1),
'getLEDColor': (0xef, 12, '>fff', 0, None, 1),
'setJoystickAndMousePresentRemoved': (0xfd, 0, None, 2, '>BB', 1),
'getJoystickAndMousePresentRemoved': (0xfe, 2, '>B', 0, None, 1),
'null': (0xff, 0, None, 0, None, 1)
}
def __init__(self, com_port=None, baudrate=_baudrate, timestamp_mode=TSS_TIMESTAMP_SENSOR):
self.protocol_args = { 'success_failure': True,
'timestamp': True,
'command_echo': True,
'data_length': True}
if timestamp_mode != TSS_TIMESTAMP_SENSOR:
self.protocol_args['timestamp'] = False
self.timestamp_mode = timestamp_mode
self.baudrate = baudrate
reinit = False
try: # if this is set the class had been there before
check = self.stream_parse
reinit = True
# _print("sensor reinit!!!")
except:
self._setupBaseVariables()
self._setupProtocolHeader(**self.protocol_args)
self._setupThreadedReadLoop()
if reinit:
if self.stream_timing is not None:
self.setStreamingTiming(*self.stream_timing)
if self.stream_slot_cmds is not None:
self.setStreamingSlots(*self.stream_slot_cmds)
def _setupBaseVariables(self):
self.serial_number_hex = '{0:08X}'.format(self.serial_number)
self.stream_timing = None
self.stream_parse = None
self.stream_slot_cmds = ['null'] * 8
self.stream_last_data = None
self.stream_data = []
self.record_data = False
self.data_loop = False
def _setupProtocolHeader(self, success_failure=False,
timestamp=False,
command_echo=False,
checksum=False,
logical_id=False,
serial_number=False,
data_length=False):
protocol_header = _generateProtocolHeader( success_failure,
timestamp,
command_echo,
checksum,
logical_id,
serial_number,
data_length)
protocol_byte, self.header_parse, self.header_idx_lst = protocol_header
d_header = self.f7WriteRead('_getWiredResponseHeaderBitfield')
if d_header != protocol_byte:
self.f7WriteRead('_setWiredResponseHeaderBitfield', protocol_byte)
d_header = self.f7WriteRead('_getWiredResponseHeaderBitfield')
if d_header != protocol_byte:
print("!!!!!fail d_header={0}, protocol_header_byte={1}".format(d_header, protocol_byte))
raise Exception
def _setupThreadedReadLoop(self):
self.read_lock = threading.Condition(threading.Lock())
self.read_queue = collections.deque()
self.read_dict = {}
self.data_loop = True
self.read_thread = threading.Thread(target=self._dataReadLoop)
self.read_thread.daemon = True
self.read_thread.start()
def __repr__(self):
return "<YEI3Space {0}:{1}>".format(self.device_type, self.serial_number_hex)
def __str__(self):
return self.__repr__()
def close(self):
self.data_loop = False
if self.serial_port:
self.serial_port.close()
self.serial_port = None
self.read_thread.join()
def reconnect(self):
self.close()
if not tryPort(self.port_name):
_print("tryport fail")
try:
serial_port = serial.Serial(self.port_name, baudrate=self.baudrate, timeout=0.5, writeTimeout=0.5)
serial_port.applySettingsDict(self.serial_port_settings)
self.serial_port = serial_port
except:
traceback.print_exc()
return False
self._setupProtocolHeader(**self.protocol_args)
self._setupThreadedReadLoop()
if self.stream_timing is not None:
self.setStreamingTiming(*self.stream_timing)
if self.stream_slot_cmds is not None:
self.setStreamingSlots(*self.stream_slot_cmds)
return True
# Wired Old Protocol WriteRead
def f7WriteRead(self, command, input_list=None):
command_args = self.command_dict[command]
cmd_byte, out_len, out_struct, in_len, in_struct, compatibility = command_args
packed_data = None
if in_struct:
if type(input_list) in (list, tuple):
packed_data = struct.pack(in_struct, *input_list)
else:
packed_data = struct.pack(in_struct, input_list)
write_array = makeWriteArray(0xf7, None, cmd_byte, packed_data)
self.serial_port.write(write_array)
if out_struct:
output_data = self.serial_port.read(out_len)
rtn_list = struct.unpack(out_struct, output_data)
if len(rtn_list) != 1:
return rtn_list
return rtn_list[0]
# requires the dataloop, do not call
# Wired New Protocol WriteRead
def f9WriteRead(self, command, input_list=None):
global global_counter
command_args = self.command_dict[command]
cmd_byte, out_len, out_struct, in_len, in_struct, compatibility = command_args
if self.compatibility < compatibility:
raise Exception("Firmware for device on ( %s ) is out of date for this function. Recommend updating to latest firmware." % self.serial_port.name)
packed_data = None
if in_struct:
if type(input_list) in (list, tuple):
packed_data = struct.pack(in_struct, *input_list)
else:
packed_data = struct.pack(in_struct, input_list)
write_array = makeWriteArray(0xf9, None, cmd_byte, packed_data)
self.read_lock.acquire()
uid = global_counter
global_counter += 1
try:
self.serial_port.write(write_array) # release in reader thread
except serial.SerialTimeoutException:
self.read_lock.release()
self.serial_port.close()
# _print("SerialTimeoutException!!!!")
# !!!!!Reconnect
return (True, None, None)
except ValueError:
try:
# _print("trying to open it back up!!!!")
self.serial_port.open()
# _print("aaand open!!!!")
except serial.SerialException:
self.read_lock.release()
# _print("SerialTimeoutException!!!!")
# !!!!!Reconnect
return (True, None, None)
queue_packet = (uid, cmd_byte)
timeout_time = 0.5 + (len(self.read_queue) * 0.150) # timeout increases as queue gets larger
self.read_queue.append(queue_packet)
start_time = time.clock() + timeout_time
read_data = None
while(timeout_time > 0):
self.read_lock.wait(timeout_time)
read_data = self.read_dict.get(uid, None)
if read_data is not None:
break
timeout_time = start_time - time.clock()
# _print("Still waiting {0} {1} {2}".format(uid, command, timeout_time))
else:
# _print("Operation timed out!!!!")
try:
self.read_queue.remove(queue_packet)
except:
traceback.print_exc()
self.read_lock.release()
return (True, None, None)
self.read_lock.release()
del self.read_dict[uid]
header_list, output_data = read_data
fail_byte, timestamp, cmd_echo, ck_sum, rtn_log_id, sn, data_size = header_list
if cmd_echo != cmd_byte:
# _print("!!!!!!!!cmd_echo!=cmd_byte!!!!!")
# _print('cmd_echo= 0x{0:02x} cmd_byte= 0x{1:02x}'.format(cmd_echo, cmd_byte))
return (True, timestamp, None)
rtn_list = None
if not fail_byte:
if out_struct:
rtn_list = struct.unpack(out_struct, output_data)
if len(rtn_list) == 1:
rtn_list = rtn_list[0]
else:
# _print("fail_byte!!!!triggered")
pass
return (fail_byte, timestamp, rtn_list)
writeRead = f9WriteRead
def isConnected(self, try_reconnect=False):
try:
sn = self.getSerialNumber()  # local name avoids shadowing the serial module
if sn is not None:
return True
except:
pass
return False
## generated functions USB and WL_ and DNG and EM_ and DL_ and BT_
## 85(0x55)
def stopStreaming(self):
fail_byte, t_stamp, data = self.writeRead('stopStreaming')
return not fail_byte
## 86(0x56)
def startStreaming(self):
fail_byte, t_stamp, data = self.writeRead('startStreaming')
return not fail_byte
## 95(0x5f)
def updateCurrentTimestamp(self, time, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('updateCurrentTimestamp', time)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 196(0xc4)
def setLEDMode(self, mode, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setLEDMode', mode)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 200(0xc8)
def getLEDMode(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getLEDMode')
if timestamp:
return (data, t_stamp)
return data
## 223(0xdf)
def getFirmwareVersionString(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getFirmwareVersionString')
data = convertString(data)
if timestamp:
return (data, t_stamp)
return data
## 225(0xe1)
def commitSettings(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('commitSettings')
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 230(0xe6)
def getHardwareVersionString(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getHardwareVersionString')
data = convertString(data)
if timestamp:
return (data, t_stamp)
return data
## 237(0xed)
def getSerialNumber(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getSerialNumber')
if timestamp:
return (data, t_stamp)
return data
## 238(0xee)
def setLEDColor(self, rgb, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setLEDColor', rgb)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 239(0xef)
def getLEDColor(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getLEDColor')
if timestamp:
return (data, t_stamp)
return data
## 253(0xfd)
def setJoystickAndMousePresentRemoved(self, joystick, mouse, timestamp=False):
arg_list = (joystick, mouse)
fail_byte, t_stamp, data = self.writeRead('setJoystickAndMousePresentRemoved', arg_list)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 254(0xfe)
def getJoystickAndMousePresentRemoved(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getJoystickAndMousePresentRemoved')
if timestamp:
return (data, t_stamp)
return data
## END generated functions USB and WL_ and DNG and EM_ and DL_ and BT_
class _TSSensor(_TSBase):
command_dict = _TSBase.command_dict.copy()
command_dict.update({
'getTaredOrientationAsQuaternion': (0x0, 16, '>4f', 0, None, 1),
'getTaredOrientationAsEulerAngles': (0x1, 12, '>fff', 0, None, 1),
'getTaredOrientationAsRotationMatrix': (0x2, 36, '>9f', 0, None, 1),
'getTaredOrientationAsAxisAngle': (0x3, 16, '>4f', 0, None, 1),
'getTaredOrientationAsTwoVector': (0x4, 24, '>6f', 0, None, 1),
'getDifferenceQuaternion': (0x5, 16, '>4f', 0, None, 1),
'getUntaredOrientationAsQuaternion': (0x6, 16, '>4f', 0, None, 1),
'getUntaredOrientationAsEulerAngles': (0x7, 12, '>fff', 0, None, 1),
'getUntaredOrientationAsRotationMatrix': (0x8, 36, '>9f', 0, None, 1),
'getUntaredOrientationAsAxisAngle': (0x9, 16, '>4f', 0, None, 1),
'getUntaredOrientationAsTwoVector': (0xa, 24, '>6f', 0, None, 1),
'getTaredTwoVectorInSensorFrame': (0xb, 24, '>6f', 0, None, 1),
'getUntaredTwoVectorInSensorFrame': (0xc, 24, '>6f', 0, None, 1),
'setEulerAngleDecompositionOrder': (0x10, 0, None, 1, '>B', 1),
'setMagnetoresistiveThreshold': (0x11, 0, None, 16, '>fIff', 3),
'setAccelerometerResistanceThreshold': (0x12, 0, None, 8, '>fI', 3),
'offsetWithCurrentOrientation': (0x13, 0, None, 0, None, 3),
'resetBaseOffset': (0x14, 0, None, 0, None, 3),
'offsetWithQuaternion': (0x15, 0, None, 16, '>4f', 3),
'setBaseOffsetWithCurrentOrientation': (0x16, 0, None, 0, None, 3),
'getAllNormalizedComponentSensorData': (0x20, 36, '>9f', 0, None, 1),
'getNormalizedGyroRate': (0x21, 12, '>fff', 0, None, 1),
'getNormalizedAccelerometerVector': (0x22, 12, '>fff', 0, None, 1),
'getNormalizedCompassVector': (0x23, 12, '>fff', 0, None, 1),
'getAllCorrectedComponentSensorData': (0x25, 36, '>9f', 0, None, 1),
'getCorrectedGyroRate': (0x26, 12, '>fff', 0, None, 1),
'getCorrectedAccelerometerVector': (0x27, 12, '>fff', 0, None, 1),
'getCorrectedCompassVector': (0x28, 12, '>fff', 0, None, 1),
'getCorrectedLinearAccelerationInGlobalSpace': (0x29, 12, '>fff', 0, None, 1),
'getTemperatureC': (0x2b, 4, '>f', 0, None, 1),
'getTemperatureF': (0x2c, 4, '>f', 0, None, 1),
'getConfidenceFactor': (0x2d, 4, '>f', 0, None, 1),
'getAllRawComponentSensorData': (0x40, 36, '>9f', 0, None, 1),
'getRawGyroscopeRate': (0x41, 12, '>fff', 0, None, 1),
'getRawAccelerometerData': (0x42, 12, '>fff', 0, None, 1),
'getRawCompassData': (0x43, 12, '>fff', 0, None, 1),
'_setStreamingSlots': (0x50, 0, None, 8, '>8B', 1),
'_getStreamingSlots': (0x51, 8, '>8B', 0, None, 1),
'_setStreamingTiming': (0x52, 0, None, 12, '>III', 1),
'_getStreamingTiming': (0x53, 12, '>III', 0, None, 1),
'_getStreamingBatch': (0x54, 0, None, 0, None, 1),
'tareWithCurrentOrientation': (0x60, 0, None, 0, None, 1),
'tareWithQuaternion': (0x61, 0, None, 16, '>4f', 1),
'tareWithRotationMatrix': (0x62, 0, None, 36, '>9f', 1),
'setStaticAccelerometerTrustValue': (0x63, 0, None, 4, '>f', 2),
'setConfidenceAccelerometerTrustValues': (0x64, 0, None, 8, '>ff', 2),
'setStaticCompassTrustValue': (0x65, 0, None, 4, '>f', 2),
'setConfidenceCompassTrustValues': (0x66, 0, None, 8, '>ff', 2),
'setDesiredUpdateRate': (0x67, 0, None, 4, '>I', 1),
'setReferenceVectorMode': (0x69, 0, None, 1, '>B', 1),
'setOversampleRate': (0x6a, 0, None, 1, '>B', 1),
'setGyroscopeEnabled': (0x6b, 0, None, 1, '>B', 1),
'setAccelerometerEnabled': (0x6c, 0, None, 1, '>B', 1),
'setCompassEnabled': (0x6d, 0, None, 1, '>B', 1),
'setAxisDirections': (0x74, 0, None, 1, '>B', 1),
'setRunningAveragePercent': (0x75, 0, None, 4, '>f', 1),
'setCompassReferenceVector': (0x76, 0, None, 12, '>fff', 1),
'setAccelerometerReferenceVector': (0x77, 0, None, 12, '>fff', 1),
'resetKalmanFilter': (0x78, 0, None, 0, None, 1),
'setAccelerometerRange': (0x79, 0, None, 1, '>B', 1),
'setFilterMode': (0x7b, 0, None, 1, '>B', 1),
'setRunningAverageMode': (0x7c, 0, None, 1, '>B', 1),
'setGyroscopeRange': (0x7d, 0, None, 1, '>B', 1),
'setCompassRange': (0x7e, 0, None, 1, '>B', 1),
'getTareAsQuaternion': (0x80, 16, '>4f', 0, None, 1),
'getTareAsRotationMatrix': (0x81, 36, '>9f', 0, None, 1),
'getAccelerometerTrustValues': (0x82, 8, '>ff', 0, None, 2),
'getCompassTrustValues': (0x83, 8, '>ff', 0, None, 2),
'getCurrentUpdateRate': (0x84, 4, '>I', 0, None, 1),
'getCompassReferenceVector': (0x85, 12, '>fff', 0, None, 1),
'getAccelerometerReferenceVector': (0x86, 12, '>fff', 0, None, 1),
'getGyroscopeEnabledState': (0x8c, 1, '>B', 0, None, 1),
'getAccelerometerEnabledState': (0x8d, 1, '>B', 0, None, 1),
'getCompassEnabledState': (0x8e, 1, '>B', 0, None, 1),
'getAxisDirections': (0x8f, 1, '>B', 0, None, 1),
'getOversampleRate': (0x90, 1, '>B', 0, None, 1),
'getRunningAveragePercent': (0x91, 4, '>f', 0, None, 1),
'getDesiredUpdateRate': (0x92, 4, '>I', 0, None, 1),
'getAccelerometerRange': (0x94, 1, '>B', 0, None, 1),
'getFilterMode': (0x98, 1, '>B', 0, None, 1),
'getRunningAverageMode': (0x99, 1, '>B', 0, None, 1),
'getGyroscopeRange': (0x9a, 1, '>B', 0, None, 1),
'getCompassRange': (0x9b, 1, '>B', 0, None, 1),
'getEulerAngleDecompositionOrder': (0x9c, 1, '>B', 0, None, 1),
'getMagnetoresistiveThreshold': (0x9d, 16, '>fIff', 0, None, 3),
'getAccelerometerResistanceThreshold': (0x9e, 8, '>fI', 0, None, 3),
'getOffsetOrientationAsQuaternion': (0x9f, 16, '>4f', 0, None, 3),
'setCompassCalibrationCoefficients': (0xa0, 0, None, 48, '>12f', 1),
'setAccelerometerCalibrationCoefficients': (0xa1, 0, None, 48, '>12f', 1),
'getCompassCalibrationCoefficients': (0xa2, 48, '>12f', 0, None, 1),
'getAccelerometerCalibrationCoefficients': (0xa3, 48, '>12f', 0, None, 1),
'getGyroscopeCalibrationCoefficients': (0xa4, 48, '>12f', 0, None, 1),
'beginGyroscopeAutoCalibration': (0xa5, 0, None, 0, None, 1),
'setGyroscopeCalibrationCoefficients': (0xa6, 0, None, 48, '>12f', 1),
'setCalibrationMode': (0xa9, 0, None, 1, '>B', 1),
'getCalibrationMode': (0xaa, 1, '>B', 0, None, 1),
'setOrthoCalibrationDataPointFromCurrentOrientation': (0xab, 0, None, 0, None, 1),
'setOrthoCalibrationDataPointFromVector': (0xac, 0, None, 14, '>BBfff', 1),
'getOrthoCalibrationDataPoint': (0xad, 12, '>fff', 2, '>BB', 1),
'performOrthoCalibration': (0xae, 0, None, 0, None, 1),
'clearOrthoCalibrationData': (0xaf, 0, None, 0, None, 1),
'setSleepMode': (0xe3, 0, None, 1, '>B', 1),
'getSleepMode': (0xe4, 1, '>B', 0, None, 1),
'setJoystickEnabled': (0xf0, 0, None, 1, '>B', 1),
'setMouseEnabled': (0xf1, 0, None, 1, '>B', 1),
'getJoystickEnabled': (0xf2, 1, '>B', 0, None, 1),
'getMouseEnabled': (0xf3, 1, '>B', 0, None, 1),
'setControlMode': (0xf4, 0, None, 3, '>BBB', 1),
'setControlData': (0xf5, 0, None, 7, '>BBBf', 1),
'getControlMode': (0xf6, 1, '>B', 2, '>BB', 1),
'getControlData': (0xf7, 4, '>f', 3, '>BBB', 1),
'setMouseAbsoluteRelativeMode': (0xfb, 0, None, 1, '>B', 1),
'getMouseAbsoluteRelativeMode': (0xfc, 1, '>B', 0, None, 1)
})
reverse_command_dict = dict(map(lambda x: [x[1][0], x[0]], command_dict.items()))
_device_types = ["!BASE"]
def __new__(cls, com_port=None, baudrate=_baudrate, timestamp_mode=TSS_TIMESTAMP_SENSOR):
if com_port:
if type(com_port) is str:
port_name = com_port
elif type(com_port) is ComInfo:
port_name = com_port.com_port
else:
_print("An erronous parameter was passed in")
return None
if baudrate not in _allowed_baudrates:
baudrate = _baudrate
_print("Error baudrate value not allowed. Using default.")
serial_port = serial.Serial(port_name, baudrate=baudrate, timeout=0.5, writeTimeout=0.5)
if serial_port is not None:
new_inst = super(_TSSensor, cls).__new__(cls)
return _generateSensorClass(new_inst, serial_port, _TSSensor._device_types)
_print('Error serial port was not made')
def __init__(self, com_port=None, baudrate=_baudrate, timestamp_mode=TSS_TIMESTAMP_SENSOR):
self.protocol_args = { 'success_failure': True,
'timestamp': True,
'command_echo': True,
'data_length': True}
if timestamp_mode != TSS_TIMESTAMP_SENSOR:
self.protocol_args['timestamp'] = False
self.timestamp_mode = timestamp_mode
self.baudrate = baudrate
reinit = False
try: # if this is set the class had been there before
check = self.stream_parse
reinit = True
# _print("sensor reinit!!!")
except:
self._setupBaseVariables()
self.callback_func = None
self._setupProtocolHeader(**self.protocol_args)
self._setupThreadedReadLoop()
self.latest_lock = threading.Condition(threading.Lock())
self.new_data = False
if reinit:
if self.stream_timing is not None:
self.setStreamingTiming(*self.stream_timing)
if self.stream_slot_cmds is not None:
self.setStreamingSlots(*self.stream_slot_cmds)
def _queueWriteRead(self, rtn_dict, rtn_key, retries, command, input_list=None):
try:
for i in range(retries):
packet = self.writeRead(command, input_list)
if packet[0]:
# _print("##Attempt: {0} complete".format(i))
time.sleep(0.1)
continue
rtn_dict[rtn_key] = packet
break
else:
# _print("sensor failed to succeed")
rtn_dict[rtn_key] = (True, None, None)
except(KeyboardInterrupt):
print('\n! Received keyboard interrupt, quitting threads.\n')
raise KeyboardInterrupt # fix bug where a thread eats the interrupt
def queueWriteRead(self, rtn_dict, rtn_key, retries, command, input_list=None):
return threading.Thread(target=self._queueWriteRead, args=(rtn_dict, rtn_key, retries, command, input_list))
def _generateStreamParse(self):
stream_string = '>'
if self.stream_slot_cmds is None:
self.getStreamingSlots()
for slot_cmd in self.stream_slot_cmds:
if slot_cmd != 'null':
out_struct = self.command_dict[slot_cmd][2]
stream_string += out_struct[1:] # stripping the >
self.stream_parse = struct.Struct(stream_string)
# Set streaming batch command
self.command_dict['_getStreamingBatch'] = (0x54, self.stream_parse.size, stream_string, 0, None, 1)
def _parseStreamData(self, protocol_data, output_data):
rtn_list = self.stream_parse.unpack(output_data)
if len(rtn_list) == 1:
rtn_list = rtn_list[0]
self.latest_lock.acquire()
self.new_data = True
self.latest_lock.notify()
self.latest_lock.release()
data = (protocol_data, rtn_list)
self.stream_last_data = data
if self.record_data:
self.stream_data.append(data)
if self.callback_func:
self.callback_func(data)
def _dataReadLoop(self):
while self.data_loop:
try:
self._readDataWiredProHeader()
except(KeyboardInterrupt):
print('\n! Received keyboard interrupt, quitting threads.\n')
raise KeyboardInterrupt # fix bug where a thread eats the interrupt
except:
# traceback.print_exc()
# _print("bad _parseStreamData parse")
# _print('!!!!!inWaiting = {0}'.format(self.serial_port.inWaiting()))
self._read_data = None
try:
self.read_lock.release()
except:
pass
def _readDataWiredProHeader(self):
_serial_port = self.serial_port
# in_wait = _serial_port.inWaiting()
# if in_wait:
# _print('!666! inWaiting = {0}'.format(in_wait))
header_bytes = _serial_port.read(self.header_parse.size)
if header_bytes:
if self.timestamp_mode == TSS_TIMESTAMP_SENSOR:
header_data = self.header_parse.unpack(header_bytes)
header_list = padProtocolHeader71(header_data)
elif self.timestamp_mode == TSS_TIMESTAMP_SYSTEM:
sys_timestamp = time.clock() # time the packet was parsed; it may have sat in the system buffer a few ms
sys_timestamp *= 1000000
header_data = self.header_parse.unpack(header_bytes)
header_list = padProtocolHeader69(header_data, sys_timestamp)
else:
header_data = self.header_parse.unpack(header_bytes)
header_list = padProtocolHeader69(header_data, None)
fail_byte, timestamp, cmd_echo, ck_sum, rtn_log_id, sn, data_size = header_list
output_data = _serial_port.read(data_size)
if cmd_echo == 0xff:
if data_size:
self._parseStreamData(timestamp, output_data)
return
self.read_lock.acquire()
if len(self.read_queue): # here for a bug in the code
uid, cmd_byte = self.read_queue.popleft()
if cmd_byte == cmd_echo:
self.read_dict[uid] = (header_list, output_data)
self.read_lock.notify() # dies in 3 seconds if there is a writeRead in wait
else:
# _print('Unrequested packet found!!!')
# _hexDump(header_bytes, 'o')
# _hexDump(output_data, 'o')
self.read_queue.appendleft((uid, cmd_byte))
self.read_lock.release()
return
# _print('Unrequested packet found!!!')
# _hexDump(header_bytes, 'o')
# _hexDump(output_data, 'o')
self.read_lock.release()
def getLatestStreamData(self, timeout):
self.latest_lock.acquire()
self.new_data = False
self.latest_lock.wait(timeout)
self.latest_lock.release()
if self.new_data:
return self.stream_last_data
def setNewDataCallBack(self, callback):
self.callback_func = callback
def startRecordingData(self):
self.record_data = True
def stopRecordingData(self):
self.record_data = False
def clearRecordingData(self):
self.stream_data = []
# Convenience functions to replace commands 244(0xf4) and 245(0xf5)
def setGlobalAxis(self, hid_type, config_axis, local_axis, global_axis, deadzone, scale, power):
""" Sets an axis of the desired emulated input device as a 'Global Axis'
style axis. Axes operating under this style use a reference vector
and a consistent local vector to determine the state of the device's
axis. As the local vector rotates, it is projected onto the global
vector. Once the distance of that projection on the global vector
exceeds the inputted "deadzone", the device will begin transmitting
non-zero values for the device's desired axis.
@param hid_type: An integer whose value defines whether the device
in question is a TSS_JOYSTICK or TSS_MOUSE.
@param config_axis: A string whose value may be either 'X' or 'Y'
for a mouse or 'X', 'Y', or 'Z' for a joystick. This string
defines what axis of the device is to be configured.
@param local_axis: A list of 3 Floats whose value is a normalized
Vector3. This vector represents the sensor's local vector to
track.
@param global_axis: A list of 3 Floats whose value is a normalized
Vector3. This vector represents the global vector to project the
local vector onto (should be orthogonal to the local vector).
@param deadzone: A float that defines the minimum distance necessary
for the device's axis to read a non-zero value.
@param scale: A float that defines the linear scale for the values
being returned for the axis.
@param power: A float whose value is an exponential power used to
further modify data being returned from the sensor.
@return: True if the command was successfully written to the device.
False if the command was not written.
"""
# Set class
if hid_type != TSS_JOYSTICK and hid_type != TSS_MOUSE:
_print("Invalid command for hid_type: {0:d}".format(hid_type))
return False
cntl_class = hid_type
# Set index
axis_idx = ["X", "Y", "Z"]
if cntl_class == TSS_MOUSE:
axis_idx.pop(-1)
config_axis = config_axis.upper()
cntl_idx = -1
try:
cntl_idx = axis_idx.index(config_axis)
except:
_print("Invalid command for config_axis: {0:s}".format(config_axis))
return False
# Set mode
if not self.setControlMode(cntl_class, cntl_idx, 0):
return False
# Create data array
data_array = local_axis + global_axis + [deadzone, scale, power]
# Set data
for i in range(len(data_array)):
if not self.setControlData(cntl_class, cntl_idx, i, data_array[i]):
return False
return True
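# Illustrative usage sketch with placeholder tuning values; 'sensor' is
# assumed to be a connected _TSSensor. This maps rotation of the local
# X vector toward global -Z onto the joystick X axis, mirroring what
# setupSimpleJoystick() below does:
#
#   sensor.setGlobalAxis(TSS_JOYSTICK, "X", [1, 0, 0], [0, 0, -1],
#                        deadzone=0.1, scale=1.0, power=1.0)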
def setScreenPointAxis(self, hid_type, config_axis, dist_from_screen, dist_on_axis, collision_component, sensor_dir, button_halt):
""" Sets an axis of the desired emulated input device as a 'Screen Point
Axis' style axis. An axis operating under this style projects a
vector along the sensor's direction vector into a mathematical plane.
The collision point on the plane is then used to determine the
device's current axis value. The direction vector is rotated
based on the orientation of the sensor.
@param hid_type: An integer whose value defines whether the device
in question is a TSS_JOYSTICK or TSS_MOUSE.
@param config_axis: A string whose value may be either 'X' or 'Y'
for a mouse or 'X', 'Y', or 'Z' for a joystick. This string
defines what axis of the device is to be configured.
@param dist_from_screen: A float whose value is the real world
distance the sensor is from the user's screen. Must be the same
units as dist_on_axis.
@param dist_on_axis: A float whose value is the real world length of
the axis along the user's screen (width of screen for x-axis,
height of screen for y-axis). Must be the same units as
dist_from_screen.
@param collision_component: A string whose value may be 'X', 'Y', or
'Z'. This string defines what component of the look vector's
collision point on the virtual plane to use for manipulating the
device's axis.
@param sensor_dir: A string whose value may be 'X', 'Y', or 'Z'.
This string defines which of the sensor's local axis to use for
creating the vector to collide with the virtual plane.
@param button_halt: A float whose value is a pause time in
milliseconds. When a button is pressed on the emulated device,
transmission of changes to the axis is paused for the inputted
amount of time to prevent undesired motion detection when
pressing buttons.
@return: True if the command was successfully written to the device.
False if the command was not written.
"""
# Set class
if hid_type != TSS_JOYSTICK and hid_type != TSS_MOUSE:
_print("Invalid command for hid_type: {0:d}".format(hid_type))
return False
cntl_class = hid_type
# Set index
axis_idx = ["X", "Y", "Z"]
if cntl_class == TSS_MOUSE:
axis_idx.pop(-1)
config_axis = config_axis.upper()
cntl_idx = -1
try:
cntl_idx = axis_idx.index(config_axis)
except:
_print("Invalid command for config_axis: {0:s}".format(config_axis))
return False
# Set mode
if not self.setControlMode(cntl_class, cntl_idx, 1):
return False
# Create data array
axis_idx = ["X", "Y", "Z"]
data_array = []
data_array.append(dist_from_screen)
data_array.append(dist_on_axis)
collision_component = collision_component.upper()
try:
data_array.append(axis_idx.index(collision_component))
except:
_print("Invalid command for collision_component: {0:s}".format(collision_component))
return False
sensor_dir = sensor_dir.upper()
try:
data_array.append(axis_idx.index(sensor_dir))
except:
_print("Invalid command for sensor_dir: {0:s}".format(sensor_dir))
return False
data_array.append(0)
data_array.append(0)
data_array.append(0)
data_array.append(button_halt)
data_array.append(0)
data_array.append(0)
# Set data
for i in range(len(data_array)):
if not self.setControlData(cntl_class, cntl_idx, i, data_array[i]):
return False
return True
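# Illustrative usage sketch with placeholder screen measurements;
# 'sensor' is assumed to be a connected _TSSensor. This drives the mouse
# X axis by colliding the sensor's Z vector with a virtual screen plane
# 24 units away and 21 units wide, pausing the axis for 50 ms around
# button presses (mirroring the calls made in setupSimpleMouse() below):
#
#   sensor.setScreenPointAxis(TSS_MOUSE, "X", 24.0, 21.0, "X", "Z", 50)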
def disableAxis(self, hid_type, config_axis):
""" Disables an axis on the passed in device.
@param hid_type: An integer whose value defines whether the device
in question is a TSS_JOYSTICK or TSS_MOUSE.
@param config_axis: A string whose value may be either 'X' or 'Y'
for a mouse or 'X', 'Y', or 'Z' for a joystick. This string
defines what axis of the device is to be configured.
@return: True if the command was successfully written to the device.
False if the command was not written.
"""
# Set class
if hid_type != TSS_JOYSTICK and hid_type != TSS_MOUSE:
_print("Invalid command for hid_type: {0:d}".format(hid_type))
return False
cntl_class = hid_type
# Set index
axis_idx = ["X", "Y", "Z"]
if cntl_class == TSS_MOUSE:
axis_idx.pop(-1)
config_axis = config_axis.upper()
cntl_idx = -1
try:
cntl_idx = axis_idx.index(config_axis)
except:
_print("Invalid command for config_axis: {0:s}".format(config_axis))
return False
# Set mode
return self.setControlMode(cntl_class, cntl_idx, 255)
def setPhysicalButton(self, hid_type, button_idx, button_bind):
""" Binds a sensor's physical button to an emulated device's button.
@param hid_type: An integer whose value defines whether the device
in question is a TSS_JOYSTICK or TSS_MOUSE.
@param button_idx: An integer whose value defines which button on
the emulated device to configure. Default range is 0 through 7.
@param button_bind: An integer whose value defines which physical
button to bind to the emulated device's button to as defined by
button_idx, either TSS_BUTTON_LEFT or TSS_BUTTON_RIGHT.
@return: True if the command was successfully written to the device.
False if the command was not written.
"""
# Set class
if hid_type != TSS_JOYSTICK and hid_type != TSS_MOUSE:
_print("Invalid command for hid_type: {0:d}".format(hid_type))
return False
cntl_class = 1 + hid_type
# Set mode
if not self.setControlMode(cntl_class, button_idx, 0):
return False
# Create data
if button_bind != TSS_BUTTON_LEFT and button_bind != TSS_BUTTON_RIGHT:
_print("Invalid command for button_bind: {0:d}".format(button_bind))
return False
data = button_bind
# Set data
return self.setControlData(cntl_class, button_idx, 0, data)
def setOrientationButton(self, hid_type, button_idx, local_axis, global_axis, max_dist):
""" Sets up a device's button such that it is 'pressed' when a reference
vector aligns itself with a local vector.
@param hid_type: An integer whose value defines whether the device
in question is a TSS_JOYSTICK or TSS_MOUSE.
@param button_idx: An integer whose value defines which button on
the emulated device to configure. Default range is 0 through 7.
@param local_axis: A list of 3 floats whose value represents a
normalized Vector3. This vector represents the sensor's local
vector to track.
@param global_axis: A list of 3 floats whose value is a normalized
Vector3. This vector represents the global vector to move the
local vector towards for "pressing" (should not be colinear to
the local vector).
@param max_dist: A float whose value defines how close the local
vector's orientation must be to the global vector for the button
to be 'pressed'.
@return: True if the command was successfully written to the device.
False if the command was not written.
"""
# Set class
if hid_type != TSS_JOYSTICK and hid_type != TSS_MOUSE:
_print("Invalid command for hid_type: {0:d}".format(hid_type))
return False
cntl_class = 1 + hid_type
# Set mode
if not self.setControlMode(cntl_class, button_idx, 1):
return False
# Create data array
data_array = local_axis + global_axis + [max_dist]
# Set data
for i in range(7):
if not self.setControlData(cntl_class, button_idx, i, data_array[i]):
return False
return True
def setShakeButton(self, hid_type, button_idx, threshold):
""" Sets up an emulated device's button such that it is 'pressed' when
the sensor is shaken.
@param hid_type: An integer whose value defines whether the device
in question is a TSS_JOYSTICK or TSS_MOUSE.
@param button_idx: An integer whose value defines which button on
the emulated device to configure. Default range is 0 through 7.
@param threshold: A float whose value defines how many Gs of force
must be experienced by the sensor before the button is
'pressed'.
@return: True if the command was successfully written to the device.
False if the command was not written.
"""
# Set class
if hid_type != TSS_JOYSTICK and hid_type != TSS_MOUSE:
_print("Invalid command for hid_type: {0:d}".format(hid_type))
return False
cntl_class = 1 + hid_type
# Set mode
if not self.setControlMode(cntl_class, button_idx, 2):
return False
# Create data array
data_array = [0, 0, 0, threshold]
# Set data
for i in range(4):
if not self.setControlData(cntl_class, button_idx, i, data_array[i]):
return False
return True
def disableButton(self, hid_type, button_idx):
""" Disables a button on the passed in emulated device.
@param hid_type: An integer whose value defines whether the device
in question is a TSS_JOYSTICK or TSS_MOUSE.
@param button_idx: An integer whose value defines which button on
the emulated device to configure. Default range is 0 through 7.
@return: True if the command was successfully written to the device.
False if the command was not written.
"""
# Set class
if hid_type != TSS_JOYSTICK and hid_type != TSS_MOUSE:
_print("Invalid command for hid_type: {0:d}".format(hid_type))
return False
cntl_class = 1 + hid_type
# Set mode
return self.setControlMode(cntl_class, button_idx, 255)
# Convenience functions for setting up simple mouse/joystick implementations
def setupSimpleMouse(self, diagonal_size, dist_from_screen, aspect_ratio, is_relative=True):
""" Creates a simple emulated mouse device using the features of the
sensor. Left button and right button emulate the mouse's left and
right buttons respectively, and using the sensor as a pointing device
with the front of the device facing towards the screen moves the
mouse cursor.
@param diagonal_size: A float whose value is the real world diagonal
size of the user's screen.
@param dist_from_screen: A float whose value is the real world
distance the sensor is from the user's screen. Must be the same
units as diagonal_size.
@param aspect_ratio: A float whose value is the real world aspect
ratio of the user's screen.
@param is_relative: A boolean whose value expresses whether the
mouse is to operate in relative mode (True) or absolute mode
(False).
@return: True if the command was successfully written to the device.
False if the command was not written.
"""
cur_mouse_rel = self.getMouseAbsoluteRelativeMode()
if cur_mouse_rel != is_relative:
if self.setMouseAbsoluteRelativeMode(is_relative):
fail_byte, t_stamp, data = self.writeRead('softwareReset')
if not fail_byte:
while self.getSerialNumber():
pass
self.close()
time.sleep(5)
while self.reconnect():
pass
unit_hyp = (aspect_ratio ** 2 + 1) ** 0.5
screen_multiplyer = diagonal_size / unit_hyp
screen_width = screen_multiplyer * aspect_ratio
screen_height = screen_multiplyer
_print("Height: {0:2f}".format(screen_height))
_print("Width: {0:2f}".format(screen_width))
self.setScreenPointAxis(TSS_MOUSE, "X", dist_from_screen, screen_width, "X", "Z", 50)
self.setScreenPointAxis(TSS_MOUSE, "Y", dist_from_screen, screen_height, "Y", "Z", 50)
self.setPhysicalButton(TSS_MOUSE, 0, TSS_BUTTON_LEFT)
self.setPhysicalButton(TSS_MOUSE, 1, TSS_BUTTON_RIGHT)
self.disableButton(TSS_MOUSE, 2)
self.disableButton(TSS_MOUSE, 3)
self.disableButton(TSS_MOUSE, 4)
self.disableButton(TSS_MOUSE, 5)
self.disableButton(TSS_MOUSE, 6)
self.disableButton(TSS_MOUSE, 7)
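# Illustrative usage sketch with placeholder measurements; 'sensor' is
# assumed to be a connected _TSSensor. This configures the emulated
# mouse for a 23-inch 16:9 monitor viewed from roughly 30 inches away,
# operating in relative mode:
#
#   sensor.setupSimpleMouse(23.0, 30.0, 16.0 / 9.0, is_relative=True)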
def setupSimpleJoystick(self, deadzone, scale, power, shake_threshold, max_dist):
""" Creates a simple emulated joystick device using the features of the
sensor. The left and right physical buttons on the sensor act as
buttons 0 and 1 for the joystick. Button 2 is a shake button.
Buttons 3 and 4 are pressed when the sensor is rotated +-90 degrees
on the Z-axis. Rotations on the sensor's Y and X axes correspond to
movements on the joystick's X and Y axes.
@param deadzone: A float that defines the minimum distance necessary
for the device's axis to read a non-zero value.
@param scale: A float that defines the linear scale for the values
being returned for the axis.
@param power: A float whose value is an exponential power used to
further modify data being returned from the sensor.
@param shake_threshold: A float whose value defines how many Gs of
force must be experienced by the sensor before button 2 is
'pressed'.
@param max_dist: A float whose value defines how close the local
vector's orientation must be to the global vector for buttons 3
and 4 are "pressed".
@return: True if the command was successfully written to the device.
False if the command was not written.
"""
self.setGlobalAxis(TSS_JOYSTICK, "X", [1, 0, 0], [0, 0, -1], deadzone, scale, power)
self.setGlobalAxis(TSS_JOYSTICK, "Y", [0, 1, 0], [0, 0, -1], deadzone, scale, power)
self.setPhysicalButton(TSS_JOYSTICK, 0, TSS_BUTTON_LEFT)
self.setPhysicalButton(TSS_JOYSTICK, 1, TSS_BUTTON_RIGHT)
self.setShakeButton(TSS_JOYSTICK, 2, shake_threshold)
self.setOrientationButton(TSS_JOYSTICK, 3, [0, 1, 0], [-1, 0, 0], max_dist)
self.setOrientationButton(TSS_JOYSTICK, 4, [0, 1, 0], [1, 0, 0], max_dist)
self.disableButton(TSS_JOYSTICK, 5)
self.disableButton(TSS_JOYSTICK, 6)
self.disableButton(TSS_JOYSTICK, 7)
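## Example (a sketch, not part of the original API): enabling the emulated
## joystick with illustrative tuning values (deadzone=0.1, scale=1.0,
## power=1.0, shake_threshold=2.0 g, max_dist=0.1); see the docstring
## above for what each parameter controls.
##
##     sensor.setJoystickEnabled(True)
##     sensor.setupSimpleJoystick(0.1, 1.0, 1.0, 2.0, 0.1)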
# LightGun Functions
def setupSimpleLightgun(self, diagonal_size, dist_from_screen, aspect_ratio, is_relative=True):
""" Creates a simple emulated mouse based lightgun device using the
features of the sensor. Left button of the sensor emulates the
mouse's left button. Shaking the sensor emulates the mouse's right
button. This configuration uses the sensor as a pointing device with
the front of the device facing forward the screen will move the
mouse cursor.
@param diagonal_size: A float whose value is the real world diagonal
size of the user's screen.
@param dist_from_screen: A float whose value is the real world
distance the sensor is from the user's screen. Must be the same
units as diagonal_size.
@param aspect_ratio: A float whose value is the real world aspect
ratio of the user's screen.
@param is_relative: A boolean whose value expresses whether the
mouse is to operate in relative mode (True) or absolute mode
(False).
@return: True if the command was successfully written to the device.
False if the command was not written.
"""
cur_mouse_rel = self.getMouseAbsoluteRelativeMode()
if cur_mouse_rel != is_relative:
if self.setMouseAbsoluteRelativeMode(is_relative):
fail_byte, t_stamp, data = self.writeRead('softwareReset')
if not fail_byte:
while self.getSerialNumber():
pass
self.close()
time.sleep(5)
while self.reconnect():
pass
unit_hyp = (aspect_ratio ** 2 + 1) ** 0.5
screen_multiplier = diagonal_size / unit_hyp
screen_width = screen_multiplier * aspect_ratio
screen_height = screen_multiplier
_print("Height: {0:.2f}".format(screen_height))
_print("Width: {0:.2f}".format(screen_width))
self.setScreenPointAxis(TSS_MOUSE, "X", dist_from_screen, screen_width, "X", "Z", 50)
self.setScreenPointAxis(TSS_MOUSE, "Y", dist_from_screen, screen_height, "Y", "Z", 50)
self.setPhysicalButton(TSS_MOUSE, 0, TSS_BUTTON_LEFT)
self.setShakeButton(TSS_MOUSE, 1, 1.0)
self.disableButton(TSS_MOUSE, 2)
self.disableButton(TSS_MOUSE, 3)
self.disableButton(TSS_MOUSE, 4)
self.disableButton(TSS_MOUSE, 5)
self.disableButton(TSS_MOUSE, 6)
self.disableButton(TSS_MOUSE, 7)
## 80(0x50)
def setStreamingSlots(self, slot0='null',
slot1='null',
slot2='null',
slot3='null',
slot4='null',
slot5='null',
slot6='null',
slot7='null'):
slots = [slot0, slot1, slot2, slot3, slot4, slot5, slot6, slot7]
slot_bytes = []
for slot in slots:
cmd_byte = self.command_dict[slot][0]
slot_bytes.append(cmd_byte)
fail_byte, timestamp, filler = self.writeRead('_setStreamingSlots', slot_bytes)
self.stream_slot_cmds = slots
self._generateStreamParse()
return not fail_byte
## 81(0x51)
def getStreamingSlots(self):
if self.stream_slot_cmds is None:
self.stream_slot_cmds = ['null'] * 8
fail_byte, timestamp, slot_bytes = self.writeRead('_getStreamingSlots')
need_update = False
if slot_bytes:
for slot_idx in range(len(self.stream_slot_cmds)):
cmd_byte = slot_bytes[slot_idx]
cmd_string = self.reverse_command_dict[cmd_byte]
if self.stream_slot_cmds[slot_idx] != cmd_string:
self.stream_slot_cmds[slot_idx] = cmd_string
need_update = True
if need_update:
self._generateStreamParse()
return self.stream_slot_cmds
## 82(0x52)
def setStreamingTiming(self, interval, duration, delay, timestamp=False):
arg_list = (interval, duration, delay)
fail_byte, t_stamp, data = self.writeRead('_setStreamingTiming', arg_list)
if not fail_byte:
self.stream_timing = arg_list
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 83(0x53)
def getStreamingTiming(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('_getStreamingTiming')
if data:
self.stream_timing = data
if timestamp:
return (data, t_stamp)
return data
## 84(0x54)
def getStreamingBatch(self, timestamp=False):
if self.stream_parse is None:
self._generateStreamParse()
fail_byte, t_stamp, data = self.writeRead('_getStreamingBatch')
if timestamp:
return (data, t_stamp)
return data
## 85(0x55)
def stopStreaming(self):
self.record_data = False
fail_byte, timestamp, slot_bytes = self.writeRead('stopStreaming')
return not fail_byte
## 86(0x56)
def startStreaming(self, start_record=False):
self.record_data = start_record
if self.stream_parse is None:
self._generateStreamParse()
fail_byte, timestamp, slot_bytes = self.writeRead('startStreaming')
return not fail_byte
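## Example (a sketch, not part of the original API): a minimal streaming
## session. The slot name is a real command in this module; the timing
## values are placeholder microseconds (interval=16700, a duration of
## 0xffffffff is assumed here to mean "stream indefinitely", delay=0).
##
##     sensor.setStreamingSlots(slot0='getTaredOrientationAsQuaternion')
##     sensor.setStreamingTiming(16700, 0xffffffff, 0)
##     sensor.startStreaming()
##     for _ in range(10):
##         print(sensor.getStreamingBatch())
##     sensor.stopStreaming()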
## generated functions USB and WL_ and EM_ and DL_ and BT_
## 0(0x00)
def getTaredOrientationAsQuaternion(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getTaredOrientationAsQuaternion')
if timestamp:
return (data, t_stamp)
return data
## 1(0x01)
def getTaredOrientationAsEulerAngles(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getTaredOrientationAsEulerAngles')
if timestamp:
return (data, t_stamp)
return data
## 2(0x02)
def getTaredOrientationAsRotationMatrix(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getTaredOrientationAsRotationMatrix')
if timestamp:
return (data, t_stamp)
return data
## 3(0x03)
def getTaredOrientationAsAxisAngle(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getTaredOrientationAsAxisAngle')
if timestamp:
return (data, t_stamp)
return data
## 4(0x04)
def getTaredOrientationAsTwoVector(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getTaredOrientationAsTwoVector')
if timestamp:
return (data, t_stamp)
return data
## 5(0x05)
def getDifferenceQuaternion(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getDifferenceQuaternion')
if timestamp:
return (data, t_stamp)
return data
## 6(0x06)
def getUntaredOrientationAsQuaternion(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getUntaredOrientationAsQuaternion')
if timestamp:
return (data, t_stamp)
return data
## 7(0x07)
def getUntaredOrientationAsEulerAngles(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getUntaredOrientationAsEulerAngles')
if timestamp:
return (data, t_stamp)
return data
## 8(0x08)
def getUntaredOrientationAsRotationMatrix(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getUntaredOrientationAsRotationMatrix')
if timestamp:
return (data, t_stamp)
return data
## 9(0x09)
def getUntaredOrientationAsAxisAngle(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getUntaredOrientationAsAxisAngle')
if timestamp:
return (data, t_stamp)
return data
## 10(0x0a)
def getUntaredOrientationAsTwoVector(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getUntaredOrientationAsTwoVector')
if timestamp:
return (data, t_stamp)
return data
## 11(0x0b)
def getTaredTwoVectorInSensorFrame(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getTaredTwoVectorInSensorFrame')
if timestamp:
return (data, t_stamp)
return data
## 12(0x0c)
def getUntaredTwoVectorInSensorFrame(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getUntaredTwoVectorInSensorFrame')
if timestamp:
return (data, t_stamp)
return data
## 16(0x10)
def setEulerAngleDecompositionOrder(self, angle_order, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setEulerAngleDecompositionOrder', angle_order)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 17(0x11)
def setMagnetoresistiveThreshold(self, threshold, trust_frames, lockout_decay, perturbation_detection_value, timestamp=False):
arg_list = (threshold, trust_frames, lockout_decay, perturbation_detection_value)
fail_byte, t_stamp, data = self.writeRead('setMagnetoresistiveThreshold', arg_list)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 18(0x12)
def setAccelerometerResistanceThreshold(self, threshold, lockout_decay, timestamp=False):
arg_list = (threshold, lockout_decay)
fail_byte, t_stamp, data = self.writeRead('setAccelerometerResistanceThreshold', arg_list)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 19(0x13)
def offsetWithCurrentOrientation(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('offsetWithCurrentOrientation')
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 20(0x14)
def resetBaseOffset(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('resetBaseOffset')
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 21(0x15)
def offsetWithQuaternion(self, quaternion, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('offsetWithQuaternion', quaternion)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 22(0x16)
def setBaseOffsetWithCurrentOrientation(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setBaseOffsetWithCurrentOrientation')
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 32(0x20)
def getAllNormalizedComponentSensorData(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getAllNormalizedComponentSensorData')
if timestamp:
return (data, t_stamp)
return data
## 33(0x21)
def getNormalizedGyroRate(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getNormalizedGyroRate')
if timestamp:
return (data, t_stamp)
return data
## 34(0x22)
def getNormalizedAccelerometerVector(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getNormalizedAccelerometerVector')
if timestamp:
return (data, t_stamp)
return data
## 35(0x23)
def getNormalizedCompassVector(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getNormalizedCompassVector')
if timestamp:
return (data, t_stamp)
return data
## 37(0x25)
def getAllCorrectedComponentSensorData(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getAllCorrectedComponentSensorData')
if timestamp:
return (data, t_stamp)
return data
## 38(0x26)
def getCorrectedGyroRate(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getCorrectedGyroRate')
if timestamp:
return (data, t_stamp)
return data
## 39(0x27)
def getCorrectedAccelerometerVector(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getCorrectedAccelerometerVector')
if timestamp:
return (data, t_stamp)
return data
## 40(0x28)
def getCorrectedCompassVector(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getCorrectedCompassVector')
if timestamp:
return (data, t_stamp)
return data
## 41(0x29)
def getCorrectedLinearAccelerationInGlobalSpace(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getCorrectedLinearAccelerationInGlobalSpace')
if timestamp:
return (data, t_stamp)
return data
## 43(0x2b)
def getTemperatureC(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getTemperatureC')
if timestamp:
return (data, t_stamp)
return data
## 44(0x2c)
def getTemperatureF(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getTemperatureF')
if timestamp:
return (data, t_stamp)
return data
## 45(0x2d)
def getConfidenceFactor(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getConfidenceFactor')
if timestamp:
return (data, t_stamp)
return data
## 64(0x40)
def getAllRawComponentSensorData(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getAllRawComponentSensorData')
if timestamp:
return (data, t_stamp)
return data
## 65(0x41)
def getRawGyroscopeRate(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getRawGyroscopeRate')
if timestamp:
return (data, t_stamp)
return data
## 66(0x42)
def getRawAccelerometerData(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getRawAccelerometerData')
if timestamp:
return (data, t_stamp)
return data
## 67(0x43)
def getRawCompassData(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getRawCompassData')
if timestamp:
return (data, t_stamp)
return data
## 96(0x60)
def tareWithCurrentOrientation(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('tareWithCurrentOrientation')
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 97(0x61)
def tareWithQuaternion(self, quaternion, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('tareWithQuaternion', quaternion)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 98(0x62)
def tareWithRotationMatrix(self, rotation_matrix, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('tareWithRotationMatrix', rotation_matrix)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 99(0x63)
def setStaticAccelerometerTrustValue(self, trust_value, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setStaticAccelerometerTrustValue', trust_value)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 100(0x64)
def setConfidenceAccelerometerTrustValues(self, min_trust_value, max_trust_value, timestamp=False):
arg_list = (min_trust_value, max_trust_value)
fail_byte, t_stamp, data = self.writeRead('setConfidenceAccelerometerTrustValues', arg_list)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 101(0x65)
def setStaticCompassTrustValue(self, trust_value, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setStaticCompassTrustValue', trust_value)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 102(0x66)
def setConfidenceCompassTrustValues(self, min_trust_value, max_trust_value, timestamp=False):
arg_list = (min_trust_value, max_trust_value)
fail_byte, t_stamp, data = self.writeRead('setConfidenceCompassTrustValues', arg_list)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 103(0x67)
def setDesiredUpdateRate(self, update_rate, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setDesiredUpdateRate', update_rate)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 105(0x69)
def setReferenceVectorMode(self, mode, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setReferenceVectorMode', mode)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 106(0x6a)
def setOversampleRate(self, samples_per_iteration, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setOversampleRate', samples_per_iteration)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 107(0x6b)
def setGyroscopeEnabled(self, enabled, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setGyroscopeEnabled', enabled)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 108(0x6c)
def setAccelerometerEnabled(self, enabled, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setAccelerometerEnabled', enabled)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 109(0x6d)
def setCompassEnabled(self, enabled, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setCompassEnabled', enabled)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 116(0x74)
def setAxisDirections(self, axis_direction_byte, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setAxisDirections', axis_direction_byte)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 117(0x75)
def setRunningAveragePercent(self, running_average_percent, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setRunningAveragePercent', running_average_percent)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 118(0x76)
def setCompassReferenceVector(self, reference_vector, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setCompassReferenceVector', reference_vector)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 119(0x77)
def setAccelerometerReferenceVector(self, reference_vector, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setAccelerometerReferenceVector', reference_vector)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 120(0x78)
def resetKalmanFilter(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('resetKalmanFilter')
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 121(0x79)
def setAccelerometerRange(self, accelerometer_range_setting, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setAccelerometerRange', accelerometer_range_setting)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 123(0x7b)
def setFilterMode(self, mode, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setFilterMode', mode)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 124(0x7c)
def setRunningAverageMode(self, mode, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setRunningAverageMode', mode)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 125(0x7d)
def setGyroscopeRange(self, gyroscope_range_setting, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setGyroscopeRange', gyroscope_range_setting)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 126(0x7e)
def setCompassRange(self, compass_range_setting, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setCompassRange', compass_range_setting)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 128(0x80)
def getTareAsQuaternion(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getTareAsQuaternion')
if timestamp:
return (data, t_stamp)
return data
## 129(0x81)
def getTareAsRotationMatrix(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getTareAsRotationMatrix')
if timestamp:
return (data, t_stamp)
return data
## 130(0x82)
def getAccelerometerTrustValues(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getAccelerometerTrustValues')
if timestamp:
return (data, t_stamp)
return data
## 131(0x83)
def getCompassTrustValues(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getCompassTrustValues')
if timestamp:
return (data, t_stamp)
return data
## 132(0x84)
def getCurrentUpdateRate(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getCurrentUpdateRate')
if timestamp:
return (data, t_stamp)
return data
## 133(0x85)
def getCompassReferenceVector(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getCompassReferenceVector')
if timestamp:
return (data, t_stamp)
return data
## 134(0x86)
def getAccelerometerReferenceVector(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getAccelerometerReferenceVector')
if timestamp:
return (data, t_stamp)
return data
## 140(0x8c)
def getGyroscopeEnabledState(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getGyroscopeEnabledState')
if timestamp:
return (data, t_stamp)
return data
## 141(0x8d)
def getAccelerometerEnabledState(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getAccelerometerEnabledState')
if timestamp:
return (data, t_stamp)
return data
## 142(0x8e)
def getCompassEnabledState(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getCompassEnabledState')
if timestamp:
return (data, t_stamp)
return data
## 143(0x8f)
def getAxisDirections(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getAxisDirections')
if timestamp:
return (data, t_stamp)
return data
## 144(0x90)
def getOversampleRate(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getOversampleRate')
if timestamp:
return (data, t_stamp)
return data
## 145(0x91)
def getRunningAveragePercent(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getRunningAveragePercent')
if timestamp:
return (data, t_stamp)
return data
## 146(0x92)
def getDesiredUpdateRate(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getDesiredUpdateRate')
if timestamp:
return (data, t_stamp)
return data
## 148(0x94)
def getAccelerometerRange(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getAccelerometerRange')
if timestamp:
return (data, t_stamp)
return data
## 152(0x98)
def getFilterMode(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getFilterMode')
if timestamp:
return (data, t_stamp)
return data
## 153(0x99)
def getRunningAverageMode(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getRunningAverageMode')
if timestamp:
return (data, t_stamp)
return data
## 154(0x9a)
def getGyroscopeRange(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getGyroscopeRange')
if timestamp:
return (data, t_stamp)
return data
## 155(0x9b)
def getCompassRange(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getCompassRange')
if timestamp:
return (data, t_stamp)
return data
## 156(0x9c)
def getEulerAngleDecompositionOrder(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getEulerAngleDecompositionOrder')
if timestamp:
return (data, t_stamp)
return data
## 157(0x9d)
def getMagnetoresistiveThreshold(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getMagnetoresistiveThreshold')
if timestamp:
return (data, t_stamp)
return data
## 158(0x9e)
def getAccelerometerResistanceThreshold(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getAccelerometerResistanceThreshold')
if timestamp:
return (data, t_stamp)
return data
## 159(0x9f)
def getOffsetOrientationAsQuaternion(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getOffsetOrientationAsQuaternion')
if timestamp:
return (data, t_stamp)
return data
## 160(0xa0)
def setCompassCalibrationCoefficients(self, matrix, bias, timestamp=False):
arg_list = []
arg_list.extend(matrix)
arg_list.extend(bias)
fail_byte, t_stamp, data = self.writeRead('setCompassCalibrationCoefficients', arg_list)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 161(0xa1)
def setAccelerometerCalibrationCoefficients(self, matrix, bias, timestamp=False):
arg_list = []
arg_list.extend(matrix)
arg_list.extend(bias)
fail_byte, t_stamp, data = self.writeRead('setAccelerometerCalibrationCoefficients', arg_list)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 162(0xa2)
def getCompassCalibrationCoefficients(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getCompassCalibrationCoefficients')
if timestamp:
return (data, t_stamp)
return data
## 163(0xa3)
def getAccelerometerCalibrationCoefficients(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getAccelerometerCalibrationCoefficients')
if timestamp:
return (data, t_stamp)
return data
## 164(0xa4)
def getGyroscopeCalibrationCoefficients(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getGyroscopeCalibrationCoefficients')
if timestamp:
return (data, t_stamp)
return data
## 165(0xa5)
def beginGyroscopeAutoCalibration(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('beginGyroscopeAutoCalibration')
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 166(0xa6)
def setGyroscopeCalibrationCoefficients(self, matrix, bias, timestamp=False):
arg_list = []
arg_list.extend(matrix)
arg_list.extend(bias)
fail_byte, t_stamp, data = self.writeRead('setGyroscopeCalibrationCoefficients', arg_list)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 169(0xa9)
def setCalibrationMode(self, mode, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setCalibrationMode', mode)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 170(0xaa)
def getCalibrationMode(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getCalibrationMode')
if timestamp:
return (data, t_stamp)
return data
## 171(0xab)
def setOrthoCalibrationDataPointFromCurrentOrientation(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setOrthoCalibrationDataPointFromCurrentOrientation')
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 172(0xac)
def setOrthoCalibrationDataPointFromVector(self, type, index, vector, timestamp=False):
arg_list = (type, index, vector)
fail_byte, t_stamp, data = self.writeRead('setOrthoCalibrationDataPointFromVector', arg_list)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 173(0xad)
def getOrthoCalibrationDataPoint(self, type, index, timestamp=False):
arg_list = (type, index)
fail_byte, t_stamp, data = self.writeRead('getOrthoCalibrationDataPoint', arg_list)
if timestamp:
return (data, t_stamp)
return data
## 174(0xae)
def performOrthoCalibration(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('performOrthoCalibration')
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 175(0xaf)
def clearOrthoCalibrationData(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('clearOrthoCalibrationData')
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 227(0xe3)
def setSleepMode(self, mode, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setSleepMode', mode)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 228(0xe4)
def getSleepMode(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getSleepMode')
if timestamp:
return (data, t_stamp)
return data
## 240(0xf0)
def setJoystickEnabled(self, enabled, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setJoystickEnabled', enabled)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 241(0xf1)
def setMouseEnabled(self, enabled, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setMouseEnabled', enabled)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 242(0xf2)
def getJoystickEnabled(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getJoystickEnabled')
if timestamp:
return (data, t_stamp)
return data
## 243(0xf3)
def getMouseEnabled(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getMouseEnabled')
if timestamp:
return (data, t_stamp)
return data
## 244(0xf4)
def setControlMode(self, control_class, control_index, handler_index, timestamp=False):
arg_list = (control_class, control_index, handler_index)
fail_byte, t_stamp, data = self.writeRead('setControlMode', arg_list)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 245(0xf5)
def setControlData(self, control_class, control_index, data_point_index, data_point, timestamp=False):
arg_list = (control_class, control_index, data_point_index, data_point)
fail_byte, t_stamp, data = self.writeRead('setControlData', arg_list)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 246(0xf6)
def getControlMode(self, control_class, control_index, timestamp=False):
arg_list = (control_class, control_index)
fail_byte, t_stamp, data = self.writeRead('getControlMode', arg_list)
if timestamp:
return (data, t_stamp)
return data
## 247(0xf7)
def getControlData(self, control_class, control_index, handler_index, timestamp=False):
arg_list = (control_class, control_index, handler_index)
fail_byte, t_stamp, data = self.writeRead('getControlData', arg_list)
if timestamp:
return (data, t_stamp)
return data
## 251(0xfb)
def setMouseAbsoluteRelativeMode(self, mode, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setMouseAbsoluteRelativeMode', mode)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 252(0xfc)
def getMouseAbsoluteRelativeMode(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getMouseAbsoluteRelativeMode')
if timestamp:
return (data, t_stamp)
return data
## END generated functions USB and WL_ and EM_ and DL_ and BT_
class TSUSBSensor(_TSSensor):
command_dict = _TSSensor.command_dict.copy()
command_dict.update({
'_setUARTBaudRate': (0xe7, 0, None, 4, '>I', 1),
'getUARTBaudRate': (0xe8, 4, '>I', 0, None, 1),
'getButtonState': (0xfa, 1, '>B', 0, None, 1)
})
reverse_command_dict = dict(map(lambda x: [x[1][0], x[0]], command_dict.items()))
_device_types = ["USB", "USB-HH", "MUSB", "MUSB-HH", "USBWT", "USBWT-HH"]
def __new__(cls, com_port=None, baudrate=_baudrate, timestamp_mode=TSS_TIMESTAMP_SENSOR):
if com_port is None:
return None
if com_port:
if type(com_port) is str:
port_name = com_port
elif type(com_port) is ComInfo:
port_name = com_port.com_port
else:
_print("An erronous parameter was passed in")
return None
if baudrate not in _allowed_baudrates:
baudrate = _baudrate
_print("Error baudrate value not allowed. Using default.")
serial_port = serial.Serial(port_name, baudrate=baudrate, timeout=0.5, writeTimeout=0.5)
if serial_port is not None:
new_inst = super(_TSSensor, cls).__new__(cls)
serial_port.write(bytearray((0xf7, 0x56, 0x56)))
time.sleep(0.01)
serial_port.flushInput()
return _generateSensorClass(new_inst, serial_port, TSUSBSensor._device_types)
_print('Error: serial port could not be created')
## 231(0xe7)
def setUARTBaudRate(self, baud_rate, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('_setUARTBaudRate', baud_rate)
if not fail_byte:
self.baudrate = baud_rate
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## generated functions USB
## 232(0xe8)
def getUARTBaudRate(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getUARTBaudRate')
if timestamp:
return (data, t_stamp)
return data
## 250(0xfa)
def getButtonState(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getButtonState')
if timestamp:
return (data, t_stamp)
return data
## END generated functions USB
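## Example usage (a minimal sketch, not part of the original API): open a
## USB sensor, read one tared orientation, and close the port. The port
## name 'COM8' is a placeholder; on Linux it might be '/dev/ttyACM0'.
def _exampleUSBSensorUsage(port_name='COM8'):
    sensor = TSUSBSensor(port_name)  # __new__ returns None if the port could not be opened
    if sensor is None:
        return None
    quat = sensor.getTaredOrientationAsQuaternion()  # unpacked quaternion tuple, or None on failure
    sensor.close()
    return quat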
class TSWLSensor(_TSSensor):
command_dict = _TSSensor.command_dict.copy()
command_dict.update({
'_getWirelessPanID': (0xc0, 2, '>H', 0, None, 1),
'_setWirelessPanID': (0xc1, 0, None, 2, '>H', 1),
'_getWirelessChannel': (0xc2, 1, '>B', 0, None, 1),
'_setWirelessChannel': (0xc3, 0, None, 1, '>B', 1),
'commitWirelessSettings': (0xc5, 0, None, 0, None, 1),
'getWirelessAddress': (0xc6, 2, '>H', 0, None, 1),
'getBatteryVoltage': (0xc9, 4, '>f', 0, None, 1),
'getBatteryPercentRemaining': (0xca, 1, '>B', 0, None, 1),
'getBatteryStatus': (0xcb, 1, '>B', 0, None, 1),
'getButtonState': (0xfa, 1, '>B', 0, None, 1)
})
reverse_command_dict = dict(map(lambda x: [x[1][0], x[0]], command_dict.items()))
_device_types = ["WL", "WL-HH"]
def __new__(cls, com_port=None, baudrate=_baudrate, timestamp_mode=TSS_TIMESTAMP_SENSOR, logical_id=None, dongle=None):
if com_port is None and logical_id is None and dongle is None:
return None
if com_port:
if type(com_port) is str:
port_name = com_port
elif type(com_port) is ComInfo:
port_name = com_port.com_port
else:
_print("An erronous parameter was passed in")
return None
if baudrate not in _allowed_baudrates:
baudrate = _baudrate
_print("Error baudrate value not allowed. Using default.")
serial_port = serial.Serial(port_name, baudrate=baudrate, timeout=0.5, writeTimeout=0.5)
if serial_port is not None:
new_inst = super(_TSSensor, cls).__new__(cls)
new_inst.dongle = None
new_inst.logical_id = None
serial_port.write(bytearray((0xf7, 0x56, 0x56)))
time.sleep(0.01)
serial_port.flushInput()
return _generateSensorClass(new_inst, serial_port, TSWLSensor._device_types)
_print('Error: serial port could not be created')
if logical_id is not None and dongle:
for tries in range(_wireless_retries + 1):
fail_byte, timestamp, serial_number = dongle.faWriteRead(logical_id, 'getSerialNumber')
if not fail_byte:
if serial_number in global_sensorlist:
rtn_inst = global_sensorlist[serial_number]
if rtn_inst.dongle:
_print("sensor was already paired before")
pass
rtn_inst.dongle = dongle
rtn_inst.logical_id = logical_id
dongle.wireless_table[logical_id] = serial_number
rtn_inst.switchToWirelessMode()
return rtn_inst
else:
new_inst = super(_TSSensor, cls).__new__(cls)
for tries in range(_wireless_retries + 1):
fail_byte, timestamp, hardware_version = dongle.faWriteRead(logical_id, 'getHardwareVersionString')
if not fail_byte:
new_inst.device_type = convertString(hardware_version)[4:-8].strip()
break
else:
new_inst.device_type = "WL"
new_inst.dongle = dongle
new_inst.logical_id = logical_id
new_inst.port_name = ""
new_inst.serial_port_settings = {}
new_inst.serial_port = None
new_inst.switchToWirelessMode()
new_inst.serial_number = serial_number
global_sensorlist[serial_number] = new_inst
return new_inst
_print("raise wireless fail error here")
return None
_print('this should never happen')
return None
def __init__(self, com_port=None, baudrate=_baudrate, timestamp_mode=TSS_TIMESTAMP_SENSOR, logical_id=None, dongle=None):
self.protocol_args = { 'success_failure': True,
'timestamp': True,
'command_echo': True,
'data_length': True}
if timestamp_mode != TSS_TIMESTAMP_SENSOR:
self.protocol_args['timestamp'] = False
self.timestamp_mode = timestamp_mode
self.baudrate = baudrate
reinit = False
try: # if this attribute already exists, the instance was initialized before
check = self.stream_parse
reinit = True
# _print("sensor reinit!!!")
except:
self._setupBaseVariables()
self.callback_func = None
if self.serial_port and not self.data_loop:
self._setupProtocolHeader(**self.protocol_args)
self._setupThreadedReadLoop()
self.latest_lock = threading.Condition(threading.Lock())
self.new_data = False
if reinit:
if self.stream_timing is not None:
self.setStreamingTiming(*self.stream_timing)
if self.stream_slot_cmds is not None:
self.setStreamingSlots(*self.stream_slot_cmds)
def close(self):
if self.serial_port is not None:
super(TSWLSensor, self).close()
def _wirelessWriteRead(self, command, input_list=None):
result = (True, None, None)
for i in range(_wireless_retries + 1):
result = self.dongle.faWriteRead(self.logical_id, command, input_list)
if not result[0]:
break
return result
def switchToWirelessMode(self):
if self.dongle and self.logical_id is not None:
self.writeRead = self._wirelessWriteRead
self.wireless_com = True
return True
return False
def switchToWiredMode(self):
if self.serial_port:
self.writeRead = self.f9WriteRead
self.wireless_com = False
return True
return False
## 192(0xc0)
def getWirelessPanID(self, timestamp=False):
t_stamp = None
data = None
fail_byte, t_stamp, data = self.writeRead('_getWirelessPanID')
if timestamp:
return (data, t_stamp)
return data
## 193(0xc1)
def setWirelessPanID(self, PanID, timestamp=False):
t_stamp = None
fail_byte = True
if not self.wireless_com:
fail_byte, t_stamp, data = self.writeRead('_setWirelessPanID', PanID)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 194(0xc2)
def getWirelessChannel(self, timestamp=False):
t_stamp = None
data = None
fail_byte, t_stamp, data = self.writeRead('_getWirelessChannel')
if timestamp:
return (data, t_stamp)
return data
## 195(0xc3)
def setWirelessChannel(self, channel, timestamp=False):
t_stamp = None
fail_byte = True
if not self.wireless_com:
fail_byte, t_stamp, data = self.writeRead('_setWirelessChannel', channel)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## generated functions WL_
## 197(0xc5)
def commitWirelessSettings(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('commitWirelessSettings')
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
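## Example (a sketch, not part of the original API): while wired over USB,
## matching a sensor's channel and pan ID to a dongle and committing the
## settings to non-volatile memory; channel 25 and pan ID 1 are
## placeholder values.
##
##     if sensor.setWirelessChannel(25) and sensor.setWirelessPanID(1):
##         sensor.commitWirelessSettings()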
## 198(0xc6)
def getWirelessAddress(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getWirelessAddress')
if timestamp:
return (data, t_stamp)
return data
## 201(0xc9)
def getBatteryVoltage(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getBatteryVoltage')
if timestamp:
return (data, t_stamp)
return data
## 202(0xca)
def getBatteryPercentRemaining(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getBatteryPercentRemaining')
if timestamp:
return (data, t_stamp)
return data
## 203(0xcb)
def getBatteryStatus(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getBatteryStatus')
if timestamp:
return (data, t_stamp)
return data
## 250(0xfa)
def getButtonState(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getButtonState')
if timestamp:
return (data, t_stamp)
return data
## END generated functions WL_
class TSDongle(_TSBase):
command_dict = _TSBase.command_dict.copy()
command_dict.update({
'setWirelessStreamingAutoFlushMode': (0xb0, 0, None, 1, '>B', 1),
'getWirelessStreamingAutoFlushMode': (0xb1, 1, '>B', 0, None, 1),
'_setWirelessStreamingManualFlushBitfield': (0xb2, 0, None, 2, '>H', 1),
'_getWirelessStreamingManualFlushBitfield': (0xb3, 2, '>H', 0, None, 1),
'_getManualFlushSingle': (0xb4, 0, None, 1, '>B', 1),
'_getManualFlushBulk': (0xb5, 0, None, 0, None, 1),
'broadcastSynchronizationPulse': (0xb6, 0, None, 0, None, 1),
'getReceptionBitfield': (0xb7, 2, '>H', 0, None, 1),
'getWirelessPanID': (0xc0, 2, '>H', 0, None, 1),
'setWirelessPanID': (0xc1, 0, None, 2, '>H', 1),
'getWirelessChannel': (0xc2, 1, '>B', 0, None, 1),
'setWirelessChannel': (0xc3, 0, None, 1, '>B', 1),
'commitWirelessSettings': (0xc5, 0, None, 0, None, 1),
'getWirelessAddress': (0xc6, 2, '>H', 0, None, 1),
'getSerialNumberAtLogicalID': (0xd0, 4, '>I', 1, '>B', 1),
'_setSerialNumberAtLogicalID': (0xd1, 0, None, 5, '>BI', 1),
'getWirelessChannelNoiseLevels': (0xd2, 16, '>16B', 0, None, 1),
'setWirelessRetries': (0xd3, 0, None, 1, '>B', 1),
'getWirelessRetries': (0xd4, 1, '>B', 0, None, 1),
'getWirelessSlotsOpen': (0xd5, 1, '>B', 0, None, 1),
'getSignalStrength': (0xd6, 1, '>B', 0, None, 1),
'setWirelessHIDUpdateRate': (0xd7, 0, None, 1, '>B', 1),
'getWirelessHIDUpdateRate': (0xd8, 1, '>B', 0, None, 1),
'setWirelessHIDAsynchronousMode': (0xd9, 0, None, 1, '>B', 1),
'getWirelessHIDAsynchronousMode': (0xda, 1, '>B', 0, None, 1),
'_setWirelessResponseHeaderBitfield': (0xdb, 0, None, 4, '>I', 1),
'_getWirelessResponseHeaderBitfield': (0xdc, 4, '>I', 0, None, 1),
'setJoystickLogicalID': (0xf0, 0, None, 1, '>B', 1),
'setMouseLogicalID': (0xf1, 0, None, 1, '>B', 1),
'getJoystickLogicalID': (0xf2, 1, '>B', 0, None, 1),
'getMouseLogicalID': (0xf3, 1, '>B', 0, None, 1)
})
wl_command_dict = TSWLSensor.command_dict.copy()
_device_types = ["DNG"]
def __new__(cls, com_port=None, baudrate=_baudrate, timestamp_mode=TSS_TIMESTAMP_SENSOR):
if com_port:
if type(com_port) is str:
port_name = com_port
elif type(com_port) is ComInfo:
port_name = com_port.com_port
else:
_print("An erronous parameter was passed in")
return None
if baudrate not in _allowed_baudrates:
baudrate = _baudrate
_print("Error baudrate value not allowed. Using default.")
serial_port = serial.Serial(port_name, baudrate=baudrate, timeout=0.5, writeTimeout=0.5)
if serial_port is not None:
new_inst = super(TSDongle, cls).__new__(cls)
serial_port.write(bytearray((0xf7, 0x56, 0x56)))
time.sleep(0.05)
serial_port.flushInput()
checkSoftwareVersionFromPort(serial_port)
serial_port.write(bytearray((0xf7, 0xb7, 0xb7)))
reception_bitfield = struct.unpack('>H', serial_port.read(2))[0]
idx = 1
for i in range(15):
if idx & reception_bitfield:
count = 0
serial_port.write(bytearray((0xf7, 0xd0, i, 0xd0 + i)))
wl_id = struct.unpack('>I', serial_port.read(4))[0]
while count < 15:
count += 1
serial_port.write(bytearray((0xf8, i, 0x56, 0x56 + i)))
did_fail = struct.unpack('>B', serial_port.read(1))[0]
if did_fail:
serial_port.read(1)
else:
_print("Stopped {0:08X} on try {1:d}".format(wl_id, count))
serial_port.read(2)
break
idx <<= 1
return _generateSensorClass(new_inst, serial_port, TSDongle._device_types)
_print('Error: serial port could not be created')
def __init__(self, com_port=None, baudrate=_baudrate, timestamp_mode=TSS_TIMESTAMP_SENSOR):
self.protocol_args = { 'success_failure': True,
'timestamp': True,
'command_echo': True,
'logical_id': True,
'data_length': True}
if timestamp_mode != TSS_TIMESTAMP_SENSOR:
self.protocol_args['timestamp'] = False
self.timestamp_mode = timestamp_mode
self.baudrate = baudrate
reinit = False
try: # if this attribute already exists, the instance was initialized before
check = self.wireless_table
reinit = True
# _print("sensor reinit!!!")
except:
self._setupBaseVariables()
self._setupProtocolHeader(**self.protocol_args)
self._setupThreadedReadLoop()
self.setWirelessStreamingAutoFlushMode(1)
self.startStreaming()
def reconnect(self):
self.close()
if not tryPort(self.port_name):
_print("tryport fail")
try:
serial_port = serial.Serial(self.port_name, baudrate=self.baudrate, timeout=0.5, writeTimeout=0.5)
serial_port.applySettingsDict(self.serial_port_settings)
self.serial_port = serial_port
self.setWirelessStreamingAutoFlushMode(0)
time.sleep(0.05)
self.serial_port.flushInput()
for i in range(15):
serial_port.write(bytearray((0xf7, 0xd0, i, 0xd0 + i)))
wl_id = 0
for j in range(10):
try:
wl_id = struct.unpack('>I', serial_port.read(4))[0]
except:
continue
break
if wl_id != 0:
count = 0
while count < 25:
count += 1
serial_port.write(bytearray((0xf8, i, 0x56, 0x56 + i)))
did_fail = struct.unpack('>B', serial_port.read(1))[0]
if did_fail:
serial_port.read(1)
else:
_print("Stopped {0:08X} on try {1:d}".format(wl_id, count))
serial_port.read(2)
break
except:
traceback.print_exc()
return False
self._setupProtocolHeader(**self.protocol_args)
self._setupThreadedReadLoop()
self.setWirelessStreamingAutoFlushMode(1)
return True
def _setupBaseVariables(self):
self.serial_number_hex = '{0:08X}'.format(self.serial_number)
self.wireless_table = [0] * 15
for i in range(15):
tmp_id = self.f7WriteRead('getSerialNumberAtLogicalID', i)
if tmp_id not in self.wireless_table or tmp_id == 0:
self.wireless_table[i] = tmp_id
else:
self.f7WriteRead('_setSerialNumberAtLogicalID', (i, 0))
def _setupProtocolHeader(self, success_failure=False,
timestamp=False,
command_echo=False,
checksum=False,
logical_id=False,
serial_number=False,
data_length=False):
protocol_header = _generateProtocolHeader(success_failure,
timestamp,
command_echo,
checksum,
logical_id,
serial_number,
data_length)
protocol_byte, self.header_parse, self.header_idx_lst = protocol_header
d_header = self.f7WriteRead('_getWiredResponseHeaderBitfield')
dwl_header = self.f7WriteRead('_getWirelessResponseHeaderBitfield')
if d_header != protocol_byte or dwl_header != protocol_byte:
self.f7WriteRead('_setWiredResponseHeaderBitfield', protocol_byte)
self.f7WriteRead('_setWirelessResponseHeaderBitfield', protocol_byte)
d_header = self.f7WriteRead('_getWiredResponseHeaderBitfield')
dwl_header = self.f7WriteRead('_getWirelessResponseHeaderBitfield')
if d_header != protocol_byte or dwl_header != protocol_byte:
print("!!!!!fail d_header={0}, dwl_header={1}, protocol_header_byte={2}".format(d_header, dwl_header, protocol_byte))
raise Exception
# Wireless Old Protocol WriteRead
def f8WriteRead(self, logical_id, command, input_list=None):
command_args = self.command_dict[command]
cmd_byte, out_len, out_struct, in_len, in_struct, compatibility = command_args
packed_data = None
if in_struct:
if type(input_list) in (list, tuple):
packed_data = struct.pack(in_struct, *input_list)
else:
packed_data = struct.pack(in_struct, input_list)
write_array = makeWriteArray(0xf8, logical_id, cmd_byte, packed_data)
self.serial_port.write(write_array)
rtn_list = []
output_data = self.serial_port.read(2)
if len(output_data) == 2:
fail_byte = struct.unpack('>B', output_data[0])[0]
logical_id_byte = struct.unpack('>B', output_data[1])[0]
rtn_list.append(fail_byte)
if not fail_byte:
self.serial_port.read(1)
else:
return True
if out_struct:
output_data = self.serial_port.read(out_len)
rtn_list.append(struct.unpack(out_struct, output_data))
if len(rtn_list) != 1:
return rtn_list
return rtn_list[0]
return True
## Wireless New Protocol WriteRead
def faWriteRead(self, logical_id, command, input_list=None):
global global_counter
command_args = self.wl_command_dict[command]
cmd_byte, out_len, out_struct, in_len, in_struct, compatibility = command_args
if self.compatibility < compatibility:
raise Exception("Firmware for device on ( %s ) is out of date for this function. Recommend updating to latest firmware." % self.serial_port.name)
packed_data = None
if in_struct:
if type(input_list) in (list, tuple):
packed_data = struct.pack(in_struct, *input_list)
else:
packed_data = struct.pack(in_struct, input_list)
write_array = makeWriteArray(0xfa, logical_id, cmd_byte, packed_data)
while len(self.read_queue) > 15:
_print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!too many commands!!!!!")
time.sleep(0.01)
self.read_lock.acquire()
uid = global_counter
global_counter += 1
try:
self.serial_port.write(write_array) # release in reader thread
except serial.SerialTimeoutException:
self.read_lock.release()
self.serial_port.close()
# _print("SerialTimeoutException!!!!")
return (True, None, None)
except ValueError:
try:
# _print("trying to open it back up!!!!")
self.serial_port.open()
# _print("aaand open!!!!")
except serial.SerialException:
self.read_lock.release()
# _print("SerialTimeoutException!!!!")
return (True, None, None)
queue_packet = (uid, cmd_byte)
timeout_time = 0.5 + (len(self.read_queue) * 0.150) # timeout increases as queue gets larger
self.read_queue.append(queue_packet)
start_time = time.clock() + timeout_time
read_data = None
while timeout_time > 0:
self.read_lock.wait(timeout_time)
read_data = self.read_dict.get(uid, None)
if read_data is not None:
break
timeout_time = start_time - time.clock()
# _print("Still waiting {0} {1} {2} {3}".format(uid, command,logical_id, timeout_time))
else:
# _print("Operation timed out!!!!")
try:
self.read_queue.remove(queue_packet)
except:
traceback.print_exc()
self.read_lock.release()
return (True, None, None)
self.read_lock.release()
del self.read_dict[uid]
header_list, output_data = read_data
fail_byte, timestamp, cmd_echo, ck_sum, rtn_log_id, sn, data_size = header_list
# _print("RESponse {0} {1} {2} {3}".format(uid, command,logical_id, timeout_time))
if logical_id != rtn_log_id:
# _print("!!!!!!!!logical_id != rtn_log_id!!!!!")
# _print(header_list)
# _hexDump(output_data, 'o')
# _print('!!!!!inWaiting = {0}'.format(self.serial_port.inWaiting()))
return (True, timestamp, None)
if cmd_echo != cmd_byte:
# _print("!!!!!!!!cmd_echo!=cmd_byte!!!!!")
# _print('cmd_echo= 0x{0:02x} cmd_byte= 0x{1:02x}'.format(cmd_echo, cmd_byte))
# _print(header_list)
# _hexDump(output_data, 'o')
# _print('!!!!!inWaiting = {0}'.format(self.serial_port.inWaiting()))
# _print('!!!!!!end')
return (True, timestamp, None)
rtn_list = None
if not fail_byte:
if out_struct:
rtn_list = struct.unpack(out_struct, output_data)
if len(rtn_list) == 1:
rtn_list = rtn_list[0]
elif cmd_echo == 0x54:
rtn_list = self[logical_id].stream_parse.unpack(output_data)
if len(rtn_list) == 1:
rtn_list = rtn_list[0]
else:
# _print("fail_byte!!!!triggered")
pass
self._read_data = None
return (fail_byte, timestamp, rtn_list)
def __getitem__(self, idx):
hw_id = self.wireless_table[idx]
if hw_id == 0:
return None
# Check if sensor exists.
if hw_id in global_sensorlist:
rtn_inst = global_sensorlist[hw_id]
if rtn_inst.dongle is self:
return rtn_inst
elif rtn_inst.dongle is None:
_print("updating sensor {0:08X} to be wireless".format(hw_id))
return TSWLSensor(timestamp_mode=self.timestamp_mode, dongle=self, logical_id=idx)
return None
# Else, make a new TSWLSensor
else:
_print("making new sensor {0:08X}".format(hw_id))
return TSWLSensor(timestamp_mode=self.timestamp_mode, dongle=self, logical_id=idx)
def getSensorFromDongle(self, idx):
return self.__getitem__(idx)
def setSensorToDongle(self, idx, hw_id):
other_hw_id = self.wireless_table[idx]
if other_hw_id != 0:
if other_hw_id in global_sensorlist:
other_sens = global_sensorlist[other_hw_id]
other_sens.dongle = None
other_sens.logical_id = None
if hw_id not in self.wireless_table:
if hw_id in global_sensorlist:
sensor = global_sensorlist[hw_id]
sensor.dongle = None
sensor.logical_id = None
self.setSerialNumberAtLogicalID(idx, hw_id)
else:
if other_hw_id != hw_id:
other_idx = self.wireless_table.index(hw_id)
self.setSerialNumberAtLogicalID(other_idx, 0)
self.setSerialNumberAtLogicalID(idx, hw_id)
return self.__getitem__(idx)
elif hw_id != 0:
self.setSerialNumberAtLogicalID(idx, hw_id)
return self.__getitem__(idx)
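## Example (a sketch, not part of the original API): assigning a sensor's
## serial number to logical ID 0 on the dongle and retrieving the
## corresponding TSWLSensor instance; 0x12345678 is a placeholder serial
## number.
##
##     sensor = dongle.setSensorToDongle(0, 0x12345678)
##     if sensor is not None:
##         print(sensor.getBatteryPercentRemaining())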
def _dataReadLoop(self):
while self.data_loop:
try:
self._readDataWirelessProHeader()
except(KeyboardInterrupt):
print('\n! Received keyboard interrupt, quitting threads.\n')
raise KeyboardInterrupt # fix bug where a thread eats the interrupt
except:
# traceback.print_exc()
# _print("bad _parseStreamData parse")
# _print('!!!!!inWaiting = {0}'.format(self.serial_port.inWaiting()))
try:
self.read_lock.release()
except:
pass
def _readDataWirelessProHeader(self):
_serial_port = self.serial_port
# in_wait = _serial_port.inWaiting()
# if in_wait:
# _print('!1025! inWaiting = {0}'.format(in_wait))
header_bytes = _serial_port.read(self.header_parse.size)
if header_bytes:
# _hexDump(header_bytes, 'o')
if self.timestamp_mode == TSS_TIMESTAMP_SENSOR:
header_data = self.header_parse.unpack(header_bytes)
header_list = padProtocolHeader87(header_data)
elif self.timestamp_mode == TSS_TIMESTAMP_SYSTEM:
sys_timestamp = time.clock() # time the packet was parsed; it may have sat in the system buffer for a few ms
sys_timestamp *= 1000000
header_data = self.header_parse.unpack(header_bytes)
header_list = padProtocolHeader85(header_data, sys_timestamp)
else:
header_data = self.header_parse.unpack(header_bytes)
header_list = padProtocolHeader85(header_data, None)
fail_byte, timestamp, cmd_echo, ck_sum, rtn_log_id, sn, data_size = header_list
# _print("!!!!fail_byte={0}, cmd_echo={1}, rtn_log_id={2}, data_size={3}".format(fail_byte, cmd_echo, rtn_log_id, data_size))
output_data = _serial_port.read(data_size)
if cmd_echo == 0xff:
if data_size:
self[rtn_log_id]._parseStreamData(timestamp, output_data)
return
self.read_lock.acquire()
# _print('retrning data!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
if len(self.read_queue): # guards against an empty queue (workaround for a known bug)
uid, cmd_byte = self.read_queue.popleft()
if cmd_byte == cmd_echo:
self.read_dict[uid] = (header_list, output_data)
self.read_lock.notifyAll() # dies in 3 seconds if there is a writeRead in wait
else:
# _print('Unrequested packet found!!!')
# _hexDump(header_bytes, 'o')
# _hexDump(output_data, 'o')
self.read_queue.appendleft((uid, cmd_byte))
self.read_lock.release()
return
# _print('Unrequested packet found (read_queue is empty)!!!')
# _hexDump(header_bytes, 'o')
# _hexDump(output_data, 'o')
# _print("no status bytes")
self.read_lock.release()
## 209(0xd1)
def setSerialNumberAtLogicalID(self, logical_id, serial_number, timestamp=False):
arg_list = (logical_id, serial_number)
fail_byte, t_stamp, data = self.writeRead('_setSerialNumberAtLogicalID', arg_list)
if not fail_byte:
self.wireless_table[logical_id] = serial_number
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## generated functions DNG
## 176(0xb0)
def setWirelessStreamingAutoFlushMode(self, mode, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setWirelessStreamingAutoFlushMode', mode)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 177(0xb1)
def getWirelessStreamingAutoFlushMode(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getWirelessStreamingAutoFlushMode')
if timestamp:
return (data, t_stamp)
return data
## 182(0xb6)
def broadcastSynchronizationPulse(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('broadcastSynchronizationPulse')
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 183(0xb7)
def getReceptionBitfield(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getReceptionBitfield')
if timestamp:
return (data, t_stamp)
return data
## 192(0xc0)
def getWirelessPanID(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getWirelessPanID')
if timestamp:
return (data, t_stamp)
return data
## 193(0xc1)
def setWirelessPanID(self, PanID, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setWirelessPanID', PanID)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 194(0xc2)
def getWirelessChannel(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getWirelessChannel')
if timestamp:
return (data, t_stamp)
return data
## 195(0xc3)
def setWirelessChannel(self, channel, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setWirelessChannel', channel)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 197(0xc5)
def commitWirelessSettings(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('commitWirelessSettings')
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 198(0xc6)
def getWirelessAddress(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getWirelessAddress')
if timestamp:
return (data, t_stamp)
return data
## 208(0xd0)
def getSerialNumberAtLogicalID(self, logical_id, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getSerialNumberAtLogicalID', logical_id)
if timestamp:
return (data, t_stamp)
return data
## 210(0xd2)
def getWirelessChannelNoiseLevels(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getWirelessChannelNoiseLevels')
if timestamp:
return (data, t_stamp)
return data
## 211(0xd3)
def setWirelessRetries(self, retries, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setWirelessRetries', retries)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 212(0xd4)
def getWirelessRetries(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getWirelessRetries')
if timestamp:
return (data, t_stamp)
return data
## 213(0xd5)
def getWirelessSlotsOpen(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getWirelessSlotsOpen')
if timestamp:
return (data, t_stamp)
return data
## 214(0xd6)
def getSignalStrength(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getSignalStrength')
if timestamp:
return (data, t_stamp)
return data
## 215(0xd7)
def setWirelessHIDUpdateRate(self, update_rate, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setWirelessHIDUpdateRate', update_rate)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 216(0xd8)
def getWirelessHIDUpdateRate(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getWirelessHIDUpdateRate')
if timestamp:
return (data, t_stamp)
return data
## 217(0xd9)
def setWirelessHIDAsynchronousMode(self, mode, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setWirelessHIDAsynchronousMode', mode)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 218(0xda)
def getWirelessHIDAsynchronousMode(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getWirelessHIDAsynchronousMode')
if timestamp:
return (data, t_stamp)
return data
## 240(0xf0)
def setJoystickLogicalID(self, logical_id, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setJoystickLogicalID', logical_id)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 241(0xf1)
def setMouseLogicalID(self, logical_id, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setMouseLogicalID', logical_id)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 242(0xf2)
def getJoystickLogicalID(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getJoystickLogicalID')
if timestamp:
return (data, t_stamp)
return data
## 243(0xf3)
def getMouseLogicalID(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getMouseLogicalID')
if timestamp:
return (data, t_stamp)
return data
## END generated functions DNG
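## Example usage (a minimal sketch, not part of the original API): open a
## dongle, fetch the wireless sensor paired at logical ID 0, and read its
## battery level. The port name 'COM9' is a placeholder.
def _exampleDongleUsage(port_name='COM9'):
    dongle = TSDongle(port_name)  # __new__ returns None if the port could not be opened
    if dongle is None:
        return None
    sensor = dongle[0]  # TSWLSensor via __getitem__, or None if nothing is paired at ID 0
    if sensor is None:
        return None
    return sensor.getBatteryPercentRemaining()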
class TSEMSensor(_TSSensor):
command_dict = _TSSensor.command_dict.copy()
command_dict.update({
'setPinMode': (0x1d, 0, None, 2, '>BB', 1),
'getPinMode': (0x1e, 2, '>BB', 0, None, 1),
'getInterruptStatus': (0x1f, 1, '>B', 0, None, 1),
'_setUARTBaudRate': (0xe7, 0, None, 4, '>I', 1),
'getUARTBaudRate': (0xe8, 4, '>I', 0, None, 1)
})
reverse_command_dict = dict(map(lambda x: [x[1][0], x[0]], command_dict.items()))
_device_types = ["EM", "EM-HH"]
def __new__(cls, com_port=None, baudrate=_baudrate, timestamp_mode=TSS_TIMESTAMP_SENSOR):
if com_port is None:
return None
if com_port:
if type(com_port) is str:
port_name = com_port
elif type(com_port) is ComInfo:
port_name = com_port.com_port
else:
_print("An erronous parameter was passed in")
return None
if baudrate not in _allowed_baudrates:
baudrate = _baudrate
_print("Error baudrate value not allowed. Using default.")
serial_port = serial.Serial(port_name, baudrate=baudrate, timeout=0.5, writeTimeout=0.5)
if serial_port is not None:
new_inst = super(_TSSensor, cls).__new__(cls)
serial_port.write(bytearray((0xf7, 0x56, 0x56)))
time.sleep(0.01)
serial_port.flushInput()
return _generateSensorClass(new_inst, serial_port, TSEMSensor._device_types)
_print('Error: serial port could not be created')
## 231(0xe7)
def setUARTBaudRate(self, baud_rate, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('_setUARTBaudRate', baud_rate)
if not fail_byte:
self.baudrate = baud_rate
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## generated functions EM_
## 29(0x1d)
def setPinMode(self, mode, pin, timestamp=False):
arg_list = (mode, pin)
fail_byte, t_stamp, data = self.writeRead('setPinMode', arg_list)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 30(0x1e)
def getPinMode(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getPinMode')
if timestamp:
return (data, t_stamp)
return data
## 31(0x1f)
def getInterruptStatus(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getInterruptStatus')
if timestamp:
return (data, t_stamp)
return data
## 232(0xe8)
def getUARTBaudRate(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getUARTBaudRate')
if timestamp:
return (data, t_stamp)
return data
## END generated functions EM_
class TSDLSensor(_TSSensor):
command_dict = _TSSensor.command_dict.copy()
command_dict.update({
'turnOnMassStorage': (0x39, 0, None, 0, None, 1),
'turnOffMassStorage': (0x3a, 0, None, 0, None, 1),
'formatAndInitializeSDCard': (0x3b, 0, None, 0, None, 1),
'beginDataLoggingSession': (0x3c, 0, None, 0, None, 1),
'endDataLoggingSession': (0x3d, 0, None, 0, None, 1),
'setClockValues': (0x3e, 0, None, 6, '>6B', 1),
'getClockValues': (0x3f, 6, '>6B', 0, None, 1),
'getBatteryVoltage': (0xc9, 4, '>f', 0, None, 1),
'getBatteryPercentRemaining': (0xca, 1, '>B', 0, None, 1),
'getBatteryStatus': (0xcb, 1, '>B', 0, None, 1),
'getButtonState': (0xfa, 1, '>B', 0, None, 1)
})
reverse_command_dict = dict(map(lambda x: [x[1][0], x[0]], command_dict.items()))
_device_types = ["DL", "DL-HH"]
def __new__(cls, com_port=None, baudrate=_baudrate, timestamp_mode=TSS_TIMESTAMP_SENSOR):
if com_port is None:
return None
if com_port:
if type(com_port) is str:
port_name = com_port
elif type(com_port) is ComInfo:
port_name = com_port.com_port
else:
_print("An erronous parameter was passed in")
return None
if baudrate not in _allowed_baudrates:
baudrate = _baudrate
_print("Error baudrate value not allowed. Using default.")
serial_port = serial.Serial(port_name, baudrate=baudrate, timeout=0.5, writeTimeout=0.5)
if serial_port is not None:
new_inst = super(_TSSensor, cls).__new__(cls)
serial_port.write(bytearray((0xf7, 0x56, 0x56)))
time.sleep(0.01)
serial_port.flushInput()
return _generateSensorClass(new_inst, serial_port, TSDLSensor._device_types)
_print('Error: serial port could not be created')
## generated functions DL_
## 57(0x39)
def turnOnMassStorage(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('turnOnMassStorage')
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 58(0x3a)
def turnOffMassStorage(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('turnOffMassStorage')
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 59(0x3b)
def formatAndInitializeSDCard(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('formatAndInitializeSDCard')
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 60(0x3c)
def beginDataLoggingSession(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('beginDataLoggingSession')
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 61(0x3d)
def endDataLoggingSession(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('endDataLoggingSession')
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 62(0x3e)
def setClockValues(self, month, day, year, hour, minute, second, timestamp=False):
arg_list = (month, day, year, hour, minute, second)
fail_byte, t_stamp, data = self.writeRead('setClockValues', arg_list)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 63(0x3f)
def getClockValues(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getClockValues')
if timestamp:
return (data, t_stamp)
return data
## 201(0xc9)
def getBatteryVoltage(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getBatteryVoltage')
if timestamp:
return (data, t_stamp)
return data
## 202(0xca)
def getBatteryPercentRemaining(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getBatteryPercentRemaining')
if timestamp:
return (data, t_stamp)
return data
## 203(0xcb)
def getBatteryStatus(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getBatteryStatus')
if timestamp:
return (data, t_stamp)
return data
## 250(0xfa)
def getButtonState(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getButtonState')
if timestamp:
return (data, t_stamp)
return data
## END generated functions DL_
class TSBTSensor(_TSSensor):
command_dict = _TSSensor.command_dict.copy()
command_dict.update({
'getBatteryVoltage': (0xc9, 4, '>f', 0, None, 1),
'getBatteryPercentRemaining': (0xca, 1, '>B', 0, None, 1),
'getBatteryStatus': (0xcb, 1, '>B', 0, None, 1),
'_setUARTBaudRate': (0xe7, 0, None, 4, '>I', 1),
'getUARTBaudRate': (0xe8, 4, '>I', 0, None, 1),
'getButtonState': (0xfa, 1, '>B', 0, None, 1)
})
reverse_command_dict = dict(map(lambda x: [x[1][0], x[0]], command_dict.items()))
_device_types = ["BT", "BT-HH"]
def __new__(cls, com_port=None, baudrate=_baudrate, timestamp_mode=TSS_TIMESTAMP_SENSOR):
if com_port is None:
return None
if com_port:
if type(com_port) is str:
port_name = com_port
elif type(com_port) is ComInfo:
port_name = com_port.com_port
else:
_print("An erronous parameter was passed in")
return None
if baudrate not in _allowed_baudrates:
baudrate = _baudrate
_print("Error baudrate value not allowed. Using default.")
serial_port = serial.Serial(port_name, baudrate=baudrate, timeout=2.5, writeTimeout=2.5)
if serial_port is not None:
new_inst = super(_TSSensor, cls).__new__(cls)
serial_port.write(bytearray((0xf7, 0x56, 0x56)))
time.sleep(0.25)
serial_port.flushInput()
return _generateSensorClass(new_inst, serial_port, TSBTSensor._device_types)
_print('Error: serial port could not be created')
## 231(0xe7)
def setUARTBaudRate(self, baud_rate, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('_setUARTBaudRate', baud_rate)
if not fail_byte:
self.baudrate = baud_rate
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## generated functions BT_
## 201(0xc9)
def getBatteryVoltage(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getBatteryVoltage')
if timestamp:
return (data, t_stamp)
return data
## 202(0xca)
def getBatteryPercentRemaining(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getBatteryPercentRemaining')
if timestamp:
return (data, t_stamp)
return data
## 203(0xcb)
def getBatteryStatus(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getBatteryStatus')
if timestamp:
return (data, t_stamp)
return data
## 232(0xe8)
def getUARTBaudRate(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getUARTBaudRate')
if timestamp:
return (data, t_stamp)
return data
## 250(0xfa)
def getButtonState(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getButtonState')
if timestamp:
return (data, t_stamp)
return data
## END generated functions BT_
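# Minimal usage sketch for the sensor classes above (hypothetical port name;
# the constructors return None when the port cannot be opened or the device
# type does not match):
# sensor = TSBTSensor('/dev/ttyACM0')
# if sensor is not None:
#     print(sensor.getBatteryVoltage())
#     print(sensor.getBatteryPercentRemaining(timestamp=True))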
global_broadcaster = Broadcaster()
| mit |
eckhart/himlar | profile/files/openstack/horizon/overrides.py | 1 | 1079 | # Disable Floating IPs
from openstack_dashboard.dashboards.project.access_and_security import tabs
from openstack_dashboard.dashboards.project.instances import tables
import horizon
NO = lambda *x: False
tabs.FloatingIPsTab.allowed = NO
tabs.APIAccessTab.allowed = NO
tables.AssociateIP.allowed = NO
tables.SimpleAssociateIP.allowed = NO
tables.SimpleDisassociateIP.allowed = NO
project_dashboard = horizon.get_dashboard("project")
# Completely remove panel Network->Routers
routers_panel = project_dashboard.get_panel("routers")
project_dashboard.unregister(routers_panel.__class__)
# Completely remove panel Network->Networks
networks_panel = project_dashboard.get_panel("networks")
project_dashboard.unregister(networks_panel.__class__)
# Completely remove panel Network->Network Topology
topology_panel = project_dashboard.get_panel("network_topology")
project_dashboard.unregister(topology_panel.__class__)
# Remove "Volume Consistency Groups" tab
from openstack_dashboard.dashboards.project.volumes import tabs
tabs.CGroupsTab.allowed = NO
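# The same unregister pattern extends to any other panel; e.g. (hypothetical
# panel name, shown for illustration only):
# images_panel = project_dashboard.get_panel("images")
# project_dashboard.unregister(images_panel.__class__)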
| apache-2.0 |
chuan9/chromium-crosswalk | ui/base/ime/chromeos/generate_character_composer_data.py | 45 | 13558 | #!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generate a compact character composition table.
Normal use:
./generate_character_composer_data.py \
--output character_composer_data.h \
character_composer_sequences.txt
Run this script with --help for a description of arguments.
Input file format:
Comments begin with '#' and extend to the end of the line.
Each non-comment line is a sequence of two or more keys, separated by
space, the last of which is the result of composing the initial part.
A sequence must begin with a dead key or Compose, and the result must
be a character key.
Each key can either be a character key, a dead key, or the compose key.
A character key is written as one of the following inside matched delimiters:
- a single printable ASCII character;
- a Unicode character name;
- 'U+' followed by one or more hexadecimal digits.
Delimiter pairs are any of '' "" () <> [] or {}.
A dead key is written as the word 'Dead' followed (immediately, without space)
by the combining character written in the same form as a character key.
A compose key is written as the word 'Compose'.
Output file format:
The output file is a C++ header containing a small header structure
|ui::TreeComposeChecker::CompositionData| and a tree of composition sequences.
For space efficiency, the tree is stored in a single array of 16-bit values,
which represent either characters (printable or dead-key) or subtree array
indices.
Each tree level consists for four tables: two key kinds (character or dead)
by two node types (internal or leaf), in the order:
- character internal
- character leaf
- dead-key internal
- dead-key leaf
This order is used because character key entries are more common than dead-key
entries, and internal entries are more common than leaf entries.
Each table consists of a word containing the number of table entries |n|,
followed immediately by |n| key-value pairs of words, ordered by key.
For internal edges, the value is the array index of the resulting subtree.
For leaf edges, the value is the unicode character code of the composition
result.
"""
import argparse
import codecs
import collections
import sys
import unicodedata
# Global error counter.
g_fail = 0
class Key(str):
"""Represents an element of a composition sequence.
Supports only Compose, dead keys, and BMP unicode characters.
Based on |str| for easy comparison and sorting.
The representation is 'C' (for unicode characters) or 'D' (for dead keys)
followed by 4 hex digits for the character value. The Compose key is
represented as dead key with combining character 0.
"""
_kinds = ['character', 'dead_key']
def __new__(cls, key, character, location=None):
"""Construct a Key.
Call as:
- Key(None, character_code)
- Key('Dead', combining_character_code)
- Key('Compose', 0)
"""
global g_fail
if character > 0xFFFF:
print '{}: unsupported non-BMP character {}'.format(location, character)
g_fail += 1
s = 'ERROR'
elif key is None:
s = 'C{:04X}'.format(character)
elif key.lower() == 'dead':
s = 'D{:04X}'.format(character)
elif key.lower() == 'compose':
s = 'D0000'
else:
print '{}: unexpected combination {}<{}>'.format(location, key, character)
g_fail += 1
s = 'ERROR'
return str.__new__(cls, s)
def Kind(self):
return {'C': 'character', 'D': 'dead_key'}[self[0]]
def CharacterCode(self):
return int(self[1:], 16)
def UnicodeName(self):
v = self.CharacterCode()
try:
return unicodedata.name(unichr(v)).lower()
except ValueError:
return 'U+{:04X}'.format(v)
def ShorterUnicodeName(self):
s = self.UnicodeName()
if s.startswith('latin ') or s.startswith('greek '):
s = s[6:]
if s.startswith('small '):
s = s[6:]
return s.replace(' letter ', ' ')
def Pretty(self):
if self == 'D0000':
return 'Compose'
return ('Dead' if self[0] == 'D' else '') + '<' + self.UnicodeName() + '>'
class Input:
"""
Takes a sequence of file names and presents them as a single input stream,
with location reporting for error messages.
"""
def __init__(self, filenames):
self._pending = filenames
self._filename = None
self._file = None
self._line = None
self._lineno = 0
self._column = 0
def Where(self):
"""Report the current input location, for error messages."""
if self._file:
return '{}:{}:{}'.format(self._filename, self._lineno, self._column)
if self._pending:
return '<before>'
return '<eof>'
def Get(self):
"""Return the next input character, or None when inputs are exhausted."""
if self._line is None:
if self._file is None:
if not self._pending:
return None
self._filename = self._pending[0]
self._pending = self._pending[1:]
self._file = codecs.open(self._filename, mode='rb', encoding='utf-8')
self._lineno = 0
self._lineno += 1
self._line = self._file.readline()
if not self._line:
self._file = None
self._filename = None
return self.Get()
self._column = 0
if self._column >= len(self._line):
self._line = None
return self.Get()
c = self._line[self._column]
self._column += 1
return c
class Lexer:
"""
Breaks the input stream into a sequence of tokens, each of which is either
a Key or the string 'EOL'.
"""
def __init__(self, compose_input):
self._input = compose_input
_delimiters = {
'"': '"',
"'": "'",
'<': '>',
'(': ')',
'[': ']',
'{': '}',
}
def GetUntilDelimiter(self, e):
text = ''
c = self._input.Get()
while c and c != e:
text += c
c = self._input.Get()
return text
def Get(self):
global g_fail
c = ' '
while c and c.isspace() and c != '\n':
c = self._input.Get()
if not c:
return None
location = self._input.Where()
if c == '\n':
return 'EOL'
if c == '#':
self.GetUntilDelimiter('\n')
return 'EOL'
if c == '\\':
self.GetUntilDelimiter('\n')
return self.Get()
key = None
character = None
if c.isalnum():
key = ''
while c and c.isalnum():
key += c
c = self._input.Get()
if c in Lexer._delimiters:
s = self.GetUntilDelimiter(Lexer._delimiters[c])
if len(s) == 1:
character = ord(s)
elif s.startswith('U+'):
character = int(s[2:], 16)
else:
try:
character = ord(unicodedata.lookup(s.upper()))
except KeyError:
g_fail += 1
character = None
print '{}: unknown character name "{}"'.format(location,
s.encode('utf-8'))
return Key(key, character, location)
def Where(self):
return self._input.Where()
class Parser:
"""
Takes a sequence of tokens from a Lexer and returns a tree of
composition sequences, represented as nested dictionaries where each
composition source element key is a dictionary key, and the final
composition result has a dictionary key of |None|.
"""
def __init__(self, lexer):
self._lexer = lexer
self._trie = {}
def Parse(self):
global g_fail
self._trie = {}
while True:
seq = []
t = self._lexer.Get()
if not t:
break
if t and t != 'EOL' and t.Kind() != 'dead_key':
g_fail += 1
print ('{}: sequence does not begin with Compose or Dead key'
.format(self._lexer.Where()))
break
while t and t != 'EOL':
seq.append(t)
t = self._lexer.Get()
if not seq:
continue
self.AddToSimpleTree(self._trie, seq)
return self._trie
def AddToSimpleTree(self, tree, seq):
first = seq[0]
rest = seq[1:]
if first not in tree:
tree[first] = {}
if len(rest) == 1:
# Leaf
tree[first][None] = rest[0]
else:
self.AddToSimpleTree(tree[first], rest)
class GroupedTree:
"""
Represents composition sequences in a manner close to the output format.
The core of the representation is the |_tables| dictionary, which has
an entry for each kind of |Key|, each of which is a dictionary with
two entries, 'internal' and 'leaf', for the output tables, each being
a dictionary indexed by a composition sequence |Key|. For 'internal'
tables the dictionary values are |GroupedTree|s at the next level;
for 'leaf' tables the dictionary values are |Key| composition results.
"""
_key_kinds = Key._kinds
_sub_parts = ['internal', 'leaf']
def __init__(self, simple_trie, path=None):
if path is None:
path = []
self.path = path
self.depth = len(path)
self.height = 0
self.empty = True
self.location = -1
# Initialize |_tables|.
self._tables = {}
for k in self._key_kinds:
self._tables[k] = {}
for p in self._sub_parts:
self._tables[k][p] = {}
# Build the tables.
for key in simple_trie:
if key is not None:
# Leaf table entry.
if None in simple_trie[key]:
self.empty = False
self._tables[key.Kind()]['leaf'][key] = simple_trie[key][None]
# Internal subtree entries.
v = GroupedTree(simple_trie[key], path + [key])
if not v.empty:
self.empty = False
self._tables[key.Kind()]['internal'][key] = v
if self.height < v.height:
self.height = v.height
self.height += 1
def SubTrees(self):
"""Returns a list of all sub-|GroupedTree|s of the current GroupTree."""
r = []
for k in self._key_kinds:
for key in sorted(self._tables[k]['internal']):
r.append(self._tables[k]['internal'][key])
return r
class Assembler:
"""Convert a parse tree via a GroupedTree to a C++ header."""
def __init__(self, args, dtree):
self._name = args.data_name
self._type = args.type_name
self._gtree = GroupedTree(dtree)
def Write(self, out):
# First pass: determine table sizes and locations.
self.Pass(None, self._gtree)
# Second pass: write the array.
out.write('\nstatic const uint16_t {}Tree[] = {{\n'.format(self._name))
end = self.Pass(out, self._gtree)
out.write('};\n\n')
# Write the description structure.
out.write('static const {} {} = {{\n'
.format(self._type, self._name))
out.write(' {}, // maximum sequence length\n'.format(self._gtree.height))
out.write(' {}, // tree array entries\n'.format(end))
out.write(' {}Tree\n'.format(self._name))
out.write('};\n\n')
def Pass(self, out, gtree, location=0):
gtree.location = location
# Write tree header.
if out:
out.write('\n // offset 0x{:04X}:\n'.format(location))
if gtree.path:
out.write(' // prefix:\n')
for key in gtree.path:
out.write(' // {}\n'.format(key.Pretty()))
# Write tables.
for k in gtree._key_kinds:
for p in gtree._sub_parts:
# Write table size.
location += 1
if out:
out.write(' // {} {} table\n'.format(p, k))
out.write(' 0x{:04X}, // number of entries\n'
.format(len(gtree._tables[k][p])))
# Write table body.
for key in sorted(gtree._tables[k][p]):
location += 2
if out:
out.write(' 0x{:04X}, // {}\n'
.format(key.CharacterCode(), key.ShorterUnicodeName()))
result = gtree._tables[k][p][key]
if p == 'internal':
out.write(' 0x{:04X},\n'.format(result.location))
else:
out.write(' 0x{:04X}, // -> {}\n'
.format(result.CharacterCode(),
result.ShorterUnicodeName()))
# Assemble subtrees of the current tree.
for t in gtree.SubTrees():
location = self.Pass(out, t, location)
return location
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument('--type_name',
default='ui::TreeComposeChecker::CompositionData')
parser.add_argument('--data_name', default='kCompositions')
parser.add_argument('--output', default='character_composer_data.h')
parser.add_argument('--guard', default=None)
parser.add_argument('inputs', nargs='*')
args = parser.parse_args(argv[1:])
parse_tree = Parser(Lexer(Input(args.inputs))).Parse()
with (sys.stdout if args.output == '-' else open(args.output, 'wb')) as out:
out.write('// Copyright 2015 The Chromium Authors. All rights reserved.\n')
out.write('// Use of this source code is governed by a BSD-style license')
out.write(' that can be\n// found in the LICENSE file.\n\n')
out.write('// DO NOT EDIT.\n')
out.write('// GENERATED BY {}\n'.format(sys.argv[0]))
out.write('// FROM {}\n\n'.format(' '.join(args.inputs)))
guard = args.guard if args.guard else args.output
guard = ''.join([c.upper() if c.isalpha() else '_' for c in guard])
out.write('#ifndef {0}_\n#define {0}_\n'.format(guard))
Assembler(args, parse_tree).Write(out)
out.write('#endif // {}_\n'.format(guard))
return g_fail
if __name__ == '__main__':
sys.exit(main(sys.argv))
| bsd-3-clause |
WillisXChen/django-oscar | oscar/lib/python2.7/site-packages/django/contrib/auth/hashers.py | 78 | 17327 | from __future__ import unicode_literals
import base64
import binascii
import hashlib
import importlib
from collections import OrderedDict
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.signals import setting_changed
from django.dispatch import receiver
from django.utils import lru_cache
from django.utils.crypto import (
constant_time_compare, get_random_string, pbkdf2,
)
from django.utils.encoding import force_bytes, force_str, force_text
from django.utils.module_loading import import_string
from django.utils.translation import ugettext_noop as _
UNUSABLE_PASSWORD_PREFIX = '!' # This will never be a valid encoded hash
UNUSABLE_PASSWORD_SUFFIX_LENGTH = 40 # number of random chars to add after UNUSABLE_PASSWORD_PREFIX
def is_password_usable(encoded):
if encoded is None or encoded.startswith(UNUSABLE_PASSWORD_PREFIX):
return False
try:
identify_hasher(encoded)
except ValueError:
return False
return True
def check_password(password, encoded, setter=None, preferred='default'):
"""
Returns a boolean of whether the raw password matches the three
part encoded digest.
If setter is specified, it'll be called when you need to
regenerate the password.
"""
if password is None or not is_password_usable(encoded):
return False
preferred = get_hasher(preferred)
hasher = identify_hasher(encoded)
must_update = hasher.algorithm != preferred.algorithm
if not must_update:
must_update = preferred.must_update(encoded)
is_correct = hasher.verify(password, encoded)
if setter and is_correct and must_update:
setter(password)
return is_correct
def make_password(password, salt=None, hasher='default'):
"""
Turn a plain-text password into a hash for database storage
Same as encode() but generates a new random salt.
If password is None then a concatenation of
UNUSABLE_PASSWORD_PREFIX and a random string will be returned
which disallows logins. Additional random string reduces chances
of gaining access to staff or superuser accounts.
See ticket #20079 for more info.
"""
if password is None:
return UNUSABLE_PASSWORD_PREFIX + get_random_string(UNUSABLE_PASSWORD_SUFFIX_LENGTH)
hasher = get_hasher(hasher)
if not salt:
salt = hasher.salt()
return hasher.encode(password, salt)
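# Illustrative round trip (salt and hash values are made up):
# encoded = make_password('s3cret')  # e.g. 'pbkdf2_sha256$20000$<salt>$<hash>'
# check_password('s3cret', encoded)  # -> True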
@lru_cache.lru_cache()
def get_hashers():
hashers = []
for hasher_path in settings.PASSWORD_HASHERS:
hasher_cls = import_string(hasher_path)
hasher = hasher_cls()
if not getattr(hasher, 'algorithm'):
raise ImproperlyConfigured("hasher doesn't specify an "
"algorithm name: %s" % hasher_path)
hashers.append(hasher)
return hashers
@lru_cache.lru_cache()
def get_hashers_by_algorithm():
return {hasher.algorithm: hasher for hasher in get_hashers()}
@receiver(setting_changed)
def reset_hashers(**kwargs):
if kwargs['setting'] == 'PASSWORD_HASHERS':
get_hashers.cache_clear()
get_hashers_by_algorithm.cache_clear()
def get_hasher(algorithm='default'):
"""
Returns an instance of a loaded password hasher.
If algorithm is 'default', the default hasher will be returned.
This function will also lazy import hashers specified in your
settings file if needed.
"""
if hasattr(algorithm, 'algorithm'):
return algorithm
elif algorithm == 'default':
return get_hashers()[0]
else:
hashers = get_hashers_by_algorithm()
try:
return hashers[algorithm]
except KeyError:
raise ValueError("Unknown password hashing algorithm '%s'. "
"Did you specify it in the PASSWORD_HASHERS "
"setting?" % algorithm)
def identify_hasher(encoded):
"""
Returns an instance of a loaded password hasher.
Identifies hasher algorithm by examining encoded hash, and calls
get_hasher() to return hasher. Raises ValueError if
algorithm cannot be identified, or if hasher is not loaded.
"""
# Ancient versions of Django created plain MD5 passwords and accepted
# MD5 passwords with an empty salt.
if ((len(encoded) == 32 and '$' not in encoded) or
(len(encoded) == 37 and encoded.startswith('md5$$'))):
algorithm = 'unsalted_md5'
# Ancient versions of Django accepted SHA1 passwords with an empty salt.
elif len(encoded) == 46 and encoded.startswith('sha1$$'):
algorithm = 'unsalted_sha1'
else:
algorithm = encoded.split('$', 1)[0]
return get_hasher(algorithm)
def mask_hash(hash, show=6, char="*"):
"""
Returns the given hash, with only the first ``show`` characters shown. The
rest are masked with ``char`` for security reasons.
"""
masked = hash[:show]
masked += char * len(hash[show:])
return masked
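# e.g. mask_hash('abcdef123456') == 'abcdef******'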
class BasePasswordHasher(object):
"""
Abstract base class for password hashers
When creating your own hasher, you need to override algorithm,
verify(), encode() and safe_summary().
PasswordHasher objects are immutable.
"""
algorithm = None
library = None
def _load_library(self):
if self.library is not None:
if isinstance(self.library, (tuple, list)):
name, mod_path = self.library
else:
mod_path = self.library
try:
module = importlib.import_module(mod_path)
except ImportError as e:
raise ValueError("Couldn't load %r algorithm library: %s" %
(self.__class__.__name__, e))
return module
raise ValueError("Hasher %r doesn't specify a library attribute" %
self.__class__.__name__)
def salt(self):
"""
Generates a cryptographically secure nonce salt in ASCII
"""
return get_random_string()
def verify(self, password, encoded):
"""
Checks if the given password is correct
"""
raise NotImplementedError('subclasses of BasePasswordHasher must provide a verify() method')
def encode(self, password, salt):
"""
Creates an encoded database value
The result is normally formatted as "algorithm$salt$hash" and
must be fewer than 128 characters.
"""
raise NotImplementedError('subclasses of BasePasswordHasher must provide an encode() method')
def safe_summary(self, encoded):
"""
Returns a summary of safe values
The result is a dictionary and will be used where the password field
must be displayed to construct a safe representation of the password.
"""
raise NotImplementedError('subclasses of BasePasswordHasher must provide a safe_summary() method')
def must_update(self, encoded):
return False
class PBKDF2PasswordHasher(BasePasswordHasher):
"""
Secure password hashing using the PBKDF2 algorithm (recommended)
Configured to use PBKDF2 + HMAC + SHA256 with 20000 iterations.
The result is a 64 byte binary string. Iterations may be changed
safely but you must rename the algorithm if you change SHA256.
"""
algorithm = "pbkdf2_sha256"
iterations = 20000
digest = hashlib.sha256
def encode(self, password, salt, iterations=None):
assert password is not None
assert salt and '$' not in salt
if not iterations:
iterations = self.iterations
hash = pbkdf2(password, salt, iterations, digest=self.digest)
hash = base64.b64encode(hash).decode('ascii').strip()
return "%s$%d$%s$%s" % (self.algorithm, iterations, salt, hash)
def verify(self, password, encoded):
algorithm, iterations, salt, hash = encoded.split('$', 3)
assert algorithm == self.algorithm
encoded_2 = self.encode(password, salt, int(iterations))
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
algorithm, iterations, salt, hash = encoded.split('$', 3)
assert algorithm == self.algorithm
return OrderedDict([
(_('algorithm'), algorithm),
(_('iterations'), iterations),
(_('salt'), mask_hash(salt)),
(_('hash'), mask_hash(hash)),
])
def must_update(self, encoded):
algorithm, iterations, salt, hash = encoded.split('$', 3)
return int(iterations) != self.iterations
class PBKDF2SHA1PasswordHasher(PBKDF2PasswordHasher):
"""
Alternate PBKDF2 hasher which uses SHA1, the default PRF
recommended by PKCS #5. This is compatible with other
implementations of PBKDF2, such as openssl's
PKCS5_PBKDF2_HMAC_SHA1().
"""
algorithm = "pbkdf2_sha1"
digest = hashlib.sha1
class BCryptSHA256PasswordHasher(BasePasswordHasher):
"""
Secure password hashing using the bcrypt algorithm (recommended)
This is considered by many to be the most secure algorithm but you
must first install the bcrypt library. Please be warned that
this library depends on native C code and might cause portability
issues.
"""
algorithm = "bcrypt_sha256"
digest = hashlib.sha256
library = ("bcrypt", "bcrypt")
rounds = 12
def salt(self):
bcrypt = self._load_library()
return bcrypt.gensalt(self.rounds)
def encode(self, password, salt):
bcrypt = self._load_library()
# Need to reevaluate the force_bytes call once bcrypt is supported on
# Python 3
# Hash the password prior to using bcrypt to prevent password truncation
# See: https://code.djangoproject.com/ticket/20138
if self.digest is not None:
# We use binascii.hexlify here because Python3 decided that a hex encoded
# bytestring is somehow a unicode.
password = binascii.hexlify(self.digest(force_bytes(password)).digest())
else:
password = force_bytes(password)
data = bcrypt.hashpw(password, salt)
return "%s$%s" % (self.algorithm, force_text(data))
def verify(self, password, encoded):
algorithm, data = encoded.split('$', 1)
assert algorithm == self.algorithm
bcrypt = self._load_library()
# Hash the password prior to using bcrypt to prevent password truncation
# See: https://code.djangoproject.com/ticket/20138
if self.digest is not None:
# We use binascii.hexlify here because Python3 decided that a hex encoded
# bytestring is somehow a unicode.
password = binascii.hexlify(self.digest(force_bytes(password)).digest())
else:
password = force_bytes(password)
# Ensure that our data is a bytestring
data = force_bytes(data)
# force_bytes() necessary for py-bcrypt compatibility
hashpw = force_bytes(bcrypt.hashpw(password, data))
return constant_time_compare(data, hashpw)
def safe_summary(self, encoded):
algorithm, empty, algostr, work_factor, data = encoded.split('$', 4)
assert algorithm == self.algorithm
salt, checksum = data[:22], data[22:]
return OrderedDict([
(_('algorithm'), algorithm),
(_('work factor'), work_factor),
(_('salt'), mask_hash(salt)),
(_('checksum'), mask_hash(checksum)),
])
class BCryptPasswordHasher(BCryptSHA256PasswordHasher):
"""
Secure password hashing using the bcrypt algorithm
This is considered by many to be the most secure algorithm but you
must first install the bcrypt library. Please be warned that
this library depends on native C code and might cause portability
issues.
This hasher does not first hash the password which means it is subject to
the 72 character bcrypt password truncation, most use cases should prefer
the BCryptSHA256PasswordHasher.
See: https://code.djangoproject.com/ticket/20138
"""
algorithm = "bcrypt"
digest = None
class SHA1PasswordHasher(BasePasswordHasher):
"""
The SHA1 password hashing algorithm (not recommended)
"""
algorithm = "sha1"
def encode(self, password, salt):
assert password is not None
assert salt and '$' not in salt
hash = hashlib.sha1(force_bytes(salt + password)).hexdigest()
return "%s$%s$%s" % (self.algorithm, salt, hash)
def verify(self, password, encoded):
algorithm, salt, hash = encoded.split('$', 2)
assert algorithm == self.algorithm
encoded_2 = self.encode(password, salt)
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
algorithm, salt, hash = encoded.split('$', 2)
assert algorithm == self.algorithm
return OrderedDict([
(_('algorithm'), algorithm),
(_('salt'), mask_hash(salt, show=2)),
(_('hash'), mask_hash(hash)),
])
class MD5PasswordHasher(BasePasswordHasher):
"""
The Salted MD5 password hashing algorithm (not recommended)
"""
algorithm = "md5"
def encode(self, password, salt):
assert password is not None
assert salt and '$' not in salt
hash = hashlib.md5(force_bytes(salt + password)).hexdigest()
return "%s$%s$%s" % (self.algorithm, salt, hash)
def verify(self, password, encoded):
algorithm, salt, hash = encoded.split('$', 2)
assert algorithm == self.algorithm
encoded_2 = self.encode(password, salt)
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
algorithm, salt, hash = encoded.split('$', 2)
assert algorithm == self.algorithm
return OrderedDict([
(_('algorithm'), algorithm),
(_('salt'), mask_hash(salt, show=2)),
(_('hash'), mask_hash(hash)),
])
class UnsaltedSHA1PasswordHasher(BasePasswordHasher):
"""
Very insecure algorithm that you should *never* use; stores SHA1 hashes
with an empty salt.
This class is implemented because Django used to accept such password
hashes. Some older Django installs still have these values lingering
around so we need to handle and upgrade them properly.
"""
algorithm = "unsalted_sha1"
def salt(self):
return ''
def encode(self, password, salt):
assert salt == ''
hash = hashlib.sha1(force_bytes(password)).hexdigest()
return 'sha1$$%s' % hash
def verify(self, password, encoded):
encoded_2 = self.encode(password, '')
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
assert encoded.startswith('sha1$$')
hash = encoded[6:]
return OrderedDict([
(_('algorithm'), self.algorithm),
(_('hash'), mask_hash(hash)),
])
class UnsaltedMD5PasswordHasher(BasePasswordHasher):
"""
Incredibly insecure algorithm that you should *never* use; stores unsalted
MD5 hashes without the algorithm prefix, also accepts MD5 hashes with an
empty salt.
This class is implemented because Django used to store passwords this way
and to accept such password hashes. Some older Django installs still have
these values lingering around so we need to handle and upgrade them
properly.
"""
algorithm = "unsalted_md5"
def salt(self):
return ''
def encode(self, password, salt):
assert salt == ''
return hashlib.md5(force_bytes(password)).hexdigest()
def verify(self, password, encoded):
if len(encoded) == 37 and encoded.startswith('md5$$'):
encoded = encoded[5:]
encoded_2 = self.encode(password, '')
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
return OrderedDict([
(_('algorithm'), self.algorithm),
(_('hash'), mask_hash(encoded, show=3)),
])
class CryptPasswordHasher(BasePasswordHasher):
"""
Password hashing using UNIX crypt (not recommended)
The crypt module is not supported on all platforms.
"""
algorithm = "crypt"
library = "crypt"
def salt(self):
return get_random_string(2)
def encode(self, password, salt):
crypt = self._load_library()
assert len(salt) == 2
data = crypt.crypt(force_str(password), salt)
# we don't need to store the salt, but Django used to do this
return "%s$%s$%s" % (self.algorithm, '', data)
def verify(self, password, encoded):
crypt = self._load_library()
algorithm, salt, data = encoded.split('$', 2)
assert algorithm == self.algorithm
return constant_time_compare(data, crypt.crypt(force_str(password), data))
def safe_summary(self, encoded):
algorithm, salt, data = encoded.split('$', 2)
assert algorithm == self.algorithm
return OrderedDict([
(_('algorithm'), algorithm),
(_('salt'), salt),
(_('hash'), mask_hash(data, show=3)),
])
| bsd-3-clause |
gdm/aws-cfn-resource-bridge | aws/cfn/bridge/processes.py | 4 | 2596 | #==============================================================================
# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#==============================================================================
import subprocess
import os
class ProcessResult(object):
"""
Return object for ProcessHelper
"""
def __init__(self, returncode, stdout, stderr):
self._returncode = returncode
self._stdout = stdout if not stdout else stdout.decode('utf-8')
self._stderr = stderr if not stderr else stderr.decode('utf-8')
@property
def returncode(self):
return self._returncode
@property
def stdout(self):
return self._stdout
@property
def stderr(self):
return self._stderr
class ProcessHelper(object):
"""
Helper to simplify command line execution
"""
def __init__(self, cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=None, cwd=None):
self._cmd = cmd
self._stdout = stdout
self._stderr = stderr
if not env:
self._env = None
elif os.name == 'nt': # stringify the environment on Windows, which cannot handle unicode
# Windows requires inheriting some of the parent process' environment, so just take them all.
self._env = dict(((str(k), str(v)) for k, v in os.environ.iteritems()))
self._env.update(dict(((str(k), str(v)) for k, v in env.iteritems())))
else:
self._env = dict(os.environ.copy())
self._env.update(dict(env))
self._cwd = cwd
def call(self):
"""
Calls the command, returning a tuple of (returncode, stdout, stderr)
"""
process = subprocess.Popen(self._cmd, stdout=self._stdout, stderr=self._stderr,
shell=isinstance(self._cmd, basestring), env=self._env, cwd=self._cwd)
return_data = process.communicate()
return ProcessResult(process.returncode, return_data[0], return_data[1])
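# Usage sketch (hypothetical command):
# result = ProcessHelper(['echo', 'hello']).call()
# if result.returncode == 0:
#     print(result.stdout)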
| apache-2.0 |
lioncui/pybix | client/plugin/RedisPlugin.py | 1 | 3753 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from lib import pybixlib
import traceback
from p_class import plugins
import redis
class RedisPlugin(plugins.plugin):
def __init__(self, uuid, taskConf, agentType):
plugins.plugin.__init__(
self, uuid, taskConf, agentType)
def data_format_MB(self, data):
data = int(data)
data = data / 1048576.0 # float division; plain '/' on ints truncates under Python 2
data = "%.2f" % data
data = float(data)
return data
def data_format_Ratio(self, hit, mis):
hit = int(hit)
mis = int(mis)
if (hit+mis) == 0:
return 0
data = (hit * 100.0) / (hit + mis) # float division; int division truncates under Python 2
data = "%.2f" % data
data = float(data)
return data
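# e.g. data_format_Ratio(90, 10) -> 90.0 (hit percentage)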
def data_format_connected_per_min(self, connected, min):
data = float(connected)/min
data = "%.2f" % data
return data
def data_format_command_per_min(self, command, min):
data = float(command)/min
data = "%.2f" % data
return data
def getData(self):
status_content = {}
try:
host = self.taskConf.get("host")
port = self.taskConf.get("port")
password = self.taskConf.get("password")
self.server = redis.StrictRedis(host=host, port=port,
password=password,
socket_connect_timeout=30)
self.info = self.server.info()
status_content['redis_version'] = self.info['redis_version']
status_content['used_memory'] = self.info['used_memory']
status_content['connected_clients'] = self.info[
'connected_clients']
status_content['connected_slaves'] = self.info['connected_slaves']
status_content['uptime_in_minutes'] = self.info[
'uptime_in_seconds'] / 60
#status_content['connected_per_min'] = self.data_format_connected_per_min(status_content['connected_clients'], status_content['uptime_in_minutes'])
status_content['rejected_connections'] = self.info[
'rejected_connections']
status_content['pubsub_patterns'] = self.info['pubsub_patterns']
status_content['pubsub_channels'] = self.info['pubsub_channels']
status_content['keyspace_hits'] = self.info['keyspace_hits']
status_content['keyspace_misses'] = self.info['keyspace_misses']
#status_content['keyspace_hits'] = self.data_format_Ratio(self.info['keyspace_hits'], self.info['keyspace_misses'])
status_content['commands_total'] = self.info[
'total_commands_processed']
#status_content['command_per_min'] = self.data_format_command_per_min(self.info['total_commands_processed'], status_content['uptime_in_minutes'])
status_content['usedMemoryRss'] = self.info['used_memory_rss']
status_content['memFragmentationRatio'] = self.info[
'mem_fragmentation_ratio']
status_content['blockedClients'] = self.info['blocked_clients']
totalKey = 0
for key in self.info:
if key.startswith('db'):
totalKey = totalKey + self.info[key]['keys']
status_content['totalKeys'] = totalKey
except Exception:
pybixlib.error(self.logHead + traceback.format_exc())
self.errorInfoDone(traceback.format_exc())
status_content = {}
finally:
self.setData({'agentType': self.agentType, 'uuid': self.uuid,
'code': self.code, 'time': self.getCurTime(),
'data': status_content, 'error_info': self.error_info})
self.intStatus()
| gpl-3.0 |
kobejean/tensorflow | tensorflow/python/keras/applications/applications_test.py | 16 | 1932 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Integration tests for Keras applications."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.keras import applications
from tensorflow.python.platform import test
MODEL_LIST = [
(applications.ResNet50, 2048),
(applications.VGG16, 512),
(applications.VGG19, 512),
(applications.Xception, 2048),
(applications.InceptionV3, 2048),
(applications.InceptionResNetV2, 1536),
(applications.MobileNet, 1024),
# TODO(fchollet): enable MobileNetV2 tests when a new TensorFlow test image
# is released with keras_applications upgraded to 1.0.5 or above.
(applications.DenseNet121, 1024),
(applications.DenseNet169, 1664),
(applications.DenseNet201, 1920),
(applications.NASNetMobile, 1056),
(applications.NASNetLarge, 4032),
]
class ApplicationsTest(test.TestCase, parameterized.TestCase):
@parameterized.parameters(*MODEL_LIST)
def test_feature_extration_model(self, model_fn, output_dim):
model = model_fn(include_top=False, weights=None)
self.assertEqual(model.output_shape, (None, None, None, output_dim))
if __name__ == '__main__':
test.main()
| apache-2.0 |
goodhanrry/updaten_915s_to_lollipop | tools/perf/scripts/python/sctop.py | 11180 | 1924 | # system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
| gpl-2.0 |
kernsuite-debian/lofar | SAS/ResourceAssignment/ResourceAssignmentEditor/lib/webservice.py | 1 | 39598 | #!/usr/bin/env python3
# Copyright (C) 2012-2015 ASTRON (Netherlands Institute for Radio Astronomy)
# P.O. Box 2, 7990 AA Dwingeloo, The Netherlands
#
# This file is part of the LOFAR software suite.
# The LOFAR software suite is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# The LOFAR software suite is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with the LOFAR software suite. If not, see <http://www.gnu.org/licenses/>.
# $Id$
'''ResourceAssignmentEditor webservice serves an interactive HTML5 website for
viewing and editing LOFAR resources.'''
import sys
import os
import time
from optparse import OptionParser
from threading import Condition, Lock, current_thread, Thread
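# Importing _strptime up front avoids the lazy import inside
# datetime.strptime, which is not thread-safe on first use.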
import _strptime
from datetime import datetime, timedelta
from json import loads as json_loads
import time
import logging
import subprocess
from dateutil import parser, tz
from flask import Flask
from flask import render_template
from flask import request
from flask import abort
from flask import url_for
from lofar.common.flask_utils import gzipped
from lofar.messaging.rpc import RPCException
from lofar.messaging import DEFAULT_BROKER, DEFAULT_BUSNAME
from lofar.sas.resourceassignment.resourceassignmenteditor.fakedata import *
from lofar.sas.resourceassignment.resourceassignmenteditor.changeshandler import ChangesHandler, CHANGE_DELETE_TYPE
from lofar.sas.resourceassignment.resourceassignmentservice.rpc import RADBRPC
from lofar.mom.momqueryservice.momqueryrpc import MoMQueryRPC
from lofar.sas.resourceassignment.resourceassignmenteditor.mom import updateTaskMomDetails
from lofar.sas.resourceassignment.resourceassignmenteditor.storage import updateTaskStorageDetails
from lofar.sas.datamanagement.cleanup.rpc import CleanupRPC
from lofar.sas.datamanagement.storagequery.rpc import StorageQueryRPC
from lofar.sas.otdb.otdbrpc import OTDBRPC
from lofar.common import isProductionEnvironment, isTestEnvironment
from lofar.common.util import humanreadablesize
from lofar.common.subprocess_utils import communicate_returning_strings
from lofar.common import dbcredentials
from lofar.sas.resourceassignment.database.radb import RADatabase
logger = logging.getLogger(__name__)
def asDatetime(isoString):
if isoString[-1] == 'Z':
isoString = isoString[:-1]
if isoString[-4] == '.':
isoString += '000'
return datetime.strptime(isoString, '%Y-%m-%dT%H:%M:%S.%f')
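# e.g. asDatetime('2017-01-01T12:00:00.000Z') -> datetime(2017, 1, 1, 12, 0)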
def asIsoFormat(timestamp):
return datetime.strftime(timestamp, '%Y-%m-%dT%H:%M:%S.%fZ')
__root_path = os.path.dirname(os.path.realpath(__file__))
'''The flask webservice app'''
app = Flask('Scheduler',
instance_path=__root_path,
template_folder=os.path.join(__root_path, 'templates'),
static_folder=os.path.join(__root_path, 'static'),
instance_relative_config=True)
# Load the default configuration
app.config.from_object('lofar.sas.resourceassignment.resourceassignmenteditor.config.default')
try:
import ujson
def convertDictDatetimeValuesToString(obj):
'''recursively convert all datetime values in the dict to ISO formatted strings'''
if isinstance(obj, list):
return [convertDictDatetimeValuesToString(x) if (isinstance(x, dict) or isinstance(x, list)) else x for x in obj]
return dict( (k, convertDictDatetimeValuesToString(v) if (isinstance(v, dict) or isinstance(v, list)) else asIsoFormat(v) if isinstance(v, datetime) else v) for k,v in list(obj.items()))
def jsonify(obj):
'''faster implementation of flask.json.jsonify using ultrajson and the above datetime->string convertor'''
json_str = ujson.dumps(dict(convertDictDatetimeValuesToString(obj)))
return app.response_class(json_str, mimetype='application/json')
except:
from flask.json import jsonify
from flask.json import JSONEncoder
class CustomJSONEncoder(JSONEncoder):
def default(self, obj):
try:
if isinstance(obj, datetime):
return asIsoFormat(obj)
iterable = iter(obj)
except TypeError:
pass
else:
return list(iterable)
return JSONEncoder.default(self, obj)
app.json_encoder = CustomJSONEncoder
rarpc = None
otdbrpc = None
curpc = None
sqrpc = None
momqueryrpc = None
changeshandler = None
_radb_pool = {}
_radb_pool_lock = Lock()
_radb_dbcreds = None
def radb():
global _radb_pool, _radb_pool_lock
if _radb_dbcreds:
with _radb_pool_lock:
thread = current_thread()
tid = thread.ident
now = datetime.utcnow()
if tid not in _radb_pool:
logger.info('creating radb connection for thread %s', tid)
_radb_pool[tid] = { 'connection': RADatabase(dbcreds=_radb_dbcreds),
'last_used': now }
thread_conn_obj = _radb_pool[tid]
thread_conn_obj['last_used'] = now
threshold = timedelta(minutes=5)
obsolete_connections_tids = [tid for tid,tco in list(_radb_pool.items()) if now - tco['last_used'] > threshold]
for tid in obsolete_connections_tids:
logger.info('deleting radb connection for thread %s', tid)
del _radb_pool[tid]
return thread_conn_obj['connection']
return rarpc
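# Note: one RADatabase connection is kept per thread because database
# connections are generally not safe to share across threads; connections
# idle for more than 5 minutes are pruned.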
@app.route('/')
@app.route('/index.htm')
@app.route('/index.html')
@gzipped
def index():
'''Serves the ResourceAssignmentEditor's index page'''
return render_template('index.html', title='Scheduler')
@app.route('/projects')
@app.route('/projects.htm')
@app.route('/projects.html')
@gzipped
def projects():
return render_template('projects.html', title='Projects')
@app.route('/rest/config')
@gzipped
def config():
config = {'mom_base_url':'',
'lta_base_url':'',
'inspection_plots_base_url':'https://proxy.lofar.eu/inspect/HTML/',
'sky_view_base_url':'http://dop344.astron.nl:5000/uvis/id'}
if isProductionEnvironment():
config['mom_base_url'] = 'https://lofar.astron.nl/mom3'
config['lta_base_url'] = 'http://lofar.target.rug.nl/'
elif isTestEnvironment():
config['mom_base_url'] = 'http://lofartest.control.lofar:8080/mom3'
config['lta_base_url'] = 'http://lofar-test.target.rug.nl/'
return jsonify({'config': config})
@app.route('/rest/resources')
@gzipped
def resources():
result = radb().getResources(include_availability=True)
return jsonify({'resources': result})
@app.route('/rest/resources/<int:resource_id>')
@gzipped
def resource(resource_id):
result = radb().getResources(resource_ids=[resource_id], include_availability=True)
if result:
return jsonify(result[0])
return jsonify({})
@app.route('/rest/resources/<int:resource_id>/resourceclaims')
@gzipped
def resourceclaimsForResource(resource_id):
return resourceclaimsForResourceFromUntil(resource_id, None, None)
@app.route('/rest/resources/<int:resource_id>/resourceclaims/<string:fromTimestamp>')
@gzipped
def resourceclaimsForResourceFrom(resource_id, fromTimestamp=None):
return resourceclaimsForResourceFromUntil(resource_id, fromTimestamp, None)
@app.route('/rest/resources/<int:resource_id>/resourceclaims/<string:fromTimestamp>/<string:untilTimestamp>')
@gzipped
def resourceclaimsForResourceFromUntil(resource_id, fromTimestamp=None, untilTimestamp=None):
if fromTimestamp and isinstance(fromTimestamp, str):
fromTimestamp = asDatetime(fromTimestamp)
if untilTimestamp and isinstance(untilTimestamp, str):
untilTimestamp = asDatetime(untilTimestamp)
claims = radb().getResourceClaims(lower_bound=fromTimestamp,
upper_bound=untilTimestamp,
resource_ids=[resource_id],
extended=False,
include_properties=True)
return jsonify({'resourceclaims': claims})
@app.route('/rest/resourcegroups')
@gzipped
def resourcegroups():
result = radb().getResourceGroups()
return jsonify({'resourcegroups': result})
@app.route('/rest/resourcegroupmemberships')
@gzipped
def resourcegroupsmemberships():
result = radb().getResourceGroupMemberships()
return jsonify({'resourcegroupmemberships': result})
@app.route('/rest/resourceclaims')
def resourceclaims():
return resourceclaimsFromUntil(None, None)
@app.route('/rest/resourceclaims/<string:fromTimestamp>')
def resourceclaimsFrom(fromTimestamp=None):
return resourceclaimsFromUntil(fromTimestamp, None)
@app.route('/rest/resourceclaims/<string:fromTimestamp>/<string:untilTimestamp>')
@gzipped
def resourceclaimsFromUntil(fromTimestamp=None, untilTimestamp=None):
if fromTimestamp and isinstance(fromTimestamp, str):
fromTimestamp = asDatetime(fromTimestamp)
if untilTimestamp and isinstance(untilTimestamp, str):
untilTimestamp = asDatetime(untilTimestamp)
claims = radb().getResourceClaims(lower_bound=fromTimestamp, upper_bound=untilTimestamp, include_properties=True)
return jsonify({'resourceclaims': claims})
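# Example request (hypothetical host/port):
# curl http://localhost:5000/rest/resourceclaims/2017-01-01T00:00:00.000Z/2017-01-02T00:00:00.000Z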
@app.route('/rest/resourceusages')
@gzipped
def resourceUsages():
return resourceUsagesFromUntil(None, None)
@app.route('/rest/resourceusages/<string:fromTimestamp>/<string:untilTimestamp>')
@gzipped
def resourceUsagesFromUntil(fromTimestamp=None, untilTimestamp=None):
if fromTimestamp and isinstance(fromTimestamp, str):
fromTimestamp = asDatetime(fromTimestamp)
if untilTimestamp and isinstance(untilTimestamp, str):
untilTimestamp = asDatetime(untilTimestamp)
result = radb().getResourceUsages(lower_bound=fromTimestamp, upper_bound=untilTimestamp)
return jsonify({'resourceusages': result})
@app.route('/rest/resources/<int:resource_id>/usages', methods=['GET'])
@app.route('/rest/resourceusages/<int:resource_id>', methods=['GET'])
@gzipped
def resourceUsagesForResource(resource_id):
return resourceUsagesForResourceFromUntil(resource_id, None, None)
@app.route('/rest/resources/<int:resource_id>/usages/<string:fromTimestamp>/<string:untilTimestamp>', methods=['GET'])
@app.route('/rest/resourceusages/<int:resource_id>/<string:fromTimestamp>/<string:untilTimestamp>', methods=['GET'])
@gzipped
def resourceUsagesForResourceFromUntil(resource_id, fromTimestamp=None, untilTimestamp=None):
if fromTimestamp and isinstance(fromTimestamp, str):
fromTimestamp = asDatetime(fromTimestamp)
if untilTimestamp and isinstance(untilTimestamp, str):
untilTimestamp = asDatetime(untilTimestamp)
result = radb().getResourceUsages(resource_ids=[resource_id], lower_bound=fromTimestamp, upper_bound=untilTimestamp)
return jsonify({'resourceusages': result})
@app.route('/rest/tasks/<int:task_id>/resourceusages', methods=['GET'])
@gzipped
def resourceUsagesForTask(task_id):
result = radb().getResourceUsages(task_ids=[task_id])
return jsonify({'resourceusages': result})
@app.route('/rest/tasks/<int:task_id>/resourceclaims', methods=['GET'])
@gzipped
def resourceClaimsForTask(task_id):
result = radb().getResourceClaims(task_ids=[task_id], extended=True, include_properties=True)
return jsonify({'resourceclaims': result})
@app.route('/rest/tasks')
def getTasks():
return getTasksFromUntil(None, None)
@app.route('/rest/tasks/<string:fromTimestamp>')
def getTasksFrom(fromTimestamp):
return getTasksFromUntil(fromTimestamp, None)
@app.route('/rest/tasks/<string:fromTimestamp>/<string:untilTimestamp>')
@gzipped
def getTasksFromUntil(fromTimestamp=None, untilTimestamp=None):
if fromTimestamp and isinstance(fromTimestamp, str):
fromTimestamp = asDatetime(fromTimestamp)
if untilTimestamp and isinstance(untilTimestamp, str):
untilTimestamp = asDatetime(untilTimestamp)
tasks = radb().getTasks(fromTimestamp, untilTimestamp)
updateTaskDetails(tasks)
return jsonify({'tasks': tasks})
def updateTaskDetails(tasks):
#update the mom details and the storage details in parallel
t1 = Thread(target=updateTaskMomDetails, args=(tasks, momqueryrpc))
t2 = Thread(target=updateTaskStorageDetails, args=(tasks, sqrpc, curpc))
t1.daemon = True
t2.daemon = True
t1.start()
t2.start()
#wait for mom details thread to finish
t1.join()
#task details (such as name/description) from MoM are done
#get extra details on reserved resources for reservations (while the storage details still run in t2)
reservationTasks = [t for t in tasks if t['type'] == 'reservation']
if reservationTasks:
reservationClaims = radb().getResourceClaims(task_ids=[t['id'] for t in reservationTasks], extended=True, include_properties=False)
task2claims = {}
for claim in reservationClaims:
if claim['task_id'] not in task2claims:
task2claims[claim['task_id']] = []
task2claims[claim['task_id']].append(claim)
for task in reservationTasks:
claims = task2claims.get(task['id'], [])
task['name'] = ', '.join(c['resource_name'] for c in claims)
task['description'] = 'Reservation on ' + task['name']
#wait for storage details thread to finish
t2.join()
@app.route('/rest/tasks/<int:task_id>', methods=['GET'])
@gzipped
def getTask(task_id):
try:
task = radb().getTask(task_id)
if not task:
abort(404)
task['name'] = 'Task %d' % task['id']
updateTaskDetails([task])
return jsonify({'task': task})
except Exception as e:
abort(404)
return jsonify({'task': None})
@app.route('/rest/tasks/otdb/<int:otdb_id>', methods=['GET'])
@gzipped
def getTaskByOTDBId(otdb_id):
try:
task = radb().getTask(otdb_id=otdb_id)
if not task:
abort(404)
task['name'] = 'Task %d' % task['id']
updateTaskDetails([task])
return jsonify({'task': task})
except Exception as e:
abort(404)
return jsonify({'task': None})
@app.route('/rest/tasks/mom/<int:mom_id>', methods=['GET'])
@gzipped
def getTaskByMoMId(mom_id):
try:
task = radb().getTask(mom_id=mom_id)
if not task:
abort(404)
task['name'] = 'Task %d' % task['id']
updateTaskDetails([task])
return jsonify({'task': task})
except Exception as e:
abort(404)
return jsonify({'task': None})
@app.route('/rest/tasks/mom/group/<int:mom_group_id>', methods=['GET'])
@gzipped
def getTasksByMoMGroupId(mom_group_id):
try:
mom_ids = momqueryrpc.getTaskIdsInGroup(mom_group_id)[str(mom_group_id)]
tasks = radb().getTasks(mom_ids=mom_ids)
updateTaskDetails(tasks)
return jsonify({'tasks': tasks})
except Exception as e:
abort(404)
@app.route('/rest/tasks/mom/parentgroup/<int:mom_parent_group_id>', methods=['GET'])
@gzipped
def getTasksByMoMParentGroupId(mom_parent_group_id):
try:
mom_ids = momqueryrpc.getTaskIdsInParentGroup(mom_parent_group_id)[str(mom_parent_group_id)]
tasks = radb().getTasks(mom_ids=mom_ids)
updateTaskDetails(tasks)
return jsonify({'tasks': tasks})
except Exception as e:
abort(404)
@app.route('/rest/tasks/<int:task_id>', methods=['PUT'])
def putTask(task_id):
if 'Content-Type' in request.headers and \
request.headers['Content-Type'].startswith('application/json'):
try:
updatedTask = json_loads(request.data.decode('utf-8'))
if task_id != int(updatedTask['id']):
abort(404, 'task_id in url is not equal to id in request.data')
#check if task is known
task = radb().getTask(task_id)
if not task:
abort(404, "unknown task %s" % str(updatedTask))
# first handle start- endtimes...
if 'starttime' in updatedTask or 'endtime' in updatedTask:
logger.info('starttime or endtime in updatedTask: %s', updatedTask)
if isProductionEnvironment():
abort(403, 'Editing of start/end times of tasks by users is not yet approved')
#update dict for otdb spec
spec_update = {}
for timeprop in ['starttime', 'endtime']:
if timeprop in updatedTask:
try:
updatedTask[timeprop] = asDatetime(updatedTask[timeprop])
except ValueError:
abort(400, 'timestamp not in iso format: ' + updatedTask[timeprop])
otdb_key = 'LOFAR.ObsSW.Observation.' + ('startTime' if timeprop == 'starttime' else 'stopTime')
spec_update[otdb_key] = updatedTask[timeprop].strftime('%Y-%m-%d %H:%M:%S')
#update timestamps in both otdb and radb
otdbrpc.taskSetSpecification(task['otdb_id'], spec_update)
# update the task's (and its claims) start/endtime
# do not update the task's status directly via the radb. See a few lines below: task status is routed via otdb (and then ends up in radb automatically)
# it might be that editing the start/end time results in a (radb) task status update (for example to 'conflict' due to conflicting claims)
# that's ok, since we'll update the status to the requested status later via otdb (see a few lines below)
radb().updateTaskAndResourceClaims(task_id,
starttime=updatedTask.get('starttime'),
endtime=updatedTask.get('endtime'))
# ...then, handle status update which might trigger resource assignment,
# for which the above updated times are needed
if 'status' in updatedTask:
if isProductionEnvironment() and task['type'] == 'observation' and updatedTask['status'] == 'prescheduled':
abort(403, 'Scheduling of observations via the webscheduler by users is not (yet) allowed')
try:
#update status in otdb only
#the status change will propagate automatically into radb via other services (by design)
otdbrpc.taskSetStatus(task['otdb_id'], updatedTask['status'])
#we expect the status in otdb/radb to eventually become what we asked for...
expected_status = updatedTask['status']
#block until radb and mom task status are equal to the expected_statuses (with timeout)
start_wait = datetime.utcnow()
while True:
task = radb().getTask(otdb_id=task['otdb_id'])
otdb_status = otdbrpc.taskGetStatus(task['otdb_id'])
logger.info('waiting for otdb/radb task status to be in [%s].... otdb:%s radb:%s',
expected_status, otdb_status, task['status'])
if (task['status'] == expected_status and otdb_status == expected_status):
logger.info('otdb/radb task status now has the expected status %s otdb:%s radb:%s',
expected_status, otdb_status, task['status'])
break
if datetime.utcnow() - start_wait > timedelta(seconds=10):
logger.warning('timeout while waiting for otdb/radb task status to get the expected status %s otdb:%s radb:%s',
expected_status, otdb_status, task['status'])
break
time.sleep(0.1)
except RPCException as e:
if 'does not exist' in str(e):
# task does not exist (anymore) in otdb
#so remove it from radb as well (with cascading deletes on specification)
logger.warning('task with otdb_id %s does not exist anymore in OTDB. removing task radb_id %s from radb', task['otdb_id'], task['id'])
radb().deleteSpecification(task['specification_id'])
if 'data_pinned' in updatedTask:
task = radb().getTask(task_id)
if not task:
abort(404, "unknown task %s" % str(updatedTask))
curpc.setTaskDataPinned(task['otdb_id'], updatedTask['data_pinned'])
return "", 204
except Exception as e:
logger.error(e)
abort(404, str(e))
abort(406)
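# Illustrative sketch (hypothetical helper, not part of the original
# service): the poll-until-propagated loop in putTask() follows a generic
# "wait for predicate with timeout" pattern that could be factored out.
# 'predicate', 'timeout' and 'interval' are assumed names.
#
#     def wait_until(predicate, timeout=timedelta(seconds=10), interval=0.1):
#         start = datetime.utcnow()
#         while not predicate():
#             if datetime.utcnow() - start > timeout:
#                 return False  # timed out; the caller decides what to do
#             time.sleep(interval)
#         return True
#
# putTask() could then express its wait as, for example:
#     wait_until(lambda: radb().getTask(otdb_id=otdb_id)['status'] == expected_status)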
@app.route('/rest/tasks/<int:task_id>/cleanup', methods=['DELETE'])
def cleanupTaskData(task_id):
try:
delete_params = {}
if 'Content-Type' in request.headers and (request.headers['Content-Type'].startswith('application/json') or request.headers['Content-Type'].startswith('text/plain')):
delete_params = json_loads(request.data.decode('utf-8'))
task = radb().getTask(task_id)
if not task:
abort(404, 'No such task (id=%s)' % task_id)
logger.info("cleanup task data id=%s otdb_id=%s delete_params=%s", task_id, task['otdb_id'], delete_params)
result = curpc.removeTaskData(task['otdb_id'],
delete_is=delete_params.get('delete_is', True),
delete_cs=delete_params.get('delete_cs', True),
delete_uv=delete_params.get('delete_uv', True),
delete_im=delete_params.get('delete_im', True),
delete_img=delete_params.get('delete_img', True),
delete_pulp=delete_params.get('delete_pulp', True),
delete_scratch=delete_params.get('delete_scratch', True),
force=delete_params.get('force_delete', False))
logger.info(result)
return jsonify(result)
except Exception as e:
abort(500)
@app.route('/rest/tasks/<int:task_id>/datapath', methods=['GET'])
@gzipped
def getTaskDataPath(task_id):
try:
task = radb().getTask(task_id)
if not task:
abort(404, 'No such task (id=%s)' % task_id)
result = sqrpc.getPathForOTDBId(task['otdb_id'])
except Exception as e:
abort(500, str(e))
if result['found']:
return jsonify({'datapath': result['path']})
abort(404, result['message'] if result and 'message' in result else '')
@app.route('/rest/tasks/otdb/<int:otdb_id>/diskusage', methods=['GET'])
@gzipped
def getTaskDiskUsageByOTDBId(otdb_id):
try:
result = sqrpc.getDiskUsageForTaskAndSubDirectories(otdb_id=otdb_id, force_update=request.args.get('force')=='true')
except Exception as e:
abort(500, str(e))
if result['found']:
return jsonify(result)
abort(404, result['message'] if result and 'message' in result else '')
@app.route('/rest/tasks/<int:task_id>/diskusage', methods=['GET'])
@gzipped
def getTaskDiskUsage(task_id):
try:
result = sqrpc.getDiskUsageForTaskAndSubDirectories(radb_id=task_id, force_update=request.args.get('force')=='true')
except Exception as e:
abort(500, str(e))
if result['found']:
return jsonify(result)
abort(404, result['message'] if result and 'message' in result else '')
@app.route('/rest/tasks/<int:task_id>/parset', methods=['GET'])
@gzipped
def getParset(task_id):
try:
task = radb().getTask(task_id)
if not task:
abort(404)
return getParsetByOTDBId(task['otdb_id'])
except Exception as e:
abort(404)
abort(404)
@app.route('/rest/tasks/otdb/<int:otdb_id>/parset', methods=['GET'])
@gzipped
def getParsetByOTDBId(otdb_id):
try:
logger.info('getParsetByOTDBId(%s)', otdb_id)
parset = otdbrpc.taskGetSpecification(otdb_id=otdb_id)['specification']
return '\n'.join(['%s=%s' % (k,parset[k]) for k in sorted(parset.keys())]), 200, {'Content-Type': 'text/plain; charset=utf-8'}
except Exception as e:
abort(404)
abort(404)
@app.route('/rest/tasks/<int:task_id>/resourceclaims')
@gzipped
def taskResourceClaims(task_id):
return jsonify({'taskResourceClaims': radb().getResourceClaims(task_ids=[task_id], include_properties=True)})
@app.route('/rest/tasktypes')
@gzipped
def tasktypes():
result = radb().getTaskTypes()
result = sorted(result, key=lambda q: q['id'])
return jsonify({'tasktypes': result})
@app.route('/rest/taskstatustypes')
@gzipped
def getTaskStatusTypes():
result = radb().getTaskStatuses()
result = sorted(result, key=lambda q: q['id'])
return jsonify({'taskstatustypes': result})
@app.route('/rest/resourcetypes')
@gzipped
def resourcetypes():
result = radb().getResourceTypes()
result = sorted(result, key=lambda q: q['id'])
return jsonify({'resourcetypes': result})
@app.route('/rest/resourceclaimpropertytypes')
@gzipped
def resourceclaimpropertytypes():
result = radb().getResourceClaimPropertyTypes()
result = sorted(result, key=lambda q: q['id'])
return jsonify({'resourceclaimpropertytypes': result})
@app.route('/rest/projects')
@gzipped
def getProjects():
projects = []
try:
projects = momqueryrpc.getProjects()
projects = [x for x in projects if x['status_id'] in [1, 7]]
for project in projects:
project['mom_id'] = project.pop('mom2id')
except Exception as e:
logger.error(e)
projects.append({'name':'<unknown>', 'mom_id':-99, 'description': 'Container project for tasks for which we could not find a MoM project'})
projects.append({'name':'OTDB Only', 'mom_id':-98, 'description': 'Container project for tasks which exists only in OTDB'})
projects.append({'name':'Reservations', 'mom_id':-97, 'description': 'Container project for reservation tasks'})
return jsonify({'momprojects': projects})
@app.route('/rest/projects/<int:project_mom2id>')
@gzipped
def getProject(project_mom2id):
try:
projects = momqueryrpc.getProjects()
project = next(x for x in projects if x['mom2id'] == project_mom2id)
return jsonify({'momproject': project})
except StopIteration as e:
logger.error(e)
abort(404, "No project with mom2id %s" % project_mom2id)
except Exception as e:
logger.error(e)
abort(404, str(e))
@app.route('/rest/projects/<int:project_mom2id>/tasks')
@gzipped
def getProjectTasks(project_mom2id):
return getProjectTasksFromUntil(project_mom2id, None, None)
@app.route('/rest/projects/<int:project_mom2id>/tasks/<string:fromTimestamp>/<string:untilTimestamp>')
@gzipped
def getProjectTasksFromUntil(project_mom2id, fromTimestamp=None, untilTimestamp=None):
try:
if fromTimestamp and isinstance(fromTimestamp, str):
fromTimestamp = asDatetime(fromTimestamp)
if untilTimestamp and isinstance(untilTimestamp, str):
untilTimestamp = asDatetime(untilTimestamp)
task_mom2ids = momqueryrpc.getProjectTaskIds(project_mom2id)['task_mom2ids']
tasks = radb().getTasks(mom_ids=task_mom2ids, lower_bound=fromTimestamp, upper_bound=untilTimestamp)
updateTaskDetails(tasks)
return jsonify({'tasks': tasks})
except Exception as e:
logger.error(e)
abort(404, str(e))
@app.route('/rest/projects/<int:project_mom2id>/taskstimewindow')
@gzipped
def getProjectTasksTimeWindow(project_mom2id):
try:
task_mom2ids = momqueryrpc.getProjectTaskIds(project_mom2id)['task_mom2ids']
timewindow = radb().getTasksTimeWindow(mom_ids=task_mom2ids)
return jsonify(timewindow)
except Exception as e:
logger.error(e)
abort(404, str(e))
@app.route('/rest/projects/<int:project_mom2id>/diskusage')
@gzipped
def getProjectDiskUsageById(project_mom2id):
try:
project = momqueryrpc.getProject(project_mom2id=project_mom2id)
return getProjectDiskUsageByName(project['name'])
except StopIteration as e:
logger.error(e)
abort(404, "No project with mom2id %s" % project_mom2id)
except Exception as e:
logger.error(e)
abort(404, str(e))
@app.route('/rest/projects/<string:project_name>/diskusage')
@gzipped
def getProjectDiskUsageByName(project_name):
try:
result = sqrpc.getDiskUsageForProjectDirAndSubDirectories(project_name=project_name, force_update=request.args.get('force')=='true')
return jsonify(result)
except Exception as e:
logger.error(e)
abort(404, str(e))
@app.route('/rest/projects/diskusage')
@gzipped
def getProjectsDiskUsage():
try:
result = sqrpc.getDiskUsageForProjectsDirAndSubDirectories(force_update=request.args.get('force')=='true')
return jsonify(result)
except Exception as e:
logger.error(e)
abort(404, str(e))
@app.route('/rest/momobjectdetails/<int:mom2id>')
@gzipped
def getMoMObjectDetails(mom2id):
details = momqueryrpc.getObjectDetails(mom2id)
details = list(details.values())[0] if details else None
if details:
details['project_mom_id'] = details.pop('project_mom2id')
details['object_mom_id'] = details.pop('object_mom2id')
return jsonify({'momobjectdetails': details})
@app.route('/rest/updates/<int:sinceChangeNumber>')
@gzipped
def getUpdateEventsSince(sinceChangeNumber):
changesSince = changeshandler.getChangesSince(sinceChangeNumber)
return jsonify({'changes': changesSince})
@app.route('/rest/mostRecentChangeNumber')
@gzipped
def getMostRecentChangeNumber():
mrcn = changeshandler.getMostRecentChangeNumber()
return jsonify({'mostRecentChangeNumber': mrcn})
@app.route('/rest/updates')
def getUpdateEvents():
return getUpdateEventsSince(-1)
@app.route('/rest/logEvents')
@gzipped
def getMostRecentLogEvents():
return getLogEventsSince(datetime.utcnow() - timedelta(hours=6))
@app.route('/rest/logEvents/<string:fromTimestamp>')
@gzipped
def getLogEventsSince(fromTimestamp=None):
if not fromTimestamp:
fromTimestamp = datetime.utcnow() - timedelta(hours=6)
eventsSince = changeshandler.getEventsSince(fromTimestamp)
return jsonify({'logEvents': eventsSince})
@app.route('/rest/lofarTime')
@gzipped
def getLofarTime():
return jsonify({'lofarTime': asIsoFormat(datetime.utcnow())})
#ugly method to generate html tables for all tasks
@app.route('/tasks.html')
@gzipped
def getTasksHtml():
tasks = radb().getTasks()
if not tasks:
abort(404)
updateTaskDetails(tasks)
html = '<!DOCTYPE html><html><head><title>Tasks</title><style>table, th, td {border: 1px solid black; border-collapse: collapse; padding: 4px;}</style></head><body><table style="width:100%">\n'
props = sorted(tasks[0].keys())
html += '<tr>%s</tr>\n' % ''.join('<th>%s</th>' % prop for prop in props)
for task in tasks:
html += '<tr>'
for prop in props:
if prop in task:
if prop == 'id':
html += '<td><a href="/rest/tasks/%s.html">%s</a></td> ' % (task[prop], task[prop])
else:
html += '<td>%s</td> ' % task[prop]
html += '</tr>\n'
html += '</table></body></html>\n'
return html
#ugly method to generate html tables for the task and its claims
@app.route('/tasks/<int:task_id>.html', methods=['GET'])
@gzipped
def getTaskHtml(task_id):
task = radb().getTask(task_id)
if not task:
abort(404, 'No such task %s' % task_id)
task['name'] = 'Task %d' % task['id']
updateTaskDetails([task])
html = '<!DOCTYPE html><html><head><title>Tasks</title><style>table, th, td {border: 1px solid black; border-collapse: collapse; padding: 4px;}</style></head><body><table style="">\n'
html += '<h1>Task %s</h1>' % task_id
html += '<p><a href="/tasks/%s/log.html">%s log</a></p> ' % (task['id'], task['type'])
html += '<p><a href="/rest/tasks/%s/parset">view %s parset</a></p> ' % (task['id'], task['type'])
props = sorted(task.keys())
html += '<tr><th>key</th><th>value</th></tr>\n'
for prop in props:
html += '<tr><td>%s</td>' % prop
if prop == 'id':
html += '<td><a href="/tasks/%s.html">%s</a></td> ' % (task[prop], task[prop])
elif prop == 'predecessor_ids' or prop == 'successor_ids':
ids = task[prop]
if ids:
html += '<td>%s</td> ' % ', '.join('<a href="/tasks/%s.html">%s</a>' % (id, id) for id in ids)
else:
html += '<td></td> '
else:
html += '<td>%s</td> ' % task[prop]
html += '</tr>'
html += '</table>\n<br>'
claims = radb().getResourceClaims(task_ids=[task_id], extended=True, include_properties=True)
if claims:
html += '<h1>Claims</h1>'
for claim in claims:
html += '<table>'
for claim_key,claim_value in list(claim.items()):
if claim_key == 'properties':
html += '<tr><td>properties</td><td><table>'
if claim_value:
propnames = sorted(claim_value[0].keys())
html += '<tr>%s</tr>\n' % ''.join('<th>%s</th>' % propname for propname in propnames)
for prop in claim_value:
html += '<tr>%s</tr>\n' % ''.join('<td>%s</td>' % prop[propname] for propname in propnames)
html += '</table></td></tr>'
elif claim_key == 'saps':
html += '<tr><td>saps</td><td><table>'
saps = claim_value
if saps:
sap_keys = ['sap_nr', 'properties']
html += '<tr>%s</tr>\n' % ''.join('<th>%s</th>' % sap_key for sap_key in sap_keys)
for sap in saps:
html += '<tr>'
for sap_key in sap_keys:
if sap_key == 'properties':
html += '<td><table>'
sap_props = sap[sap_key]
if sap_props:
propnames = sorted(sap_props[0].keys())
html += '<tr>%s</tr>\n' % ''.join('<th>%s</th>' % propname for propname in propnames)
for prop in sap_props:
html += '<tr>%s</tr>\n' % ''.join('<td>%s</td>' % prop[propname] for propname in propnames)
html += '</table></td>'
else:
html += '<td>%s</td>' % (sap[sap_key])
html += '</tr>'
html += '</table></td></tr>'
else:
html += '<tr><td>%s</td><td>%s</td></tr>' % (claim_key,claim_value)
html += '</table>'
html += '<br>'
html += '</body></html>\n'
return html
@app.route('/rest/tasks/<int:task_id>/resourceclaims.html', methods=['GET'])
@gzipped
def resourceClaimsForTaskHtml(task_id):
claims = radb().getResourceClaims(task_ids=[task_id], extended=True, include_properties=True)
if not claims:
abort(404, 'No resource claims for task %s' % task_id)
html = '<!DOCTYPE html><html><head><title>Tasks</title><style>table, th, td {border: 1px solid black; border-collapse: collapse; padding: 4px;}</style></head><body><table style="">\n'
for claim in claims:
html += '<tr><td>%s</td>' % claim
html += '</table></body></html>\n'
return html
@app.route('/tasks/<int:task_id>/log.html', methods=['GET'])
@gzipped
def getTaskLogHtml(task_id):
task = radb().getTask(task_id)
cmd = []
if task['type'] == 'pipeline':
cmd = ['ssh', 'lofarsys@head01.cep4.control.lofar', 'cat /data/log/pipeline-%s-*.log' % task['otdb_id']]
else:
cmd = ['ssh', 'mcu001.control.lofar', 'cat /opt/lofar/var/log/mcu001\\:ObservationControl\\[0\\]\\{%s\\}.log*' % task['otdb_id']]
logger.info(' '.join(cmd))
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = communicate_returning_strings(proc)
if proc.returncode == 0:
return out, 200, {'Content-Type': 'text/plain; charset=utf-8'}
else:
return err, 500, {'Content-Type': 'text/plain; charset=utf-8'}
def main():
# make sure we run in UTC timezone
import os
os.environ['TZ'] = 'UTC'
# Check the invocation arguments
parser = OptionParser('%prog [options]',
description='run the resource assignment editor web service')
parser.add_option('--webserver_port', dest='webserver_port', type='int', default=7412, help='port number on which to host the webservice, default: %default')
parser.add_option('-q', '--broker', dest='broker', type='string', default=DEFAULT_BROKER, help='Address of the qpid broker, default: %default')
parser.add_option('--exchange', dest='exchange', type='string', default=DEFAULT_BUSNAME, help='Name of the bus exchange on the qpid broker, default: %default')
parser.add_option('-V', '--verbose', dest='verbose', action='store_true', help='verbose logging')
parser.add_option_group(dbcredentials.options_group(parser))
parser.set_defaults(dbcredentials="RADB")
(options, args) = parser.parse_args()
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
level=logging.DEBUG if options.verbose else logging.INFO)
global _radb_dbcreds
_radb_dbcreds = dbcredentials.parse_options(options)
if _radb_dbcreds.database:
logger.info("Using dbcreds for direct RADB access: %s" % _radb_dbcreds.stringWithHiddenPassword())
else:
_radb_dbcreds = None
global rarpc
rarpc = RADBRPC.create(exchange=options.exchange, broker=options.broker)
global otdbrpc
otdbrpc = OTDBRPC.create(exchange=options.exchange, broker=options.broker)
global curpc
curpc = CleanupRPC.create(exchange=options.exchange, broker=options.broker)
global sqrpc
sqrpc = StorageQueryRPC.create(exchange=options.exchange, timeout=10, broker=options.broker)
global momqueryrpc
momqueryrpc = MoMQueryRPC.create(exchange=options.exchange, timeout=10, broker=options.broker)
global changeshandler
changeshandler = ChangesHandler(exchange=options.exchange,
broker=options.broker, momqueryrpc=momqueryrpc, radbrpc=rarpc, sqrpc=sqrpc)
with changeshandler, rarpc, otdbrpc, curpc, sqrpc, momqueryrpc:
'''Start the webserver'''
app.run(debug=options.verbose, threaded=True, host='0.0.0.0', port=options.webserver_port)
if __name__ == '__main__':
main()
| gpl-3.0 |
ryfeus/lambda-packs | Tensorflow/source/tensorflow/contrib/kernel_methods/python/losses.py | 31 | 5600 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of kernel-methods-related loss operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.losses import losses
def sparse_multiclass_hinge_loss(
labels,
logits,
weights=1.0,
scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=losses.Reduction.SUM_BY_NONZERO_WEIGHTS):
"""Adds Ops for computing the multiclass hinge loss.
The implementation is based on the following paper:
On the Algorithmic Implementation of Multiclass Kernel-based Vector Machines
by Crammer and Singer.
link: http://jmlr.csail.mit.edu/papers/volume2/crammer01a/crammer01a.pdf
This is a generalization of standard (binary) hinge loss. For a given instance
with correct label c*, the loss is given by:
loss = max_{c != c*} logits_c - logits_{c*} + 1.
or equivalently
loss = max_c { logits_c - logits_{c*} + I_{c != c*} }
where I_{c != c*} = 1 if c != c* and 0 otherwise.
Args:
labels: `Tensor` of shape [batch_size] or [batch_size, 1]. Corresponds to
the ground truth. Each entry must be an index in `[0, num_classes)`.
logits: `Tensor` of shape [batch_size, num_classes] corresponding to the
unscaled logits. Its dtype should be either `float32` or `float64`.
weights: Optional (python) scalar or `Tensor`. If a non-scalar `Tensor`, its
rank should be either 1 ([batch_size]) or 2 ([batch_size, 1]).
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which the loss will be added.
reduction: Type of reduction to apply to loss.
Returns:
Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
shape as `labels`; otherwise, it is a scalar.
Raises:
ValueError: If `logits`, `labels` or `weights` have invalid or inconsistent
shapes.
ValueError: If `labels` tensor has invalid dtype.
"""
with ops.name_scope(scope, 'sparse_multiclass_hinge_loss', (logits,
labels)) as scope:
# Check logits Tensor has valid rank.
logits_shape = logits.get_shape()
logits_rank = logits_shape.ndims
if logits_rank != 2:
raise ValueError(
'logits should have rank 2 ([batch_size, num_classes]). Given rank is'
' {}'.format(logits_rank))
batch_size, num_classes = logits_shape[0].value, logits_shape[1].value
logits = math_ops.to_float(logits)
# Check labels have valid type.
if labels.dtype != dtypes.int32 and labels.dtype != dtypes.int64:
raise ValueError(
'Invalid dtype for labels: {}. Acceptable dtypes: int32 and int64'.
format(labels.dtype))
# Check labels and weights have valid ranks and are consistent.
labels_rank = labels.get_shape().ndims
if labels_rank not in [1, 2]:
raise ValueError(
'labels should have rank 1 ([batch_size]) or 2 ([batch_size, 1]). '
'Given rank is {}'.format(labels_rank))
with ops.control_dependencies([
check_ops.assert_less(labels, math_ops.cast(num_classes, labels.dtype))
]):
labels = array_ops.reshape(labels, shape=[-1])
weights = ops.convert_to_tensor(weights)
weights_rank = weights.get_shape().ndims
if weights_rank not in [0, 1, 2]:
raise ValueError(
'non-scalar weights should have rank 1 ([batch_size]) or 2 '
'([batch_size, 1]). Given rank is {}'.format(weights_rank))
if weights_rank > 0:
weights = array_ops.reshape(weights, shape=[-1])
# Check weights and labels have the same number of elements.
weights.get_shape().assert_is_compatible_with(labels.get_shape())
# Compute the logits tensor corresponding to the correct class per instance.
example_indices = array_ops.reshape(
math_ops.range(batch_size), shape=[batch_size, 1])
indices = array_ops.concat(
[
example_indices,
array_ops.reshape(
math_ops.cast(labels, example_indices.dtype),
shape=[batch_size, 1])
],
axis=1)
label_logits = array_ops.reshape(
array_ops.gather_nd(params=logits, indices=indices),
shape=[batch_size, 1])
one_cold_labels = array_ops.one_hot(
indices=labels, depth=num_classes, on_value=0.0, off_value=1.0)
margin = logits - label_logits + one_cold_labels
margin = nn_ops.relu(margin)
loss = math_ops.reduce_max(margin, axis=1)
return losses.compute_weighted_loss(
loss, weights, scope, loss_collection, reduction=reduction)
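# Worked example (illustrative comment, not executed by this module):
# with logits = [1.0, 2.5, 0.3] for one instance and correct label c* = 0:
#     label_logits    = 1.0
#     one_cold_labels = [0.0, 1.0, 1.0]
#     margin = logits - label_logits + one_cold_labels = [0.0, 2.5, 0.3]
#     loss   = reduce_max(relu(margin)) = 2.5
# which matches max_{c != c*} logits_c - logits_{c*} + 1 = 2.5 - 1.0 + 1 = 2.5.
# Note the margin at c* itself is always 0, so the loss is never negative.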
| mit |
caesar2164/edx-platform | lms/djangoapps/instructor_task/api.py | 11 | 20895 | """
API for submitting background tasks by an instructor for a course.
Also includes methods for getting information about tasks that have
already been submitted, filtered either by running state or input
arguments.
"""
from collections import Counter
import hashlib
from celery.states import READY_STATES
from xmodule.modulestore.django import modulestore
from lms.djangoapps.instructor_task.models import InstructorTask
from lms.djangoapps.instructor_task.tasks import (
rescore_problem,
reset_problem_attempts,
delete_problem_state,
send_bulk_course_email,
calculate_problem_responses_csv,
calculate_grades_csv,
calculate_problem_grade_report,
calculate_students_features_csv,
cohort_students,
enrollment_report_features_csv,
calculate_may_enroll_csv,
exec_summary_report_csv,
course_survey_report_csv,
generate_certificates,
proctored_exam_results_csv,
export_ora2_data,
)
from certificates.models import CertificateGenerationHistory
from lms.djangoapps.instructor_task.api_helper import (
check_arguments_for_rescoring,
encode_problem_and_student_input,
encode_entrance_exam_and_student_input,
check_entrance_exam_problems_for_rescoring,
submit_task,
)
from bulk_email.models import CourseEmail
from util import milestones_helpers
class SpecificStudentIdMissingError(Exception):
"""
Exception indicating that a student id was not provided when generating a certificate for a specific student.
"""
pass
def get_running_instructor_tasks(course_id):
"""
Returns a query of InstructorTask objects of running tasks for a given course.
Used to generate a list of tasks to display on the instructor dashboard.
"""
instructor_tasks = InstructorTask.objects.filter(course_id=course_id)
# exclude states that are "ready" (i.e. not "running", e.g. failure, success, revoked):
for state in READY_STATES:
instructor_tasks = instructor_tasks.exclude(task_state=state)
return instructor_tasks.order_by('-id')
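# Note (illustrative, not part of the original module): chaining one
# .exclude() per READY state as above is equivalent, for a plain field
# like task_state, to a single membership exclusion:
#
#     instructor_tasks.exclude(task_state__in=READY_STATES).order_by('-id')
#
# The loop form simply mirrors READY_STATES being an iterable of celery
# state strings.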
def get_instructor_task_history(course_id, usage_key=None, student=None, task_type=None):
"""
Returns a query of InstructorTask objects of historical tasks for a given course,
that optionally match a particular problem, a student, and/or a task type.
"""
instructor_tasks = InstructorTask.objects.filter(course_id=course_id)
if usage_key is not None or student is not None:
_, task_key = encode_problem_and_student_input(usage_key, student)
instructor_tasks = instructor_tasks.filter(task_key=task_key)
if task_type is not None:
instructor_tasks = instructor_tasks.filter(task_type=task_type)
return instructor_tasks.order_by('-id')
def get_entrance_exam_instructor_task_history(course_id, usage_key=None, student=None): # pylint: disable=invalid-name
"""
Returns a query of InstructorTask objects of historical tasks for a given course,
that optionally match an entrance exam and student if present.
"""
instructor_tasks = InstructorTask.objects.filter(course_id=course_id)
if usage_key is not None or student is not None:
_, task_key = encode_entrance_exam_and_student_input(usage_key, student)
instructor_tasks = instructor_tasks.filter(task_key=task_key)
return instructor_tasks.order_by('-id')
# Disabling invalid-name because this fn name is longer than 30 chars.
def submit_rescore_problem_for_student(request, usage_key, student, only_if_higher=False): # pylint: disable=invalid-name
"""
Request a problem to be rescored as a background task.
The problem will be rescored for the specified student only. Parameters are the `course_id`,
the `problem_url`, and the `student` as a User object.
The url must specify the location of the problem, using i4x-type notation.
ItemNotFoundException is raised if the problem doesn't exist, or AlreadyRunningError
if the problem is already being rescored for this student, or NotImplementedError if
the problem doesn't support rescoring.
"""
# check arguments: let exceptions return up to the caller.
check_arguments_for_rescoring(usage_key)
task_type = 'rescore_problem_if_higher' if only_if_higher else 'rescore_problem'
task_class = rescore_problem
task_input, task_key = encode_problem_and_student_input(usage_key, student)
task_input.update({'only_if_higher': only_if_higher})
return submit_task(request, task_type, task_class, usage_key.course_key, task_input, task_key)
def submit_rescore_problem_for_all_students(request, usage_key, only_if_higher=False): # pylint: disable=invalid-name
"""
Request a problem to be rescored as a background task.
The problem will be rescored for all students who have accessed the
particular problem in a course and have provided and checked an answer.
Parameters are the `course_id` and the `problem_url`.
The url must specify the location of the problem, using i4x-type notation.
ItemNotFoundException is raised if the problem doesn't exist, or AlreadyRunningError
if the problem is already being rescored, or NotImplementedError if the problem doesn't
support rescoring.
"""
# check arguments: let exceptions return up to the caller.
check_arguments_for_rescoring(usage_key)
# check to see if task is already running, and reserve it otherwise
task_type = 'rescore_problem_if_higher' if only_if_higher else 'rescore_problem'
task_class = rescore_problem
task_input, task_key = encode_problem_and_student_input(usage_key)
task_input.update({'only_if_higher': only_if_higher})
return submit_task(request, task_type, task_class, usage_key.course_key, task_input, task_key)
def submit_rescore_entrance_exam_for_student(request, usage_key, student=None, only_if_higher=False): # pylint: disable=invalid-name
"""
Request entrance exam problems to be re-scored as a background task.
The entrance exam problems will be re-scored for given student or if student
is None problems for all students who have accessed the entrance exam.
Parameters are `usage_key`, which must be a :class:`Location`
representing entrance exam section and the `student` as a User object.
ItemNotFoundError is raised if the entrance exam does not exist for the given
usage_key, AlreadyRunningError is raised if the entrance exam
is already being re-scored, or NotImplementedError if the problem doesn't
support rescoring.
"""
# check problems for rescoring: let exceptions return up to the caller.
check_entrance_exam_problems_for_rescoring(usage_key)
# check to see if task is already running, and reserve it otherwise
task_type = 'rescore_problem_if_higher' if only_if_higher else 'rescore_problem'
task_class = rescore_problem
task_input, task_key = encode_entrance_exam_and_student_input(usage_key, student)
task_input.update({'only_if_higher': only_if_higher})
return submit_task(request, task_type, task_class, usage_key.course_key, task_input, task_key)
def submit_reset_problem_attempts_for_all_students(request, usage_key): # pylint: disable=invalid-name
"""
Request to have attempts reset for a problem as a background task.
The problem's attempts will be reset for all students who have accessed the
particular problem in a course. Parameters are the `course_id` and
the `usage_key`, which must be a :class:`Location`.
ItemNotFoundException is raised if the problem doesn't exist, or AlreadyRunningError
if the problem is already being reset.
"""
# check arguments: make sure that the usage_key is defined
# (since that's currently typed in). If the corresponding module descriptor doesn't exist,
# an exception will be raised. Let it pass up to the caller.
modulestore().get_item(usage_key)
task_type = 'reset_problem_attempts'
task_class = reset_problem_attempts
task_input, task_key = encode_problem_and_student_input(usage_key)
return submit_task(request, task_type, task_class, usage_key.course_key, task_input, task_key)
def submit_reset_problem_attempts_in_entrance_exam(request, usage_key, student): # pylint: disable=invalid-name
"""
Request to have attempts reset for an entrance exam as a background task.
Problem attempts for all problems in entrance exam will be reset
for specified student. If student is None problem attempts will be
reset for all students.
Parameters are `usage_key`, which must be a :class:`Location`
representing entrance exam section and the `student` as a User object.
ItemNotFoundError is raised if the entrance exam does not exist for the given
usage_key, AlreadyRunningError is raised if the entrance exam
is already being reset.
"""
# check arguments: make sure entrance exam(section) exists for given usage_key
modulestore().get_item(usage_key)
task_type = 'reset_problem_attempts'
task_class = reset_problem_attempts
task_input, task_key = encode_entrance_exam_and_student_input(usage_key, student)
return submit_task(request, task_type, task_class, usage_key.course_key, task_input, task_key)
def submit_delete_problem_state_for_all_students(request, usage_key): # pylint: disable=invalid-name
"""
Request to have state deleted for a problem as a background task.
The problem's state will be deleted for all students who have accessed the
particular problem in a course. Parameters are the `course_id` and
the `usage_key`, which must be a :class:`Location`.
ItemNotFoundException is raised if the problem doesn't exist, or AlreadyRunningError
if the particular problem's state is already being deleted.
"""
# check arguments: make sure that the usage_key is defined
# (since that's currently typed in). If the corresponding module descriptor doesn't exist,
# an exception will be raised. Let it pass up to the caller.
modulestore().get_item(usage_key)
task_type = 'delete_problem_state'
task_class = delete_problem_state
task_input, task_key = encode_problem_and_student_input(usage_key)
return submit_task(request, task_type, task_class, usage_key.course_key, task_input, task_key)
def submit_delete_entrance_exam_state_for_student(request, usage_key, student): # pylint: disable=invalid-name
"""
Requests reset of state for entrance exam as a background task.
Module state for all problems in entrance exam will be deleted
for specified student.
All User Milestones of entrance exam will be removed for the specified student
Parameters are `usage_key`, which must be a :class:`Location`
representing entrance exam section and the `student` as a User object.
ItemNotFoundError is raised if the entrance exam does not exist for the given
usage_key, AlreadyRunningError is raised if the entrance exam
is already being reset.
"""
# check arguments: make sure entrance exam(section) exists for given usage_key
modulestore().get_item(usage_key)
# Remove Content milestones that user has completed
milestones_helpers.remove_course_content_user_milestones(
course_key=usage_key.course_key,
content_key=usage_key,
user=student,
relationship='fulfills'
)
task_type = 'delete_problem_state'
task_class = delete_problem_state
task_input, task_key = encode_entrance_exam_and_student_input(usage_key, student)
return submit_task(request, task_type, task_class, usage_key.course_key, task_input, task_key)
def submit_bulk_course_email(request, course_key, email_id):
"""
Request to have bulk email sent as a background task.
The specified CourseEmail object will be sent to all students who have enrolled
in the course. Parameters are the `course_key` and the `email_id`, the id of the CourseEmail object.
AlreadyRunningError is raised if the same recipients are already being emailed with the same
CourseEmail object.
"""
# Assume that the course is defined, and that the user has already been verified to have
# appropriate access to the course. But make sure that the email exists.
# We also pull out the targets argument here, so that is displayed in
# the InstructorTask status.
email_obj = CourseEmail.objects.get(id=email_id)
# task_input has a limit to the size it can store, so any target_type with count > 1 is combined and counted
targets = Counter([target.target_type for target in email_obj.targets.all()])
targets = [
target if count <= 1 else
"{} {}".format(count, target)
for target, count in targets.iteritems()
]
task_type = 'bulk_course_email'
task_class = send_bulk_course_email
task_input = {'email_id': email_id, 'to_option': targets}
task_key_stub = str(email_id)
# create the key value by using MD5 hash:
task_key = hashlib.md5(task_key_stub).hexdigest()
return submit_task(request, task_type, task_class, course_key, task_input, task_key)
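# Worked example (illustrative comment): the Counter collapsing above turns
# repeated target types into "<count> <type>" entries. E.g. for targets of
# types ['myself', 'cohort', 'cohort']:
#     Counter(...) -> {'myself': 1, 'cohort': 2}
#     targets      -> ['myself', '2 cohort']   (order may vary)
# keeping task_input small even when an email has many targets of one type.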
def submit_calculate_problem_responses_csv(request, course_key, problem_location): # pylint: disable=invalid-name
"""
Submits a task to generate a CSV file containing all student
answers to a given problem.
Raises AlreadyRunningError if said file is already being updated.
"""
task_type = 'problem_responses_csv'
task_class = calculate_problem_responses_csv
task_input = {'problem_location': problem_location}
task_key = ""
return submit_task(request, task_type, task_class, course_key, task_input, task_key)
def submit_calculate_grades_csv(request, course_key):
"""
AlreadyRunningError is raised if the course's grades are already being updated.
"""
task_type = 'grade_course'
task_class = calculate_grades_csv
task_input = {}
task_key = ""
return submit_task(request, task_type, task_class, course_key, task_input, task_key)
def submit_problem_grade_report(request, course_key):
"""
Submits a task to generate a CSV grade report containing problem
values.
"""
task_type = 'grade_problems'
task_class = calculate_problem_grade_report
task_input = {}
task_key = ""
return submit_task(request, task_type, task_class, course_key, task_input, task_key)
def submit_calculate_students_features_csv(request, course_key, features):
"""
Submits a task to generate a CSV containing student profile info.
Raises AlreadyRunningError if said CSV is already being updated.
"""
task_type = 'profile_info_csv'
task_class = calculate_students_features_csv
task_input = features
task_key = ""
return submit_task(request, task_type, task_class, course_key, task_input, task_key)
def submit_detailed_enrollment_features_csv(request, course_key): # pylint: disable=invalid-name
"""
Submits a task to generate a CSV containing detailed enrollment info.
Raises AlreadyRunningError if said CSV is already being updated.
"""
task_type = 'detailed_enrollment_report'
task_class = enrollment_report_features_csv
task_input = {}
task_key = ""
return submit_task(request, task_type, task_class, course_key, task_input, task_key)
def submit_calculate_may_enroll_csv(request, course_key, features):
"""
Submits a task to generate a CSV file containing information about
invited students who have not enrolled in a given course yet.
Raises AlreadyRunningError if said file is already being updated.
"""
task_type = 'may_enroll_info_csv'
task_class = calculate_may_enroll_csv
task_input = {'features': features}
task_key = ""
return submit_task(request, task_type, task_class, course_key, task_input, task_key)
def submit_executive_summary_report(request, course_key):
"""
Submits a task to generate an HTML file containing the executive summary report.
Raises AlreadyRunningError if the HTML file is already being updated.
"""
task_type = 'exec_summary_report'
task_class = exec_summary_report_csv
task_input = {}
task_key = ""
return submit_task(request, task_type, task_class, course_key, task_input, task_key)
def submit_course_survey_report(request, course_key):
"""
Submits a task to generate the course survey report.
Raises AlreadyRunningError if the report is already being generated.
"""
task_type = 'course_survey_report'
task_class = course_survey_report_csv
task_input = {}
task_key = ""
return submit_task(request, task_type, task_class, course_key, task_input, task_key)
def submit_proctored_exam_results_report(request, course_key, features): # pylint: disable=invalid-name
"""
Submits a task to generate the proctored exam results report.
Raises AlreadyRunningError if the report is already being generated.
"""
task_type = 'proctored_exam_results_report'
task_class = proctored_exam_results_csv
task_input = {'features': features}
task_key = ""
return submit_task(request, task_type, task_class, course_key, task_input, task_key)
def submit_cohort_students(request, course_key, file_name):
"""
Request to have students cohorted in bulk.
Raises AlreadyRunningError if students are currently being cohorted.
"""
task_type = 'cohort_students'
task_class = cohort_students
task_input = {'file_name': file_name}
task_key = ""
return submit_task(request, task_type, task_class, course_key, task_input, task_key)
def submit_export_ora2_data(request, course_key):
"""
AlreadyRunningError is raised if an ora2 report is already being generated.
"""
task_type = 'export_ora2_data'
task_class = export_ora2_data
task_input = {}
task_key = ''
return submit_task(request, task_type, task_class, course_key, task_input, task_key)
def generate_certificates_for_students(request, course_key, student_set=None, specific_student_id=None): # pylint: disable=invalid-name
"""
Submits a task to generate certificates for given students enrolled in the course.
Arguments:
course_key : Course Key
student_set : Semantic for student collection for certificate generation.
Options are:
'all_whitelisted': All Whitelisted students.
'whitelisted_not_generated': Whitelisted students who have not received certificates yet.
'specific_student': Single student for certificate generation.
specific_student_id : Student ID when student_set is 'specific_student'
Raises AlreadyRunningError if certificates are currently being generated.
Raises SpecificStudentIdMissingError if student_set is 'specific_student' and specific_student_id is 'None'
"""
if student_set:
task_type = 'generate_certificates_student_set'
task_input = {'student_set': student_set}
if student_set == 'specific_student':
task_type = 'generate_certificates_certain_student'
if specific_student_id is None:
raise SpecificStudentIdMissingError(
"Attempted to generate certificate for a single student, "
"but no specific student id provided"
)
task_input.update({'specific_student_id': specific_student_id})
else:
task_type = 'generate_certificates_all_student'
task_input = {}
task_class = generate_certificates
task_key = ""
instructor_task = submit_task(request, task_type, task_class, course_key, task_input, task_key)
CertificateGenerationHistory.objects.create(
course_id=course_key,
generated_by=request.user,
instructor_task=instructor_task,
is_regeneration=False
)
return instructor_task
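# Usage sketch (illustrative; 'request', 'course_key' and the student id
# below are assumed values, not taken from this module):
#
#     generate_certificates_for_students(
#         request, course_key,
#         student_set='specific_student',
#         specific_student_id=42,
#     )
#
# Passing student_set='specific_student' without specific_student_id raises
# SpecificStudentIdMissingError, as documented above.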
def regenerate_certificates(request, course_key, statuses_to_regenerate):
"""
Submits a task to regenerate certificates for given students enrolled in the course.
Regenerate Certificate only if the status of the existing generated certificate is in 'statuses_to_regenerate'
list passed in the arguments.
Raises AlreadyRunningError if certificates are currently being generated.
"""
task_type = 'regenerate_certificates_all_student'
task_input = {}
task_input.update({"statuses_to_regenerate": statuses_to_regenerate})
task_class = generate_certificates
task_key = ""
instructor_task = submit_task(request, task_type, task_class, course_key, task_input, task_key)
CertificateGenerationHistory.objects.create(
course_id=course_key,
generated_by=request.user,
instructor_task=instructor_task,
is_regeneration=True
)
return instructor_task
| agpl-3.0 |
ambitioninc/django-query-builder | querybuilder/tests/models.py | 2 | 1679 | try:
from django.db.models import JSONField
except ImportError:
try:
from django.contrib.postgres.fields import JSONField
except ImportError:
from jsonfield import JSONField
from django.db import models
class User(models.Model):
"""
User model
"""
email = models.CharField(max_length=256)
class Account(models.Model):
"""
Account model
"""
user = models.OneToOneField(User, on_delete=models.CASCADE)
first_name = models.CharField(max_length=64)
last_name = models.CharField(max_length=64)
class Order(models.Model):
"""
Order model
"""
account = models.ForeignKey(Account, on_delete=models.CASCADE)
revenue = models.FloatField(null=True)
margin = models.FloatField()
margin_percent = models.FloatField()
time = models.DateTimeField()
class MetricRecord(models.Model):
"""
Example metric model
"""
other_value = models.FloatField(default=0)
data = JSONField()
class Uniques(models.Model):
"""
For testing upserts
"""
field1 = models.CharField(unique=True, max_length=16)
field2 = models.CharField(unique=True, max_length=16)
field3 = models.CharField(max_length=16)
field4 = models.CharField(max_length=16, default='default_value')
field5 = models.CharField(max_length=16, null=True, default=None)
field6 = models.CharField(max_length=16)
field7 = models.CharField(max_length=16)
field8 = JSONField(default=dict)
custom_field_name = models.CharField(
max_length=16, null=True, default='foo', db_column='actual_db_column_name'
)
class Meta:
unique_together = ('field6', 'field7')
| mit |
pytroll/mpop | mpop/satin/nwcsaf_msg.py | 2 | 115697 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2010, 2012, 2014.
# SMHI,
# Folkborgsvägen 1,
# Norrköping,
# Sweden
# Author(s):
# Martin Raspaud <martin.raspaud@smhi.se>
# Marco Sassi <marco.sassi@meteoswiss.ch> for CRR, PC (partly), SPhR, PCPh, CRPh
# Jörg Asmus <joerg.asmus@dwd.de> for CRR, PC (partly), SPhR, PCPh, CRPH
# Ulrich Hamann <ulrich.hamann@meteoswiss.ch> for CMa, bugfix SPhR.cape, 1st version generic class MsgNwcsafClass
# This file is part of mpop.
# mpop is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
# mpop is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along with
# mpop. If not, see <http://www.gnu.org/licenses/>.
"""Plugin for reading NWCSAF MSG products hdf files.
"""
import ConfigParser
import os.path
from mpop import CONFIG_PATH
import mpop.channel
import numpy as np
import pyresample.utils
import glob
from mpop.utils import get_logger
from mpop.projector import get_area_def
from os.path import basename
LOG = get_logger('satin/nwcsaf_msg')
COMPRESS_LVL = 6
def pcs_def_from_region(region):
items = region.proj_dict.items()
return ' '.join([t[0] + '=' + t[1] for t in items])
def _get_area_extent(cfac, lfac, coff, loff, numcols, numlines):
"""Get the area extent from msg parameters.
"""
# h = 35785831.0, see area_def.cfg
xur = (numcols - coff) * 2 ** 16 / (cfac * 1.0)
xur = np.deg2rad(xur) * 35785831.0
xll = (-1 - coff) * 2 ** 16 / (cfac * 1.0)
xll = np.deg2rad(xll) * 35785831.0
xres = (xur - xll) / numcols
xur, xll = xur - xres / 2, xll + xres / 2
yll = (numlines - loff) * 2 ** 16 / (-lfac * 1.0)
yll = np.deg2rad(yll) * 35785831.0
yur = (-1 - loff) * 2 ** 16 / (-lfac * 1.0)
yur = np.deg2rad(yur) * 35785831.0
yres = (yur - yll) / numlines
yll, yur = yll + yres / 2, yur - yres / 2
print "msg_hdf _get_area_extent: xll, yll, xur, yur = ", xll, yll, xur, yur
return xll, yll, xur, yur
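# Usage sketch (illustrative; the scaling values below are assumed,
# typical full-disk SEVIRI-like parameters, not read from a file):
#
#     aex = _get_area_extent(cfac=13642337, lfac=13642337,
#                            coff=1856, loff=1856,
#                            numcols=3712, numlines=3712)
#
# With parameters of this kind the returned extent is the familiar
# full-disk window of roughly +/- 5.57e6 metres in both x and y.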
def get_area_extent(filename):
"""Get the area extent of the data in *filename*.
"""
import h5py
h5f = h5py.File(filename, 'r')
print "msg_hdf get_area_extent: CFAC, LFAC, COFF, LOFF, NC, NL = ", h5f.attrs["CFAC"], h5f.attrs["LFAC"], h5f.attrs["COFF"], h5f.attrs["LOFF"], h5f.attrs["NC"], h5f.attrs["NL"]
aex = _get_area_extent(h5f.attrs["CFAC"],
h5f.attrs["LFAC"],
h5f.attrs["COFF"],
h5f.attrs["LOFF"],
h5f.attrs["NC"],
h5f.attrs["NL"])
h5f.close()
return aex
def _get_palette(h5f, dsname):
try:
p = h5f[dsname].attrs['PALETTE']
return h5f[p].value
except KeyError:
return None
# ------------------------------------------------------------------
class MsgCloudMaskData(object):
"""NWCSAF/MSG Cloud Mask data layer
"""
def __init__(self):
self.data = None
self.scaling_factor = 1
self.offset = 0
self.num_of_lines = 0
self.num_of_columns = 0
self.product = ""
self.id = ""
class MsgCloudMask(mpop.channel.GenericChannel):
"""NWCSAF/MSG Cloud Mask data structure as retrieved from HDF5
file. Resolution sets the nominal resolution of the data.
"""
def __init__(self, resolution=None):
mpop.channel.GenericChannel.__init__(self, "CloudMask")
self.filled = False
self.name = "CloudMask"
self.resolution = resolution
self.package = ""
self.saf = ""
self.product_name = ""
self.num_of_columns = 0
self.num_of_lines = 0
self.projection_name = ""
self.pcs_def = ""
self.xscale = 0
self.yscale = 0
self.ll_lon = 0.0
self.ll_lat = 0.0
self.ur_lon = 0.0
self.ur_lat = 0.0
self.region_name = ""
self.cfac = 0
self.lfac = 0
self.coff = 0
self.loff = 0
self.nb_param = 0
self.gp_sc_id = 0
self.image_acquisition_time = 0
self.spectral_channel_id = 0
self.nominal_product_time = 0
self.sgs_product_quality = 0
self.sgs_product_completeness = 0
self.product_algorithm_version = ""
self.CMa = None
self.CMa_DUST = None
self.CMa_QUALITY = None
self.CMa_VOLCANIC = None
self.shape = None
self.satid = ""
self.qc_straylight = -1
def __str__(self):
return ("'%s: shape %s, resolution %sm'" %
(self.name,
self.CMa.shape,
self.resolution))
def is_loaded(self):
"""Tells if the channel contains loaded data.
"""
return self.filled
def read(self, filename, calibrate=True):
"""Reader for the NWCSAF/MSG cloudtype. Use *filename* to read data.
"""
import h5py
self.CMa = MsgCloudMaskData()
self.CMa_DUST = MsgCloudMaskData()
self.CMa_QUALITY = MsgCloudMaskData()
self.CMa_VOLCANIC = MsgCloudMaskData()
h5f = h5py.File(filename, 'r')
# pylint: disable-msg=W0212
self.package = h5f.attrs["PACKAGE"]
self.saf = h5f.attrs["SAF"]
self.product_name = h5f.attrs["PRODUCT_NAME"]
self.num_of_columns = h5f.attrs["NC"]
self.num_of_lines = h5f.attrs["NL"]
self.projection_name = h5f.attrs["PROJECTION_NAME"]
self.region_name = h5f.attrs["REGION_NAME"]
self.cfac = h5f.attrs["CFAC"]
self.lfac = h5f.attrs["LFAC"]
self.coff = h5f.attrs["COFF"]
self.loff = h5f.attrs["LOFF"]
self.nb_param = h5f.attrs["NB_PARAMETERS"]
self.gp_sc_id = h5f.attrs["GP_SC_ID"]
self.image_acquisition_time = h5f.attrs["IMAGE_ACQUISITION_TIME"]
self.spectral_channel_id = h5f.attrs["SPECTRAL_CHANNEL_ID"]
self.nominal_product_time = h5f.attrs["NOMINAL_PRODUCT_TIME"]
self.sgs_product_quality = h5f.attrs["SGS_PRODUCT_QUALITY"]
self.sgs_product_completeness = h5f.attrs["SGS_PRODUCT_COMPLETENESS"]
self.product_algorithm_version = h5f.attrs["PRODUCT_ALGORITHM_VERSION"]
# pylint: enable-msg=W0212
# ------------------------
# The cloud mask data
print "... read cloud mask data"
h5d = h5f['CMa']
self.CMa.data = h5d[:, :]
self.CMa.scaling_factor = h5d.attrs["SCALING_FACTOR"]
self.CMa.offset = h5d.attrs["OFFSET"]
self.CMa.num_of_lines = h5d.attrs["N_LINES"]
self.CMa.num_of_columns = h5d.attrs["N_COLS"]
self.shape = (self.CMa.num_of_lines,
self.CMa.num_of_columns)
self.CMa.product = h5d.attrs["PRODUCT"]
self.CMa.id = h5d.attrs["ID"]
self.CMa_palette = _get_palette(h5f, 'CMa')
# ------------------------
# The cloud mask dust data
print "... read cloud mask dust data"
h5d = h5f['CMa_DUST']
self.CMa_DUST.data = h5d[:, :]
self.CMa_DUST.scaling_factor = h5d.attrs["SCALING_FACTOR"]
self.CMa_DUST.offset = h5d.attrs["OFFSET"]
self.CMa_DUST.num_of_lines = h5d.attrs["N_LINES"]
self.CMa_DUST.num_of_columns = h5d.attrs["N_COLS"]
self.CMa_DUST.product = h5d.attrs["PRODUCT"]
self.CMa_DUST.id = h5d.attrs["ID"]
self.CMa_DUST_palette = _get_palette(h5f, 'CMa_DUST')
# ------------------------
# The cloud mask quality
print "... read cloud mask quality"
h5d = h5f['CMa_QUALITY']
self.CMa_QUALITY.data = h5d[:, :]
self.CMa_QUALITY.scaling_factor = h5d.attrs["SCALING_FACTOR"]
self.CMa_QUALITY.offset = h5d.attrs["OFFSET"]
self.CMa_QUALITY.num_of_lines = h5d.attrs["N_LINES"]
self.CMa_QUALITY.num_of_columns = h5d.attrs["N_COLS"]
self.CMa_QUALITY.product = h5d.attrs["PRODUCT"]
self.CMa_QUALITY.id = h5d.attrs["ID"]
# no palette for QUALITY
# ------------------------
h5d = h5f['CMa_VOLCANIC']
print "... read volcanic dust mask"
self.CMa_VOLCANIC.data = h5d[:, :]
self.CMa_VOLCANIC.scaling_factor = h5d.attrs["SCALING_FACTOR"]
self.CMa_VOLCANIC.offset = h5d.attrs["OFFSET"]
self.CMa_VOLCANIC.num_of_lines = h5d.attrs["N_LINES"]
self.CMa_VOLCANIC.num_of_columns = h5d.attrs["N_COLS"]
self.CMa_VOLCANIC.product = h5d.attrs["PRODUCT"]
self.CMa_VOLCANIC.id = h5d.attrs["ID"]
self.CMa_VOLCANIC_palette = _get_palette(h5f, 'CMa_VOLCANIC')
# ------------------------
h5f.close()
self.CMa = self.CMa.data
self.CMa_DUST = self.CMa_DUST.data
self.CMa_QUALITY = self.CMa_QUALITY.data
self.CMa_VOLCANIC = self.CMa_VOLCANIC.data
self.area = get_area_from_file(filename)
self.filled = True
def save(self, filename):
"""Save the current cloudtype object to hdf *filename*, in pps format.
"""
import h5py
cma = self.convert2pps()
LOG.info("Saving CMa hdf file...")
cma.save(filename)
h5f = h5py.File(filename, mode="a")
h5f.attrs["straylight_contaminated"] = self.qc_straylight
h5f.close()
LOG.info("Saving CMa hdf file done !")
def project(self, coverage):
"""Remaps the NWCSAF/MSG Cloud Type to cartographic map-projection on
area give by a pre-registered area-id. Faster version of msg_remap!
"""
LOG.info("Projecting channel %s..." % (self.name))
region = coverage.out_area
dest_area = region.area_id
retv = MsgCloudMask()
retv.name = self.name
retv.package = self.package
retv.saf = self.saf
retv.product_name = self.product_name
retv.region_name = dest_area
retv.cfac = self.cfac
retv.lfac = self.lfac
retv.coff = self.coff
retv.loff = self.loff
retv.nb_param = self.nb_param
retv.gp_sc_id = self.gp_sc_id
retv.image_acquisition_time = self.image_acquisition_time
retv.spectral_channel_id = self.spectral_channel_id
retv.nominal_product_time = self.nominal_product_time
retv.sgs_product_quality = self.sgs_product_quality
retv.sgs_product_completeness = self.sgs_product_completeness
retv.product_algorithm_version = self.product_algorithm_version
retv.CMa = coverage.project_array(self.CMa)
retv.CMa_DUST = coverage.project_array(self.CMa_DUST)
retv.CMa_QUALITY = coverage.project_array(self.CMa_QUALITY)
retv.CMa_VOLCANIC = coverage.project_array(self.CMa_VOLCANIC)
retv.qc_straylight = self.qc_straylight
retv.region_name = dest_area
retv.area = region
retv.projection_name = region.proj_id
retv.pcs_def = pcs_def_from_region(region)
retv.num_of_columns = region.x_size
retv.num_of_lines = region.y_size
retv.xscale = region.pixel_size_x
retv.yscale = region.pixel_size_y
import pyproj
prj = pyproj.Proj(region.proj4_string)
aex = region.area_extent
lonur, latur = prj(aex[2], aex[3], inverse=True)
lonll, latll = prj(aex[0], aex[1], inverse=True)
retv.ll_lon = lonll
retv.ll_lat = latll
retv.ur_lon = lonur
retv.ur_lat = latur
self.shape = region.shape
retv.filled = True
retv.resolution = self.resolution
return retv
def convert2pps(self):
"""Converts the NWCSAF/MSG Cloud Mask to the PPS format,
in order to have consistency in output format between PPS and MSG.
"""
import epshdf
retv = PpsCloudMask()
retv.region = epshdf.SafRegion()
retv.region.xsize = self.num_of_columns
retv.region.ysize = self.num_of_lines
retv.region.id = self.region_name
retv.region.pcs_id = self.projection_name
retv.region.pcs_def = pcs_def_from_region(self.area)
retv.region.area_extent = self.area.area_extent
retv.satellite_id = self.satid
retv.CMa_lut = pps_luts('CMa')
retv.CMa_DUST_lut = pps_luts('CMa_DUST')
retv.CMa_VOLCANIC_lut = pps_luts('CMa_VOLCANIC')
retv.CMa_des = "MSG SEVIRI Cloud Mask"
retv.CMa_DUST_des = 'MSG SEVIRI Cloud Mask DUST'
retv.CMa_QUALITY_des = 'MSG SEVIRI bitwise quality/processing flags'
retv.CMa_VOLCANIC_des = 'MSG SEVIRI Cloud Mask VOLCANIC'
retv.CMa = self.CMa.astype('B')
retv.CMa_DUST = self.CMa_DUST.astype('B')
retv.CMa_QUALITY = self.CMa_QUALITY.astype('B')
retv.CMa_VOLCANIC = self.CMa_VOLCANIC.astype('B')
return retv
def convert2nordrad(self):
return NordRadCType(self)
#-----------------------------------------------------------------------
# ------------------------------------------------------------------
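# A minimal sketch -- not used by the classes below -- of the
# count-to-physical-value conversion that every reader in this module
# applies: the per-dataset HDF5 attributes SCALING_FACTOR and OFFSET
# rescale the stored counts.  File and dataset names are up to the caller;
# nothing here is an official NWCSAF API.
def _calibrate_dataset_example(filename, dset_name):
    """Return dataset *dset_name* from HDF5 file *filename* as physical
    values, i.e. counts * SCALING_FACTOR + OFFSET."""
    import h5py
    h5f = h5py.File(filename, 'r')
    h5d = h5f[dset_name]
    values = h5d[:, :] * h5d.attrs["SCALING_FACTOR"] + h5d.attrs["OFFSET"]
    h5f.close()
    return values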
class MsgNwcsafData(object):
"""NWCSAF/MSG data layer
"""
def __init__(self):
self.data = None
self.scaling_factor = 1
self.offset = 0
self.num_of_lines = 0
self.num_of_columns = 0
self.product = ""
self.id = ""
class MsgNwcsafClass(mpop.channel.GenericChannel):
"""NWCSAF/MSG data structure as retrieved from HDF5
file. Resolution sets the nominal resolution of the data.
"""
def __init__(self, product, resolution=None):
mpop.channel.GenericChannel.__init__(self, product)
self.filled = False
self.name = product
self.var_names = None
self.resolution = resolution
self.package = ""
self.saf = ""
self.product_name = ""
self.num_of_columns = 0
self.num_of_lines = 0
self.projection_name = ""
self.pcs_def = ""
self.xscale = 0
self.yscale = 0
self.ll_lon = 0.0
self.ll_lat = 0.0
self.ur_lon = 0.0
self.ur_lat = 0.0
self.region_name = ""
self.cfac = 0
self.lfac = 0
self.coff = 0
self.loff = 0
self.nb_param = 0
self.gp_sc_id = 0
self.image_acquisition_time = 0
self.spectral_channel_id = 0
self.nominal_product_time = 0
self.sgs_product_quality = 0
self.sgs_product_completeness = 0
self.product_algorithm_version = ""
if product == "CloudMask":
self.CMa = None
self.CMa_DUST = None
self.CMa_QUALITY = None
self.CMa_VOLCANIC = None
elif product == "CT":
self.CT = None
self.CT_PHASE = None
self.CT_QUALITY = None
elif product == "CTTH":
self.CTTH_TEMPER = None
self.CTTH_HEIGHT = None
self.CTTH_PRESS = None
self.CTTH_EFFECT = None
self.CTTH_QUALITY = None
elif product == "CRR":
self.crr = None
self.crr_accum = None
self.crr_intensity = None
self.crr_quality = None
self.processing_flags = None
elif product == "PC":
self.probability_1 = None
self.processing_flags = None
elif product == "SPhR":
self.sphr_bl = None
self.sphr_cape = None
self.sphr_diffbl = None
self.sphr_diffhl = None
self.sphr_diffki = None
self.sphr_diffli = None
self.sphr_diffml = None
self.sphr_diffshw = None
self.sphr_difftpw = None
self.sphr_hl = None
self.sphr_ki = None
self.sphr_li = None
self.sphr_ml = None
self.sphr_quality = None
self.sphr_sflag = None
self.sphr_shw = None
self.sphr_tpw = None
elif product == "PCPh":
            self.pcph_pc = None
self.pcph_quality = None
self.pcph_dataflag = None
self.processing_flags = None
elif product =="CRPh":
self.crph_crr = None
self.crph_accum = None
self.crph_iqf = None
self.crph_quality = None
self.crph_dataflag = None
self.processing_flags = None
else:
print "*** ERROR in MsgNWCSAF (nwcsaf_msg.py)"
print " unknown NWCSAF product: ", product
quit()
self.shape = None
self.satid = ""
self.qc_straylight = -1
def __str__(self):
return ("'%s: shape %s, resolution %sm'" %
(self.name,
self.shape,
self.resolution))
def is_loaded(self):
"""Tells if the channel contains loaded data.
"""
return self.filled
def read(self, filename, calibrate=True):
"""Reader for the NWCSAF/MSG cloudtype. Use *filename* to read data.
"""
import h5py
if self.name == "CTTH":
self.var_names = ('CTTH_TEMPER', 'CTTH_HEIGHT', 'CTTH_PRESS', 'CTTH_EFFECT', 'CTTH_QUALITY')
elif self.name == "CloudType":
self.var_names = ('CT', 'CT_PHASE', 'CT_QUALITY')
elif self.name == "CloudMask":
self.var_names = ('CMa', 'CMa_DUST', 'CMa_QUALITY', 'CMa_VOLCANIC')
elif self.name == "SPhR":
            self.var_names = ('SPhR_BL', 'SPhR_CAPE', 'SPhR_HL', 'SPhR_KI',
                              'SPhR_LI', 'SPhR_ML', 'SPhR_QUALITY',
                              'SPhR_SHW', 'SPhR_TPW')
else:
print "*** ERROR in MsgNWCSAF read (nwcsaf_msg.py)"
print " unknown NWCSAF product: ", product
quit()
h5f = h5py.File(filename, 'r')
# pylint: disable-msg=W0212
self.package = h5f.attrs["PACKAGE"]
self.saf = h5f.attrs["SAF"]
self.product_name = h5f.attrs["PRODUCT_NAME"]
self.num_of_columns = h5f.attrs["NC"]
self.num_of_lines = h5f.attrs["NL"]
self.projection_name = h5f.attrs["PROJECTION_NAME"]
self.region_name = h5f.attrs["REGION_NAME"]
self.cfac = h5f.attrs["CFAC"]
self.lfac = h5f.attrs["LFAC"]
self.coff = h5f.attrs["COFF"]
self.loff = h5f.attrs["LOFF"]
self.nb_param = h5f.attrs["NB_PARAMETERS"]
self.gp_sc_id = h5f.attrs["GP_SC_ID"]
self.image_acquisition_time = h5f.attrs["IMAGE_ACQUISITION_TIME"]
self.spectral_channel_id = h5f.attrs["SPECTRAL_CHANNEL_ID"]
self.nominal_product_time = h5f.attrs["NOMINAL_PRODUCT_TIME"]
self.sgs_product_quality = h5f.attrs["SGS_PRODUCT_QUALITY"]
self.sgs_product_completeness = h5f.attrs["SGS_PRODUCT_COMPLETENESS"]
self.product_algorithm_version = h5f.attrs["PRODUCT_ALGORITHM_VERSION"]
# pylint: enable-msg=W0212
# ------------------------
for var_name in self.var_names:
print "... read hdf5 variable ", var_name
h5d = h5f[var_name]
            var1 = MsgNwcsafData()
var1.data = h5d[:, :]
var1.scaling_factor = h5d.attrs["SCALING_FACTOR"]
var1.offset = h5d.attrs["OFFSET"]
var1.num_of_lines = h5d.attrs["N_LINES"]
var1.num_of_columns = h5d.attrs["N_COLS"]
self.shape = (var1.num_of_lines,
var1.num_of_columns)
var1.product = h5d.attrs["PRODUCT"]
var1.id = h5d.attrs["ID"]
# copy temporal var1 to self.var_name
if calibrate:
print "... apply scaling_factor", var1.scaling_factor, " and offset ", var1.offset
setattr(self, var_name, var1.data*var1.scaling_factor
+var1.offset )
else:
setattr(self, var_name, var1.data)
            # !!! Is a check needed for whether the palette exists? !!!
# read 'product'_palette and copy it to self.'product'_palette
setattr(self, var_name+"_palette", _get_palette(h5f, var_name) )
h5f.close()
self.area = get_area_from_file(filename)
self.filled = True
def save(self, filename):
"""Save the current cloudtype object to hdf *filename*, in pps format.
"""
import h5py
cma = self.convert2pps()
LOG.info("Saving NWCSAF data as hdf file...")
cma.save(filename)
h5f = h5py.File(filename, mode="a")
h5f.attrs["straylight_contaminated"] = self.qc_straylight
h5f.close()
LOG.info("Saving NWCSAF data hdf file done !")
def project(self, coverage):
"""Remaps the NWCSAF/MSG Cloud Type to cartographic map-projection on
area give by a pre-registered area-id. Faster version of msg_remap!
"""
LOG.info("Projecting channel %s..." % (self.name))
region = coverage.out_area
dest_area = region.area_id
retv = MsgNwcsafClass(self.name)
retv.name = self.name
retv.package = self.package
retv.saf = self.saf
retv.product_name = self.product_name
retv.region_name = dest_area
retv.cfac = self.cfac
retv.lfac = self.lfac
retv.coff = self.coff
retv.loff = self.loff
retv.nb_param = self.nb_param
retv.gp_sc_id = self.gp_sc_id
retv.image_acquisition_time = self.image_acquisition_time
retv.spectral_channel_id = self.spectral_channel_id
retv.nominal_product_time = self.nominal_product_time
retv.sgs_product_quality = self.sgs_product_quality
retv.sgs_product_completeness = self.sgs_product_completeness
retv.product_algorithm_version = self.product_algorithm_version
# loop for reprojecting data, e.g. retv.CMa = coverage.project_array(self.CMa)
for var_name in self.var_names:
setattr(retv, var_name, coverage.project_array(getattr(self, var_name)))
        # !!! BUG !!! copying of the palettes is missing
retv.qc_straylight = self.qc_straylight
retv.region_name = dest_area
retv.area = region
retv.projection_name = region.proj_id
retv.pcs_def = pcs_def_from_region(region)
retv.num_of_columns = region.x_size
retv.num_of_lines = region.y_size
retv.xscale = region.pixel_size_x
retv.yscale = region.pixel_size_y
import pyproj
prj = pyproj.Proj(region.proj4_string)
aex = region.area_extent
lonur, latur = prj(aex[2], aex[3], inverse=True)
lonll, latll = prj(aex[0], aex[1], inverse=True)
retv.ll_lon = lonll
retv.ll_lat = latll
retv.ur_lon = lonur
retv.ur_lat = latur
        retv.shape = region.shape
retv.filled = True
retv.resolution = self.resolution
return retv
def convert2pps(self):
"""Converts the NWCSAF/MSG data set to the PPS format,
in order to have consistency in output format between PPS and MSG.
"""
import epshdf
retv = PpsCloudMask()
retv.region = epshdf.SafRegion()
retv.region.xsize = self.num_of_columns
retv.region.ysize = self.num_of_lines
retv.region.id = self.region_name
retv.region.pcs_id = self.projection_name
retv.region.pcs_def = pcs_def_from_region(self.area)
retv.region.area_extent = self.area.area_extent
retv.satellite_id = self.satid
# !!! UH: THIS PART IS TO BE DONE BY SOMEBODY WHO USES PPS !!!
        # loop over the interesting variables
for var_name in self.var_names:
# get look-up tables, e.g. retv.CMa_lut = pps_luts('CMa')
setattr( retv, var_name+"_lut", pps_luts(var_name) )
# get describing strings, e.g. retv.CMa_des = "MSG SEVIRI Cloud Mask"
setattr( retv, var_name+"_des", pps_description(var_name) )
# if not processing flag, get astype, e.g. retv.cloudtype = self.cloudtype.astype('B')
if var_name.find("QUALITY") != -1 and var_name.find("flag") != -1:
setattr( retv, var_name, getattr(self, var_name).astype('B') )
elif var_name=="CT_QUALITY" or var_name=="qualityflag":
retv.qualityflag = ctype_procflags2pps(self.processing_flags)
elif var_name=="CTTH_QUALITY" or var_name=="processingflag":
retv.processingflag = ctth_procflags2pps(self.processing_flags)
elif var_name=="CMa_QUALITY" or var_name=="QUALITY":
print "*** WARNING, no conversion for CMA and SPhR products flags yet!"
# !!! UH: THIS PART IS TO BE DONE BY SOMEBODY WHO USES PPS !!!
return retv
def convert2nordrad(self):
return NordRadCType(self)
#-----------------------------------------------------------------------
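# The generic MsgNwcsafClass above drives its read/project/convert loops
# entirely off self.var_names with getattr/setattr, so one loop serves all
# products.  A small self-contained sketch of that pattern (the _Bag class
# and attribute values are illustrative only, not part of mpop):
def _setattr_loop_example():
    """Copy a list of named attributes from one object onto another, the
    way MsgNwcsafClass.project() copies the projected arrays onto retv."""
    class _Bag(object):
        pass
    src, dst = _Bag(), _Bag()
    src.CMa = [1, 2]
    src.CMa_DUST = [3, 4]
    for var_name in ('CMa', 'CMa_DUST'):
        setattr(dst, var_name, getattr(src, var_name))
    return dst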
class MsgCloudTypeData(object):
"""NWCSAF/MSG Cloud Type data layer
"""
def __init__(self):
self.data = None
self.scaling_factor = 1
self.offset = 0
self.num_of_lines = 0
self.num_of_columns = 0
self.product = ""
self.id = ""
class MsgCloudType(mpop.channel.GenericChannel):
"""NWCSAF/MSG Cloud Type data structure as retrieved from HDF5
file. Resolution sets the nominal resolution of the data.
"""
def __init__(self):
mpop.channel.GenericChannel.__init__(self, "CloudType")
self.filled = False
self.name = "CloudType"
self.package = ""
self.saf = ""
self.product_name = ""
self.num_of_columns = 0
self.num_of_lines = 0
self.projection_name = ""
self.pcs_def = ""
self.xscale = 0
self.yscale = 0
self.ll_lon = 0.0
self.ll_lat = 0.0
self.ur_lon = 0.0
self.ur_lat = 0.0
self.region_name = ""
self.cfac = 0
self.lfac = 0
self.coff = 0
self.loff = 0
self.nb_param = 0
self.gp_sc_id = 0
self.image_acquisition_time = 0
self.spectral_channel_id = 0
self.nominal_product_time = 0
self.sgs_product_quality = 0
self.sgs_product_completeness = 0
self.product_algorithm_version = ""
self.cloudtype = None
self.processing_flags = None
self.cloudphase = None
self.shape = None
self.satid = ""
self.qc_straylight = -1
self.cloudtype_palette = None
self.cloudphase_palette = None
def __str__(self):
return ("'%s: shape %s, resolution %sm'" %
(self.name,
self.cloudtype.shape,
self.resolution))
def is_loaded(self):
"""Tells if the channel contains loaded data.
"""
return self.filled
# ------------------------------------------------------------------
def read(self, filename, calibrate=True):
"""Reader for the NWCSAF/MSG cloudtype. Use *filename* to read data.
"""
import h5py
self.cloudtype = MsgCloudTypeData()
self.processing_flags = MsgCloudTypeData()
self.cloudphase = MsgCloudTypeData()
LOG.debug("Filename = <" + str(filename) + ">")
h5f = h5py.File(filename, 'r')
# pylint: disable-msg=W0212
self.package = h5f.attrs["PACKAGE"]
self.saf = h5f.attrs["SAF"]
self.product_name = h5f.attrs["PRODUCT_NAME"]
self.num_of_columns = h5f.attrs["NC"]
self.num_of_lines = h5f.attrs["NL"]
self.projection_name = h5f.attrs["PROJECTION_NAME"]
self.region_name = h5f.attrs["REGION_NAME"]
self.cfac = h5f.attrs["CFAC"]
self.lfac = h5f.attrs["LFAC"]
self.coff = h5f.attrs["COFF"]
self.loff = h5f.attrs["LOFF"]
self.nb_param = h5f.attrs["NB_PARAMETERS"]
self.gp_sc_id = h5f.attrs["GP_SC_ID"]
self.image_acquisition_time = h5f.attrs["IMAGE_ACQUISITION_TIME"]
self.spectral_channel_id = h5f.attrs["SPECTRAL_CHANNEL_ID"]
self.nominal_product_time = h5f.attrs["NOMINAL_PRODUCT_TIME"]
self.sgs_product_quality = h5f.attrs["SGS_PRODUCT_QUALITY"]
self.sgs_product_completeness = h5f.attrs["SGS_PRODUCT_COMPLETENESS"]
self.product_algorithm_version = h5f.attrs["PRODUCT_ALGORITHM_VERSION"]
# pylint: enable-msg=W0212
# ------------------------
# The cloudtype data
h5d = h5f['CT']
self.cloudtype.data = h5d[:, :]
self.cloudtype.scaling_factor = h5d.attrs["SCALING_FACTOR"]
self.cloudtype.offset = h5d.attrs["OFFSET"]
self.cloudtype.num_of_lines = h5d.attrs["N_LINES"]
self.cloudtype.num_of_columns = h5d.attrs["N_COLS"]
self.shape = (self.cloudtype.num_of_lines,
self.cloudtype.num_of_columns)
self.cloudtype.product = h5d.attrs["PRODUCT"]
self.cloudtype.id = h5d.attrs["ID"]
self.cloudtype_palette = _get_palette(h5f, 'CT') / 255.0
# ------------------------
# The cloud phase data
h5d = h5f['CT_PHASE']
self.cloudphase.data = h5d[:, :]
self.cloudphase.scaling_factor = h5d.attrs["SCALING_FACTOR"]
self.cloudphase.offset = h5d.attrs["OFFSET"]
self.cloudphase.num_of_lines = h5d.attrs["N_LINES"]
self.cloudphase.num_of_columns = h5d.attrs["N_COLS"]
self.cloudphase.product = h5d.attrs["PRODUCT"]
self.cloudphase.id = h5d.attrs["ID"]
self.cloudphase_palette = _get_palette(h5f, 'CT_PHASE')
# ------------------------
# The cloudtype processing/quality flags
h5d = h5f['CT_QUALITY']
self.processing_flags.data = h5d[:, :]
self.processing_flags.scaling_factor = \
h5d.attrs["SCALING_FACTOR"]
self.processing_flags.offset = h5d.attrs["OFFSET"]
self.processing_flags.num_of_lines = h5d.attrs["N_LINES"]
self.processing_flags.num_of_columns = h5d.attrs["N_COLS"]
self.processing_flags.product = h5d.attrs["PRODUCT"]
self.processing_flags.id = h5d.attrs["ID"]
# ------------------------
h5f.close()
self.cloudtype = self.cloudtype.data
self.cloudphase = self.cloudphase.data
self.processing_flags = self.processing_flags.data
self.area = get_area_from_file(filename)
self.filled = True
def save(self, filename):
"""Save the current cloudtype object to hdf *filename*, in pps format.
"""
import h5py
ctype = self.convert2pps()
LOG.info("Saving CType hdf file...")
ctype.save(filename)
h5f = h5py.File(filename, mode="a")
h5f.attrs["straylight_contaminated"] = self.qc_straylight
h5f.close()
LOG.info("Saving CType hdf file done !")
def project(self, coverage):
"""Remaps the NWCSAF/MSG Cloud Type to cartographic map-projection on
area give by a pre-registered area-id. Faster version of msg_remap!
"""
LOG.info("Projecting channel %s..." % (self.name))
region = coverage.out_area
dest_area = region.area_id
retv = MsgCloudType()
retv.name = self.name
retv.package = self.package
retv.saf = self.saf
retv.product_name = self.product_name
retv.region_name = dest_area
retv.cfac = self.cfac
retv.lfac = self.lfac
retv.coff = self.coff
retv.loff = self.loff
retv.nb_param = self.nb_param
retv.gp_sc_id = self.gp_sc_id
retv.image_acquisition_time = self.image_acquisition_time
retv.spectral_channel_id = self.spectral_channel_id
retv.nominal_product_time = self.nominal_product_time
retv.sgs_product_quality = self.sgs_product_quality
retv.sgs_product_completeness = self.sgs_product_completeness
retv.product_algorithm_version = self.product_algorithm_version
retv.cloudtype = coverage.project_array(self.cloudtype)
retv.cloudtype_palette = self.cloudtype_palette
retv.cloudphase = coverage.project_array(self.cloudphase)
retv.cloudphase_palette = self.cloudphase_palette
retv.processing_flags = \
coverage.project_array(self.processing_flags)
retv.qc_straylight = self.qc_straylight
retv.region_name = dest_area
retv.area = region
retv.projection_name = region.proj_id
retv.pcs_def = pcs_def_from_region(region)
retv.num_of_columns = region.x_size
retv.num_of_lines = region.y_size
retv.xscale = region.pixel_size_x
retv.yscale = region.pixel_size_y
import pyproj
prj = pyproj.Proj(region.proj4_string)
aex = region.area_extent
lonur, latur = prj(aex[2], aex[3], inverse=True)
lonll, latll = prj(aex[0], aex[1], inverse=True)
retv.ll_lon = lonll
retv.ll_lat = latll
retv.ur_lon = lonur
retv.ur_lat = latur
        retv.shape = region.shape
retv.filled = True
retv.resolution = self.resolution
return retv
# is it necessary?
# def convert2nordrad(self):
# return NordRadCType(self)
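# The palettes read above (e.g. cloudtype_palette) are N x 3 RGB arrays,
# normalized to [0, 1] by the "/ 255.0".  A minimal sketch -- assuming
# matplotlib is available, which this module does not otherwise require --
# of turning such a palette into a colormap for plotting:
def _palette_to_cmap_example(palette):
    """Return a matplotlib ListedColormap built from an N x 3 float palette."""
    from matplotlib.colors import ListedColormap
    return ListedColormap(palette, name='nwcsaf_palette')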
class MsgCTTHData(object):
"""CTTH data object.
"""
def __init__(self):
self.data = None
self.scaling_factor = 1
self.offset = 0
self.num_of_lines = 0
self.num_of_columns = 0
self.product = ""
self.id = ""
class MsgCTTH(mpop.channel.GenericChannel):
"""CTTH channel.
"""
def __init__(self, resolution=None):
mpop.channel.GenericChannel.__init__(self, "CTTH")
self.filled = False
self.name = "CTTH"
self.resolution = resolution
self.package = ""
self.saf = ""
self.product_name = ""
self.num_of_columns = 0
self.num_of_lines = 0
self.projection_name = ""
self.region_name = ""
self.cfac = 0
self.lfac = 0
self.coff = 0
self.loff = 0
self.nb_param = 0
self.gp_sc_id = 0
self.image_acquisition_time = 0
self.spectral_channel_id = 0
self.nominal_product_time = 0
self.sgs_product_quality = 0
self.sgs_product_completeness = 0
self.product_algorithm_version = ""
self.cloudiness = None # Effective cloudiness
self.processing_flags = None
self.height = None
self.temperature = None
self.pressure = None
self.satid = ""
def __str__(self):
return ("'%s: shape %s, resolution %sm'" %
(self.name,
self.shape,
self.resolution))
def is_loaded(self):
"""Tells if the channel contains loaded data.
"""
return self.filled
def read(self, filename, calibrate=True):
import h5py
self.cloudiness = MsgCTTHData() # Effective cloudiness
self.temperature = MsgCTTHData()
self.height = MsgCTTHData()
self.pressure = MsgCTTHData()
self.processing_flags = MsgCTTHData()
h5f = h5py.File(filename, 'r')
# The header
# pylint: disable-msg=W0212
self.package = h5f.attrs["PACKAGE"]
self.saf = h5f.attrs["SAF"]
self.product_name = h5f.attrs["PRODUCT_NAME"]
self.num_of_columns = h5f.attrs["NC"]
self.num_of_lines = h5f.attrs["NL"]
self.projection_name = h5f.attrs["PROJECTION_NAME"]
self.region_name = h5f.attrs["REGION_NAME"]
self.cfac = h5f.attrs["CFAC"]
self.lfac = h5f.attrs["LFAC"]
self.coff = h5f.attrs["COFF"]
self.loff = h5f.attrs["LOFF"]
self.nb_param = h5f.attrs["NB_PARAMETERS"]
self.gp_sc_id = h5f.attrs["GP_SC_ID"]
self.image_acquisition_time = h5f.attrs["IMAGE_ACQUISITION_TIME"]
self.spectral_channel_id = h5f.attrs["SPECTRAL_CHANNEL_ID"]
self.nominal_product_time = h5f.attrs["NOMINAL_PRODUCT_TIME"]
self.sgs_product_quality = h5f.attrs["SGS_PRODUCT_QUALITY"]
self.sgs_product_completeness = h5f.attrs["SGS_PRODUCT_COMPLETENESS"]
self.product_algorithm_version = h5f.attrs["PRODUCT_ALGORITHM_VERSION"]
# pylint: enable-msg=W0212
# ------------------------
# The CTTH cloudiness data
h5d = h5f['CTTH_EFFECT']
self.cloudiness.data = h5d[:, :]
self.cloudiness.scaling_factor = \
h5d.attrs["SCALING_FACTOR"]
self.cloudiness.offset = h5d.attrs["OFFSET"]
self.cloudiness.num_of_lines = h5d.attrs["N_LINES"]
self.cloudiness.num_of_columns = h5d.attrs["N_COLS"]
self.cloudiness.product = h5d.attrs["PRODUCT"]
self.cloudiness.id = h5d.attrs["ID"]
# self.cloudiness.data = np.ma.masked_equal(self.cloudiness.data, 255)
# self.cloudiness.data = np.ma.masked_equal(self.cloudiness.data, 0)
self.cloudiness_palette = _get_palette(h5f, 'CTTH_EFFECT') / 255.0
# ------------------------
# The CTTH temperature data
h5d = h5f['CTTH_TEMPER']
self.temperature.data = h5d[:, :]
self.temperature.scaling_factor = \
h5d.attrs["SCALING_FACTOR"]
self.temperature.offset = h5d.attrs["OFFSET"]
self.temperature.num_of_lines = h5d.attrs["N_LINES"]
        self.temperature.num_of_columns = h5d.attrs["N_COLS"]
        self.shape = (self.temperature.num_of_lines,
                      self.temperature.num_of_columns)
self.temperature.product = h5d.attrs["PRODUCT"]
self.temperature.id = h5d.attrs["ID"]
# self.temperature.data = np.ma.masked_equal(self.temperature.data, 0)
if calibrate:
self.temperature = (self.temperature.data *
self.temperature.scaling_factor +
self.temperature.offset)
else:
self.temperature = self.temperature.data
self.temperature_palette = _get_palette(h5f, 'CTTH_TEMPER') / 255.0
# ------------------------
# The CTTH pressure data
h5d = h5f['CTTH_PRESS']
self.pressure.data = h5d[:, :]
self.pressure.scaling_factor = \
h5d.attrs["SCALING_FACTOR"]
self.pressure.offset = h5d.attrs["OFFSET"]
self.pressure.num_of_lines = h5d.attrs["N_LINES"]
self.pressure.num_of_columns = h5d.attrs["N_COLS"]
self.pressure.product = h5d.attrs["PRODUCT"]
self.pressure.id = h5d.attrs["ID"]
# self.pressure.data = np.ma.masked_equal(self.pressure.data, 255)
# self.pressure.data = np.ma.masked_equal(self.pressure.data, 0)
if calibrate:
self.pressure = (self.pressure.data *
self.pressure.scaling_factor +
self.pressure.offset)
else:
self.pressure = self.pressure.data
self.pressure_palette = _get_palette(h5f, 'CTTH_PRESS') / 255.0
# ------------------------
# The CTTH height data
h5d = h5f['CTTH_HEIGHT']
self.height.data = h5d[:, :]
self.height.scaling_factor = \
h5d.attrs["SCALING_FACTOR"]
self.height.offset = h5d.attrs["OFFSET"]
self.height.num_of_lines = h5d.attrs["N_LINES"]
self.height.num_of_columns = h5d.attrs["N_COLS"]
self.height.product = h5d.attrs["PRODUCT"]
self.height.id = h5d.attrs["ID"]
# self.height.data = np.ma.masked_equal(self.height.data, 255)
# self.height.data = np.ma.masked_equal(self.height.data, 0)
if calibrate:
self.height = (self.height.data *
self.height.scaling_factor +
self.height.offset)
else:
self.height = self.height.data
self.height_palette = _get_palette(h5f, 'CTTH_HEIGHT') / 255.0
# ------------------------
# The CTTH processing/quality flags
h5d = h5f['CTTH_QUALITY']
self.processing_flags.data = h5d[:, :]
self.processing_flags.scaling_factor = \
h5d.attrs["SCALING_FACTOR"]
self.processing_flags.offset = h5d.attrs["OFFSET"]
self.processing_flags.num_of_lines = \
h5d.attrs["N_LINES"]
self.processing_flags.num_of_columns = \
h5d.attrs["N_COLS"]
self.processing_flags.product = h5d.attrs["PRODUCT"]
self.processing_flags.id = h5d.attrs["ID"]
self.processing_flags = \
np.ma.masked_equal(self.processing_flags.data, 0)
h5f.close()
self.shape = self.height.shape
self.area = get_area_from_file(filename)
self.filled = True
def save(self, filename):
"""Save the current CTTH channel to HDF5 format.
"""
ctth = self.convert2pps()
LOG.info("Saving CTTH hdf file...")
ctth.save(filename)
LOG.info("Saving CTTH hdf file done !")
def project(self, coverage):
"""Project the current CTTH channel along the *coverage*
"""
dest_area = coverage.out_area
dest_area_id = dest_area.area_id
retv = MsgCTTH()
retv.temperature = coverage.project_array(self.temperature)
retv.height = coverage.project_array(self.height)
retv.pressure = coverage.project_array(self.pressure)
#retv.cloudiness = coverage.project_array(self.cloudiness)
retv.processing_flags = \
coverage.project_array(self.processing_flags)
retv.height_palette = self.height_palette
retv.pressure_palette = self.pressure_palette
retv.temperature_palette = self.temperature_palette
retv.area = dest_area
retv.region_name = dest_area_id
retv.projection_name = dest_area.proj_id
retv.num_of_columns = dest_area.x_size
retv.num_of_lines = dest_area.y_size
retv.shape = dest_area.shape
retv.name = self.name
retv.resolution = self.resolution
retv.filled = True
return retv
# ----------------------------------------
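# The CTTH reader above and the PC reader below wrap the processing/quality
# flags with np.ma.masked_equal, so the fill value 0 is excluded from later
# computations.  A tiny self-contained illustration (the values are made up):
def _masked_flags_example():
    """Mask out zero-valued (missing) flags, as the CTTH/PC readers do."""
    import numpy as np
    flags = np.array([[0, 3], [5, 0]])
    # entries equal to 0 become masked; 3 and 5 stay valid
    return np.ma.masked_equal(flags, 0)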
class MsgPCData(object):
"""NWCSAF/MSG Precipitating Clouds data layer
"""
def __init__(self):
self.data = None
self.scaling_factor = 1
self.offset = 0
self.num_of_lines = 0
self.num_of_columns = 0
self.product = ""
self.id = ""
class MsgPC(mpop.channel.GenericChannel):
"""NWCSAF/MSG Precipitating Clouds data structure as retrieved from HDF5
file. Resolution sets the nominal resolution of the data.
"""
def __init__(self):
mpop.channel.GenericChannel.__init__(self, "PC")
self.filled = False
self.name = "PC"
self.package = ""
self.saf = ""
self.product_name = ""
self.num_of_columns = 0
self.num_of_lines = 0
self.projection_name = ""
self.pcs_def = ""
self.xscale = 0
self.yscale = 0
self.ll_lon = 0.0
self.ll_lat = 0.0
self.ur_lon = 0.0
self.ur_lat = 0.0
self.region_name = ""
self.cfac = 0
self.lfac = 0
self.coff = 0
self.loff = 0
self.nb_param = 0
self.gp_sc_id = 0
self.image_acquisition_time = 0
self.spectral_channel_id = 0
self.nominal_product_time = 0
self.sgs_product_quality = 0
self.sgs_product_completeness = 0
self.product_algorithm_version = ""
self.probability_1 = None
self.processing_flags = None
self.shape = None
self.satid = ""
self.qc_straylight = -1
def __str__(self):
return ("'%s: shape %s, resolution %sm'" %
(self.name,
self.probability_1.shape,
self.resolution))
def is_loaded(self):
"""Tells if the channel contains loaded data.
"""
return self.filled
# ------------------------------------------------------------------
def read(self, filename, calibrate=True):
"""Reader for the NWCSAF/MSG precipitating clouds. Use *filename* to read data.
"""
import h5py
self.probability_1 = MsgPCData()
self.processing_flags = MsgPCData()
h5f = h5py.File(filename, 'r')
# pylint: disable-msg=W0212
self.package = h5f.attrs["PACKAGE"]
self.saf = h5f.attrs["SAF"]
self.product_name = h5f.attrs["PRODUCT_NAME"]
self.num_of_columns = h5f.attrs["NC"]
self.num_of_lines = h5f.attrs["NL"]
self.projection_name = h5f.attrs["PROJECTION_NAME"]
self.region_name = h5f.attrs["REGION_NAME"]
self.cfac = h5f.attrs["CFAC"]
self.lfac = h5f.attrs["LFAC"]
self.coff = h5f.attrs["COFF"]
self.loff = h5f.attrs["LOFF"]
self.nb_param = h5f.attrs["NB_PARAMETERS"]
self.gp_sc_id = h5f.attrs["GP_SC_ID"]
self.image_acquisition_time = h5f.attrs["IMAGE_ACQUISITION_TIME"]
self.spectral_channel_id = h5f.attrs["SPECTRAL_CHANNEL_ID"]
self.nominal_product_time = h5f.attrs["NOMINAL_PRODUCT_TIME"]
self.sgs_product_quality = h5f.attrs["SGS_PRODUCT_QUALITY"]
self.sgs_product_completeness = h5f.attrs["SGS_PRODUCT_COMPLETENESS"]
self.product_algorithm_version = h5f.attrs["PRODUCT_ALGORITHM_VERSION"]
# pylint: enable-msg=W0212
# ------------------------
# The precipitating clouds data
h5d = h5f['PC_PROB1']
self.probability_1.data = h5d[:, :]
self.probability_1.scaling_factor = h5d.attrs["SCALING_FACTOR"]
self.probability_1.offset = h5d.attrs["OFFSET"]
self.probability_1.num_of_lines = h5d.attrs["N_LINES"]
self.probability_1.num_of_columns = h5d.attrs["N_COLS"]
self.shape = (self.probability_1.num_of_lines,
self.probability_1.num_of_columns)
self.probability_1.product = h5d.attrs["PRODUCT"]
self.probability_1.id = h5d.attrs["ID"]
if calibrate:
self.probability_1 = (self.probability_1.data *
self.probability_1.scaling_factor +
self.probability_1.offset)
else:
self.probability_1 = self.probability_1.data
self.probability_1_palette = _get_palette(h5f, 'PC_PROB1') / 255.0
# ------------------------
# The PC processing/quality flags
h5d = h5f['PC_QUALITY']
self.processing_flags.data = h5d[:, :]
self.processing_flags.scaling_factor = \
h5d.attrs["SCALING_FACTOR"]
self.processing_flags.offset = h5d.attrs["OFFSET"]
self.processing_flags.num_of_lines = h5d.attrs["N_LINES"]
self.processing_flags.num_of_columns = h5d.attrs["N_COLS"]
self.processing_flags.product = h5d.attrs["PRODUCT"]
self.processing_flags.id = h5d.attrs["ID"]
self.processing_flags = np.ma.masked_equal(
self.processing_flags.data, 0)
# ------------------------
h5f.close()
self.area = get_area_from_file(filename)
self.filled = True
def project(self, coverage):
"""Project the current PC channel along the *coverage*
"""
dest_area = coverage.out_area
dest_area_id = dest_area.area_id
retv = MsgPC()
retv.probability_1 = coverage.project_array(self.probability_1)
retv.processing_flags = \
coverage.project_array(self.processing_flags)
retv.probability_1_palette = self.probability_1_palette
retv.area = dest_area
retv.region_name = dest_area_id
retv.projection_name = dest_area.proj_id
retv.num_of_columns = dest_area.x_size
retv.num_of_lines = dest_area.y_size
retv.shape = dest_area.shape
retv.name = self.name
retv.resolution = self.resolution
retv.filled = True
return retv
# ------------------------------------------------------------------
def get_bit_from_flags(arr, nbit):
"""I don't know what this function does.
"""
res = np.bitwise_and(np.right_shift(arr, nbit), 1)
return res.astype('b')
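# A self-contained example use of get_bit_from_flags above: testing bit 2
# of each flag value.  The numbers are illustrative only.
def _get_bit_example():
    """Return the bit-2 values of a small flag array (here [1, 0, 1])."""
    import numpy as np
    flags = np.array([4, 3, 12])   # binary 100, 011, 1100
    return get_bit_from_flags(flags, 2)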
# New: section added at the PyTroll workshop, Copenhagen 2014
class MsgCRRData(object):
"""NWCSAF/MSG Convective Rain Rate data layer
"""
def __init__(self):
self.data = None
self.scaling_factor = 1
self.offset = 0
self.num_of_lines = 0
self.num_of_columns = 0
self.product = ""
self.id = ""
class MsgCRR(mpop.channel.GenericChannel):
"""NWCSAF/MSG Convective Rain Rate data structure as retrieved from HDF5
file. Resolution sets the nominal resolution of the data.
"""
def __init__(self):
mpop.channel.GenericChannel.__init__(self, "CRR")
self.filled = False
self.name = "CRR"
# self.resolution = resolution
self.package = ""
self.saf = ""
self.product_name = ""
self.num_of_columns = 0
self.num_of_lines = 0
self.projection_name = ""
self.pcs_def = ""
self.xscale = 0
self.yscale = 0
self.ll_lon = 0.0
self.ll_lat = 0.0
self.ur_lon = 0.0
self.ur_lat = 0.0
self.region_name = ""
self.cfac = 0
self.lfac = 0
self.coff = 0
self.loff = 0
self.nb_param = 0
self.gp_sc_id = 0
self.image_acquisition_time = 0
self.spectral_channel_id = 0
self.nominal_product_time = 0
self.sgs_product_quality = 0
self.sgs_product_completeness = 0
self.product_algorithm_version = ""
self.crr = None
self.crr_accum = None
self.crr_intensity = None
self.crr_quality = None
self.crr_dataflag = None
self.processing_flags = None
self.shape = None
self.satid = ""
self.qc_straylight = -1
self.crr_palette = None
self.crr_accum_palette = None
self.crr_intensity_palette = None
self.crr_quality_palette = None
def __str__(self):
return ("'%s: shape %s, resolution %sm'" %
(self.name,
self.crr.shape,
self.resolution))
def is_loaded(self):
"""Tells if the channel contains loaded data.
"""
return self.filled
# ------------------------------------------------------------------
def read(self, filename, calibrate=True):
"""Reader for the . Use *filename* to read data.
"""
import h5py
self.crr = MsgCRRData()
self.crr_accum = MsgCRRData()
self.crr_intensity = MsgCRRData()
self.crr_quality = MsgCRRData()
self.processing_flags = MsgCRRData()
LOG.debug("Filename = <" + str(filename) + ">")
h5f = h5py.File(filename, 'r')
# pylint: disable-msg=W0212
self.package = h5f.attrs["PACKAGE"]
self.saf = h5f.attrs["SAF"]
self.product_name = h5f.attrs["PRODUCT_NAME"]
self.num_of_columns = h5f.attrs["NC"]
self.num_of_lines = h5f.attrs["NL"]
self.projection_name = h5f.attrs["PROJECTION_NAME"]
self.region_name = h5f.attrs["REGION_NAME"]
self.cfac = h5f.attrs["CFAC"]
self.lfac = h5f.attrs["LFAC"]
self.coff = h5f.attrs["COFF"]
self.loff = h5f.attrs["LOFF"]
self.nb_param = h5f.attrs["NB_PARAMETERS"]
self.gp_sc_id = h5f.attrs["GP_SC_ID"]
self.image_acquisition_time = h5f.attrs["IMAGE_ACQUISITION_TIME"]
self.spectral_channel_id = h5f.attrs["SPECTRAL_CHANNEL_ID"]
self.nominal_product_time = h5f.attrs["NOMINAL_PRODUCT_TIME"]
self.sgs_product_quality = h5f.attrs["SGS_PRODUCT_QUALITY"]
self.sgs_product_completeness = h5f.attrs["SGS_PRODUCT_COMPLETENESS"]
self.product_algorithm_version = h5f.attrs["PRODUCT_ALGORITHM_VERSION"]
# pylint: enable-msg=W0212
# ------------------------
# The CRR data
h5d = h5f['CRR']
self.crr.data = h5d[:, :]
self.crr.scaling_factor = h5d.attrs["SCALING_FACTOR"]
self.crr.offset = h5d.attrs["OFFSET"]
self.crr.num_of_lines = h5d.attrs["N_LINES"]
self.crr.num_of_columns = h5d.attrs["N_COLS"]
self.shape = (self.crr.num_of_lines,
self.crr.num_of_columns)
self.crr.product = h5d.attrs["PRODUCT"]
self.crr.id = h5d.attrs["ID"]
if calibrate:
self.crr = (self.crr.data *
self.crr.scaling_factor +
self.crr.offset)
else:
self.crr = self.crr.data
self.crr_palette = _get_palette(h5f, 'CRR') / 255.0
# ------------------------
# The CRR ACCUM data
h5d = h5f['CRR_ACCUM']
self.crr_accum.data = h5d[:, :]
self.crr_accum.scaling_factor = h5d.attrs["SCALING_FACTOR"]
self.crr_accum.offset = h5d.attrs["OFFSET"]
self.crr_accum.num_of_lines = h5d.attrs["N_LINES"]
self.crr_accum.num_of_columns = h5d.attrs["N_COLS"]
self.crr_accum.product = h5d.attrs["PRODUCT"]
self.crr_accum.id = h5d.attrs["ID"]
if calibrate:
self.crr_accum = (self.crr_accum.data *
self.crr_accum.scaling_factor +
self.crr_accum.offset)
else:
self.crr_accum = self.crr_accum.data
self.crr_accum_palette = _get_palette(h5f, 'CRR_ACCUM') / 255.0
# ------------------------
# The CRR Intensity data
h5d = h5f['CRR_INTENSITY']
self.crr_intensity.data = h5d[:, :]
self.crr_intensity.scaling_factor = h5d.attrs["SCALING_FACTOR"]
self.crr_intensity.offset = h5d.attrs["OFFSET"]
self.crr_intensity.num_of_lines = h5d.attrs["N_LINES"]
self.crr_intensity.num_of_columns = h5d.attrs["N_COLS"]
self.crr_intensity.product = h5d.attrs["PRODUCT"]
self.crr_intensity.id = h5d.attrs["ID"]
if calibrate:
self.crr_intensity = (self.crr_intensity.data *
self.crr_intensity.scaling_factor +
self.crr_intensity.offset)
else:
self.crr_intensity = self.crr_intensity.data
self.crr_intensity_palette = _get_palette(h5f, 'CRR_INTENSITY') / 255.0
# ------------------------
# The CRR quality data
h5d = h5f['CRR_QUALITY']
self.crr_quality.data = h5d[:, :]
self.crr_quality.scaling_factor = h5d.attrs["SCALING_FACTOR"]
self.crr_quality.offset = h5d.attrs["OFFSET"]
self.crr_quality.num_of_lines = h5d.attrs["N_LINES"]
self.crr_quality.num_of_columns = h5d.attrs["N_COLS"]
self.crr_quality.product = h5d.attrs["PRODUCT"]
self.crr_quality.id = h5d.attrs["ID"]
if calibrate:
self.crr_quality = (self.crr_quality.data *
self.crr_quality.scaling_factor +
self.crr_quality.offset)
else:
self.crr_quality = self.crr_quality.data
self.crr_quality_palette = _get_palette(h5f, 'CRR_QUALITY')
# ------------------------
h5f.close()
#self.crr = self.crr.data
#self.crr_accum = self.crr_accum.data
#self.crr_intensity = self.crr_intensity.data
#self.crr_quality = self.crr_quality.data
#self.processing_flags = self.processing_flags.data
self.area = get_area_from_file(filename)
self.filled = True
def save(self, filename):
"""Save the current cloudtype object to hdf *filename*, in pps format.
"""
import h5py
ctype = self.convert2pps()
LOG.info("Saving CRR hdf file...")
ctype.save(filename)
h5f = h5py.File(filename, mode="a")
h5f.attrs["straylight_contaminated"] = self.qc_straylight
h5f.close()
LOG.info("Saving CRR hdf file done !")
def project(self, coverage):
"""Remaps the NWCSAF/MSG CRR to cartographic map-projection on
area give by a pre-registered area-id. Faster version of msg_remap!
"""
LOG.info("Projecting channel %s..." % (self.name))
region = coverage.out_area
dest_area = region.area_id
retv = MsgCRR()
retv.name = self.name
retv.package = self.package
retv.saf = self.saf
retv.product_name = self.product_name
retv.region_name = dest_area
retv.cfac = self.cfac
retv.lfac = self.lfac
retv.coff = self.coff
retv.loff = self.loff
retv.nb_param = self.nb_param
retv.gp_sc_id = self.gp_sc_id
retv.image_acquisition_time = self.image_acquisition_time
retv.spectral_channel_id = self.spectral_channel_id
retv.nominal_product_time = self.nominal_product_time
retv.sgs_product_quality = self.sgs_product_quality
retv.sgs_product_completeness = self.sgs_product_completeness
retv.product_algorithm_version = self.product_algorithm_version
retv.crr = coverage.project_array(self.crr)
retv.crr_palette = self.crr_palette
retv.crr_accum = coverage.project_array(self.crr_accum)
retv.crr_accum_palette = self.crr_accum_palette
retv.crr_intensity = coverage.project_array(self.crr_intensity)
retv.crr_intensity_palette = self.crr_intensity_palette
retv.crr_quality = coverage.project_array(self.crr_quality)
retv.crr_quality_palette = self.crr_quality_palette
#retv.processing_flags = \
# coverage.project_array(self.processing_flags)
retv.qc_straylight = self.qc_straylight
retv.region_name = dest_area
retv.area = region
retv.projection_name = region.proj_id
retv.pcs_def = pcs_def_from_region(region)
retv.num_of_columns = region.x_size
retv.num_of_lines = region.y_size
retv.xscale = region.pixel_size_x
retv.yscale = region.pixel_size_y
import pyproj
prj = pyproj.Proj(region.proj4_string)
aex = region.area_extent
lonur, latur = prj(aex[2], aex[3], inverse=True)
lonll, latll = prj(aex[0], aex[1], inverse=True)
retv.ll_lon = lonll
retv.ll_lat = latll
retv.ur_lon = lonur
retv.ur_lat = latur
        retv.shape = region.shape
retv.filled = True
retv.resolution = self.resolution
return retv
# def convert2nordrad(self):
# return NordRadCType(self)
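# The SPhR reader below calibrates with a boolean mask multiplied into the
# scaled data, so counts outside the valid range [8, 128] become 0 instead
# of nonsense physical values.  A minimal numpy illustration of that trick
# (scaling factor and offset are made-up numbers):
def _masked_calibration_example():
    """Scale counts to physical units, zeroing counts outside [8, 128]."""
    import numpy as np
    data = np.array([0, 8, 100, 200])
    scaling_factor, offset = 0.5, -2.0
    mask = (8 <= data) * (data <= 128)   # boolean mask, 0/1 per pixel
    return mask * (data * scaling_factor + offset)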
class MsgSPhRData(object):
"""NWCSAF/MSG Convective Rain Rate data layer
"""
def __init__(self):
self.data = None
self.scaling_factor = 1
self.offset = 0
self.num_of_lines = 0
self.num_of_columns = 0
self.product = ""
self.id = ""
class MsgSPhR(mpop.channel.GenericChannel):
"""NWCSAF/MSG SPhR data structure as retrieved from HDF5
file. Resolution sets the nominal resolution of the data.
    Palette handling is still missing.
"""
def __init__(self):
mpop.channel.GenericChannel.__init__(self, "SPhR")
self.filled = False
self.name = "SPhR"
# self.resolution = resolution
self.package = ""
self.saf = ""
self.product_name = ""
self.num_of_columns = 0
self.num_of_lines = 0
self.projection_name = ""
self.pcs_def = ""
self.xscale = 0
self.yscale = 0
self.ll_lon = 0.0
self.ll_lat = 0.0
self.ur_lon = 0.0
self.ur_lat = 0.0
self.region_name = ""
self.cfac = 0
self.lfac = 0
self.coff = 0
self.loff = 0
self.nb_param = 0
self.gp_sc_id = 0
self.image_acquisition_time = 0
self.spectral_channel_id = 0
self.nominal_product_time = 0
self.sgs_product_quality = 0
self.sgs_product_completeness = 0
self.product_algorithm_version = ""
self.sphr = None
self.sphr_bl = None
self.sphr_cape = None
self.sphr_diffbl = None
self.sphr_diffhl = None
self.sphr_diffki = None
self.sphr_diffli = None
self.sphr_diffml = None
self.sphr_diffshw = None
self.sphr_difftpw = None
self.sphr_hl = None
self.sphr_ki = None
self.sphr_li = None
self.sphr_ml = None
self.sphr_quality = None
self.sphr_sflag = None
self.sphr_shw = None
self.sphr_tpw = None
self.processing_flags = None
self.shape = None
self.satid = ""
self.qc_straylight = -1
self.sphr_bl_palette = None
self.sphr_cape_palette = None
self.sphr_diffbl_palette = None
self.sphr_diffhl_palette = None
self.sphr_diffki_palette = None
self.sphr_diffli_palette = None
self.sphr_diffml_palette = None
self.sphr_diffshw_palette = None
self.sphr_difftpw_palette = None
self.sphr_hl_palette = None
self.sphr_ki_palette = None
self.sphr_li_palette = None
self.sphr_ml_palette = None
self.sphr_quality_palette = None
self.sphr_sflag_palette = None
self.sphr_shw_palette = None
self.sphr_tpw_palette = None
def __str__(self):
return ("'%s: shape %s, resolution %sm'" %
(self.name,
self.sphr_bl.shape,
self.resolution))
def is_loaded(self):
"""Tells if the channel contains loaded data.
"""
return self.filled
# ------------------------------------------------------------------
def read(self, filename, calibrate=True):
"""Reader for the . Use *filename* to read data.
"""
import h5py
        # Is the first line necessary?
self.sphr = MsgSPhRData()
self.sphr_bl = MsgSPhRData()
self.sphr_cape = MsgSPhRData()
self.sphr_diffbl = MsgSPhRData()
self.sphr_diffhl = MsgSPhRData()
self.sphr_diffki = MsgSPhRData()
self.sphr_diffli = MsgSPhRData()
self.sphr_diffml = MsgSPhRData()
self.sphr_diffshw = MsgSPhRData()
self.sphr_difftpw = MsgSPhRData()
self.sphr_hl = MsgSPhRData()
self.sphr_ki = MsgSPhRData()
self.sphr_li = MsgSPhRData()
self.sphr_ml = MsgSPhRData()
self.sphr_quality = MsgSPhRData()
self.sphr_sflag = MsgSPhRData()
self.sphr_shw = MsgSPhRData()
self.sphr_tpw = MsgSPhRData()
self.processing_flags = MsgSPhRData()
LOG.debug("Filename = <" + str(filename) + ">")
h5f = h5py.File(filename, 'r')
# pylint: disable-msg=W0212
self.package = h5f.attrs["PACKAGE"]
self.saf = h5f.attrs["SAF"]
self.product_name = h5f.attrs["PRODUCT_NAME"]
self.num_of_columns = h5f.attrs["NC"]
self.num_of_lines = h5f.attrs["NL"]
self.projection_name = h5f.attrs["PROJECTION_NAME"]
self.region_name = h5f.attrs["REGION_NAME"]
self.cfac = h5f.attrs["CFAC"]
self.lfac = h5f.attrs["LFAC"]
self.coff = h5f.attrs["COFF"]
self.loff = h5f.attrs["LOFF"]
self.nb_param = h5f.attrs["NB_PARAMETERS"]
self.gp_sc_id = h5f.attrs["GP_SC_ID"]
self.image_acquisition_time = h5f.attrs["IMAGE_ACQUISITION_TIME"]
self.spectral_channel_id = h5f.attrs["SPECTRAL_CHANNEL_ID"]
self.nominal_product_time = h5f.attrs["NOMINAL_PRODUCT_TIME"]
self.sgs_product_quality = h5f.attrs["SGS_PRODUCT_QUALITY"]
self.sgs_product_completeness = h5f.attrs["SGS_PRODUCT_COMPLETENESS"]
self.product_algorithm_version = h5f.attrs["PRODUCT_ALGORITHM_VERSION"]
# pylint: enable-msg=W0212
# ------------------------
# The SPhR BL data
h5d = h5f['SPhR_BL']
self.sphr_bl.data = h5d[:, :]
self.sphr_bl.scaling_factor = h5d.attrs["SCALING_FACTOR"]
self.sphr_bl.offset = h5d.attrs["OFFSET"]
self.sphr_bl.num_of_lines = h5d.attrs["N_LINES"]
self.sphr_bl.num_of_columns = h5d.attrs["N_COLS"]
self.shape = (self.sphr_bl.num_of_lines,
self.sphr_bl.num_of_columns)
self.sphr_bl.product = h5d.attrs["PRODUCT"]
self.sphr_bl.id = h5d.attrs["ID"]
if calibrate:
mask = ( 8 <= self.sphr_bl.data ) * ( self.sphr_bl.data <= 128 )
# apply scaling factor and offset
self.sphr_bl = mask * (self.sphr_bl.data *
self.sphr_bl.scaling_factor +
self.sphr_bl.offset)
else:
self.sphr_bl = self.sphr_bl.data
self.sphr_bl_palette = _get_palette(h5f, 'SPhR_BL') / 255.0
# The SPhR Cape data
h5d = h5f['SPhR_CAPE']
self.sphr_cape.data = h5d[:, :]
self.sphr_cape.scaling_factor = h5d.attrs["SCALING_FACTOR"]
self.sphr_cape.offset = h5d.attrs["OFFSET"]
self.sphr_cape.num_of_lines = h5d.attrs["N_LINES"]
self.sphr_cape.num_of_columns = h5d.attrs["N_COLS"]
self.shape = (self.sphr_cape.num_of_lines,
self.sphr_cape.num_of_columns)
self.sphr_cape.product = h5d.attrs["PRODUCT"]
self.sphr_cape.id = h5d.attrs["ID"]
if calibrate:
mask = ( 128 < self.sphr_cape.data )
# apply scaling factor and offset
self.sphr_cape = mask * (self.sphr_cape.data *
self.sphr_cape.scaling_factor +
self.sphr_cape.offset)
else:
self.sphr_cape = self.sphr_cape.data
#self.sphr_cape_palette = _get_palette(h5f, 'SPhR_CAPE') / 255.0
# The SPhR DIFFBL data
h5d = h5f['SPhR_DIFFBL']
self.sphr_diffbl.data = h5d[:, :]
self.sphr_diffbl.scaling_factor = h5d.attrs["SCALING_FACTOR"]
self.sphr_diffbl.offset = h5d.attrs["OFFSET"]
self.sphr_diffbl.num_of_lines = h5d.attrs["N_LINES"]
self.sphr_diffbl.num_of_columns = h5d.attrs["N_COLS"]
self.shape = (self.sphr_diffbl.num_of_lines,
self.sphr_diffbl.num_of_columns)
self.sphr_diffbl.product = h5d.attrs["PRODUCT"]
self.sphr_diffbl.id = h5d.attrs["ID"]
if calibrate:
mask = ( 8 <= self.sphr_diffbl.data ) * ( self.sphr_diffbl.data <= 128 )
# apply scaling factor and offset
self.sphr_diffbl = mask * (self.sphr_diffbl.data *
self.sphr_diffbl.scaling_factor +
self.sphr_diffbl.offset)
else:
self.sphr_diffbl = self.sphr_diffbl.data
self.sphr_diffbl_palette = _get_palette(h5f, 'SPhR_DIFFBL') / 255.0
# The SPhR DIFFHL data
h5d = h5f['SPhR_DIFFHL']
self.sphr_diffhl.data = h5d[:, :]
self.sphr_diffhl.scaling_factor = h5d.attrs["SCALING_FACTOR"]
self.sphr_diffhl.offset = h5d.attrs["OFFSET"]
self.sphr_diffhl.num_of_lines = h5d.attrs["N_LINES"]
self.sphr_diffhl.num_of_columns = h5d.attrs["N_COLS"]
self.shape = (self.sphr_diffhl.num_of_lines,
self.sphr_diffhl.num_of_columns)
self.sphr_diffhl.product = h5d.attrs["PRODUCT"]
self.sphr_diffhl.id = h5d.attrs["ID"]
if calibrate:
mask = ( 8 <= self.sphr_diffhl.data ) * ( self.sphr_diffhl.data <= 128 )
# apply scaling factor and offset
self.sphr_diffhl = mask * (self.sphr_diffhl.data *
self.sphr_diffhl.scaling_factor +
self.sphr_diffhl.offset)
else:
self.sphr_diffhl = self.sphr_diffhl.data
self.sphr_diffhl_palette = _get_palette(h5f, 'SPhR_DIFFHL') / 255.0
# The SPhR DIFFKI data
h5d = h5f['SPhR_DIFFKI']
self.sphr_diffki.data = h5d[:, :]
self.sphr_diffki.scaling_factor = h5d.attrs["SCALING_FACTOR"]
self.sphr_diffki.offset = h5d.attrs["OFFSET"]
self.sphr_diffki.num_of_lines = h5d.attrs["N_LINES"]
self.sphr_diffki.num_of_columns = h5d.attrs["N_COLS"]
self.shape = (self.sphr_diffki.num_of_lines,
self.sphr_diffki.num_of_columns)
self.sphr_diffki.product = h5d.attrs["PRODUCT"]
self.sphr_diffki.id = h5d.attrs["ID"]
if calibrate:
mask = ( 8 <= self.sphr_diffki.data ) * ( self.sphr_diffki.data <= 128 )
# apply scaling factor and offset
self.sphr_diffki = mask * (self.sphr_diffki.data *
self.sphr_diffki.scaling_factor +
self.sphr_diffki.offset)
else:
self.sphr_diffki = self.sphr_diffki.data
self.sphr_diffki_palette = _get_palette(h5f, 'SPhR_DIFFKI') / 255.0
# The SPhR DIFFLI data
h5d = h5f['SPhR_DIFFLI']
self.sphr_diffli.data = h5d[:, :]
self.sphr_diffli.scaling_factor = h5d.attrs["SCALING_FACTOR"]
self.sphr_diffli.offset = h5d.attrs["OFFSET"]
self.sphr_diffli.num_of_lines = h5d.attrs["N_LINES"]
self.sphr_diffli.num_of_columns = h5d.attrs["N_COLS"]
self.shape = (self.sphr_diffli.num_of_lines,
self.sphr_diffli.num_of_columns)
self.sphr_diffli.product = h5d.attrs["PRODUCT"]
self.sphr_diffli.id = h5d.attrs["ID"]
if calibrate:
mask = ( 8 <= self.sphr_diffli.data ) * ( self.sphr_diffli.data <= 128 )
# apply scaling factor and offset
self.sphr_diffli = mask * (self.sphr_diffli.data *
self.sphr_diffli.scaling_factor +
self.sphr_diffli.offset)
else:
            self.sphr_diffli = self.sphr_diffli.data
self.sphr_diffli_palette = _get_palette(h5f, 'SPhR_DIFFLI') / 255.0
# The SPhR DIFFML data
h5d = h5f['SPhR_DIFFML']
self.sphr_diffml.data = h5d[:, :]
self.sphr_diffml.scaling_factor = h5d.attrs["SCALING_FACTOR"]
self.sphr_diffml.offset = h5d.attrs["OFFSET"]
self.sphr_diffml.num_of_lines = h5d.attrs["N_LINES"]
self.sphr_diffml.num_of_columns = h5d.attrs["N_COLS"]
self.shape = (self.sphr_diffml.num_of_lines,
self.sphr_diffml.num_of_columns)
self.sphr_diffml.product = h5d.attrs["PRODUCT"]
self.sphr_diffml.id = h5d.attrs["ID"]
if calibrate:
mask = ( 8 <= self.sphr_diffml.data ) * ( self.sphr_diffml.data <= 128 )
# apply scaling factor and offset
self.sphr_diffml = mask * (self.sphr_diffml.data *
self.sphr_diffml.scaling_factor +
self.sphr_diffml.offset)
else:
self.sphr_diffml = self.sphr_diffml.data
self.sphr_diffml_palette = _get_palette(h5f, 'SPhR_DIFFML') / 255.0
# The SPhR DIFFSHW data
h5d = h5f['SPhR_DIFFSHW']
self.sphr_diffshw.data = h5d[:, :]
self.sphr_diffshw.scaling_factor = h5d.attrs["SCALING_FACTOR"]
self.sphr_diffshw.offset = h5d.attrs["OFFSET"]
self.sphr_diffshw.num_of_lines = h5d.attrs["N_LINES"]
self.sphr_diffshw.num_of_columns = h5d.attrs["N_COLS"]
self.shape = (self.sphr_diffshw.num_of_lines,
self.sphr_diffshw.num_of_columns)
self.sphr_diffshw.product = h5d.attrs["PRODUCT"]
self.sphr_diffshw.id = h5d.attrs["ID"]
if calibrate:
mask = ( 8 <= self.sphr_diffshw.data ) * ( self.sphr_diffshw.data <= 128 )
# apply scaling factor and offset
self.sphr_diffshw = mask * (self.sphr_diffshw.data *
self.sphr_diffshw.scaling_factor +
self.sphr_diffshw.offset)
else:
self.sphr_diffshw = self.sphr_diffshw.data
self.sphr_diffshw_palette = _get_palette(h5f, 'SPhR_DIFFSHW') / 255.0
# The SPhR DIFFTPW data
h5d = h5f['SPhR_DIFFTPW']
self.sphr_difftpw.data = h5d[:, :]
self.sphr_difftpw.scaling_factor = h5d.attrs["SCALING_FACTOR"]
self.sphr_difftpw.offset = h5d.attrs["OFFSET"]
self.sphr_difftpw.num_of_lines = h5d.attrs["N_LINES"]
self.sphr_difftpw.num_of_columns = h5d.attrs["N_COLS"]
self.shape = (self.sphr_difftpw.num_of_lines,
self.sphr_difftpw.num_of_columns)
self.sphr_difftpw.product = h5d.attrs["PRODUCT"]
self.sphr_difftpw.id = h5d.attrs["ID"]
if calibrate:
mask = ( 8 <= self.sphr_difftpw.data ) * ( self.sphr_difftpw.data <= 128 )
# apply scaling factor and offset
self.sphr_difftpw = mask * (self.sphr_difftpw.data *
self.sphr_difftpw.scaling_factor +
self.sphr_difftpw.offset)
else:
self.sphr_difftpw = self.sphr_difftpw.data
self.sphr_difftpw_palette = _get_palette(h5f, 'SPhR_DIFFTPW') / 255.0
# The SPhR HL data
h5d = h5f['SPhR_HL']
self.sphr_hl.data = h5d[:, :]
self.sphr_hl.scaling_factor = h5d.attrs["SCALING_FACTOR"]
self.sphr_hl.offset = h5d.attrs["OFFSET"]
self.sphr_hl.num_of_lines = h5d.attrs["N_LINES"]
self.sphr_hl.num_of_columns = h5d.attrs["N_COLS"]
self.shape = (self.sphr_hl.num_of_lines,
self.sphr_hl.num_of_columns)
self.sphr_hl.product = h5d.attrs["PRODUCT"]
self.sphr_hl.id = h5d.attrs["ID"]
if calibrate:
mask = ( 8 <= self.sphr_hl.data ) * ( self.sphr_hl.data <= 128 )
# apply scaling factor and offset
self.sphr_hl = mask * (self.sphr_hl.data *
self.sphr_hl.scaling_factor +
self.sphr_hl.offset)
else:
self.sphr_hl = self.sphr_hl.data
self.sphr_hl_palette = _get_palette(h5f, 'SPhR_HL') / 255.0
# The SPhR KI data
h5d = h5f['SPhR_KI']
self.sphr_ki.data = h5d[:, :]
self.sphr_ki.scaling_factor = h5d.attrs["SCALING_FACTOR"]
self.sphr_ki.offset = h5d.attrs["OFFSET"]
self.sphr_ki.num_of_lines = h5d.attrs["N_LINES"]
self.sphr_ki.num_of_columns = h5d.attrs["N_COLS"]
self.shape = (self.sphr_ki.num_of_lines,
self.sphr_ki.num_of_columns)
self.sphr_ki.product = h5d.attrs["PRODUCT"]
self.sphr_ki.id = h5d.attrs["ID"]
if calibrate:
mask = ( 8 <= self.sphr_ki.data ) * ( self.sphr_ki.data <= 128 )
# apply scaling factor and offset
self.sphr_ki = mask * (self.sphr_ki.data *
self.sphr_ki.scaling_factor +
self.sphr_ki.offset)
else:
self.sphr_ki = self.sphr_ki.data
self.sphr_ki_palette = _get_palette(h5f, 'SPhR_KI') / 255.0
# The SPhR LI data
h5d = h5f['SPhR_LI']
self.sphr_li.data = h5d[:, :]
self.sphr_li.scaling_factor = h5d.attrs["SCALING_FACTOR"]
self.sphr_li.offset = h5d.attrs["OFFSET"]
self.sphr_li.num_of_lines = h5d.attrs["N_LINES"]
self.sphr_li.num_of_columns = h5d.attrs["N_COLS"]
self.shape = (self.sphr_li.num_of_lines,
self.sphr_li.num_of_columns)
self.sphr_li.product = h5d.attrs["PRODUCT"]
self.sphr_li.id = h5d.attrs["ID"]
if calibrate:
mask = ( 8 <= self.sphr_li.data ) * ( self.sphr_li.data <= 128 )
# apply scaling factor and offset
self.sphr_li = mask * (self.sphr_li.data *
self.sphr_li.scaling_factor +
self.sphr_li.offset)
else:
self.sphr_li = self.sphr_li.data
self.sphr_li_palette = _get_palette(h5f, 'SPhR_LI') / 255.0
# The SPhR ML data
h5d = h5f['SPhR_ML']
self.sphr_ml.data = h5d[:, :]
self.sphr_ml.scaling_factor = h5d.attrs["SCALING_FACTOR"]
self.sphr_ml.offset = h5d.attrs["OFFSET"]
self.sphr_ml.num_of_lines = h5d.attrs["N_LINES"]
self.sphr_ml.num_of_columns = h5d.attrs["N_COLS"]
self.shape = (self.sphr_ml.num_of_lines,
self.sphr_ml.num_of_columns)
self.sphr_ml.product = h5d.attrs["PRODUCT"]
self.sphr_ml.id = h5d.attrs["ID"]
if calibrate:
mask = ( 8 <= self.sphr_ml.data ) * ( self.sphr_ml.data <= 128 )
# apply scaling factor and offset
self.sphr_ml = mask * (self.sphr_ml.data *
self.sphr_ml.scaling_factor +
self.sphr_ml.offset)
else:
self.sphr_ml = self.sphr_ml.data
self.sphr_ml_palette = _get_palette(h5f, 'SPhR_ML') / 255.0
# The SPhR QUALITY data
h5d = h5f['SPhR_QUALITY']
self.sphr_quality.data = h5d[:, :]
self.sphr_quality.scaling_factor = h5d.attrs["SCALING_FACTOR"]
self.sphr_quality.offset = h5d.attrs["OFFSET"]
self.sphr_quality.num_of_lines = h5d.attrs["N_LINES"]
self.sphr_quality.num_of_columns = h5d.attrs["N_COLS"]
self.shape = (self.sphr_quality.num_of_lines,
self.sphr_quality.num_of_columns)
self.sphr_quality.product = h5d.attrs["PRODUCT"]
self.sphr_quality.id = h5d.attrs["ID"]
if calibrate:
mask = (self.sphr_quality.data != 0 )
# apply scaling factor and offset
self.sphr_quality = mask * (self.sphr_quality.data *
self.sphr_quality.scaling_factor +
self.sphr_quality.offset)
else:
self.sphr_quality = self.sphr_quality.data
# The SPhR SFLAG data
h5d = h5f['SPhR_SFLAG']
self.sphr_sflag.data = h5d[:, :]
self.sphr_sflag.scaling_factor = h5d.attrs["SCALING_FACTOR"]
self.sphr_sflag.offset = h5d.attrs["OFFSET"]
self.sphr_sflag.num_of_lines = h5d.attrs["N_LINES"]
self.sphr_sflag.num_of_columns = h5d.attrs["N_COLS"]
self.shape = (self.sphr_sflag.num_of_lines,
self.sphr_sflag.num_of_columns)
self.sphr_sflag.product = h5d.attrs["PRODUCT"]
self.sphr_sflag.id = h5d.attrs["ID"]
self.sphr_sflag = self.sphr_sflag.data
# The SPhR SHW data
h5d = h5f['SPhR_SHW']
self.sphr_shw.data = h5d[:, :]
self.sphr_shw.scaling_factor = h5d.attrs["SCALING_FACTOR"]
self.sphr_shw.offset = h5d.attrs["OFFSET"]
self.sphr_shw.num_of_lines = h5d.attrs["N_LINES"]
self.sphr_shw.num_of_columns = h5d.attrs["N_COLS"]
self.shape = (self.sphr_shw.num_of_lines,
self.sphr_shw.num_of_columns)
self.sphr_shw.product = h5d.attrs["PRODUCT"]
self.sphr_shw.id = h5d.attrs["ID"]
if calibrate:
mask = ( 8 <= self.sphr_shw.data ) * ( self.sphr_shw.data <= 128 )
# apply scaling factor and offset
self.sphr_shw = mask * (self.sphr_shw.data *
self.sphr_shw.scaling_factor +
self.sphr_shw.offset)
else:
self.sphr_shw = self.sphr_shw.data
self.sphr_shw_palette = _get_palette(h5f, 'SPhR_SHW') / 255.0
# The SPhR TPW data
h5d = h5f['SPhR_TPW']
self.sphr_tpw.data = h5d[:, :]
self.sphr_tpw.scaling_factor = h5d.attrs["SCALING_FACTOR"]
self.sphr_tpw.offset = h5d.attrs["OFFSET"]
self.sphr_tpw.num_of_lines = h5d.attrs["N_LINES"]
self.sphr_tpw.num_of_columns = h5d.attrs["N_COLS"]
self.shape = (self.sphr_tpw.num_of_lines,
self.sphr_tpw.num_of_columns)
self.sphr_tpw.product = h5d.attrs["PRODUCT"]
self.sphr_tpw.id = h5d.attrs["ID"]
if calibrate:
mask = ( 8 <= self.sphr_tpw.data ) * ( self.sphr_tpw.data <= 128 )
# apply scaling factor and offset
self.sphr_tpw = mask * (self.sphr_tpw.data *
self.sphr_tpw.scaling_factor +
self.sphr_tpw.offset)
            print "... SPhR_TPW min/max:", self.sphr_tpw.min(), self.sphr_tpw.max()
else:
self.sphr_tpw = self.sphr_tpw.data
self.sphr_tpw_palette = _get_palette(h5f, 'SPhR_TPW') / 255.0
# ------------------------
h5f.close()
#self.sphr = self.sphr.data
#self.sphr_bl = self.sphr_bl.data
#self.sphr_cape = self.sphr_cape.data
#self.sphr_diffbl = self.sphr_diffbl.data
#self.sphr_diffhl = self.sphr_diffhl.data
#self.sphr_diffki = self.sphr_diffki.data
#self.sphr_diffli = self.sphr_diffli.data
#self.sphr_diffml = self.sphr_diffml.data
#self.sphr_diffshw = self.sphr_diffshw.data
#self.sphr_difftpw = self.sphr_difftpw.data
#self.sphr_hl = self.sphr_hl.data
#self.sphr_ki = self.sphr_ki.data
#self.sphr_li = self.sphr_li.data
#self.sphr_ml = self.sphr_ml.data
#self.sphr_quality = self.sphr_quality.data
#self.sphr_sflag = self.sphr_sflag.data
#self.sphr_shw = self.sphr_shw.data
#self.sphr_tpw = self.sphr_tpw.data
self.processing_flags = self.processing_flags.data
self.area = get_area_from_file(filename)
self.filled = True
def project(self, coverage):
"""Remaps the NWCSAF/MSG CRR to cartographic map-projection on
area give by a pre-registered area-id. Faster version of msg_remap!
"""
LOG.info("Projecting channel %s..." % (self.name))
region = coverage.out_area
dest_area = region.area_id
retv = MsgSPhR()
retv.name = self.name
retv.package = self.package
retv.saf = self.saf
retv.product_name = self.product_name
retv.region_name = dest_area
retv.cfac = self.cfac
retv.lfac = self.lfac
retv.coff = self.coff
retv.loff = self.loff
retv.nb_param = self.nb_param
retv.gp_sc_id = self.gp_sc_id
retv.image_acquisition_time = self.image_acquisition_time
retv.spectral_channel_id = self.spectral_channel_id
retv.nominal_product_time = self.nominal_product_time
retv.sgs_product_quality = self.sgs_product_quality
retv.sgs_product_completeness = self.sgs_product_completeness
retv.product_algorithm_version = self.product_algorithm_version
retv.sphr_bl = coverage.project_array(self.sphr_bl)
retv.sphr_bl_palette = self.sphr_bl_palette
retv.sphr_ml = coverage.project_array(self.sphr_ml)
retv.sphr_ml_palette = self.sphr_ml_palette
retv.sphr_hl = coverage.project_array(self.sphr_hl)
retv.sphr_hl_palette = self.sphr_hl_palette
retv.sphr_ki = coverage.project_array(self.sphr_ki)
retv.sphr_ki_palette = self.sphr_ki_palette
retv.sphr_li = coverage.project_array(self.sphr_li)
retv.sphr_li_palette = self.sphr_li_palette
retv.sphr_tpw = coverage.project_array(self.sphr_tpw)
retv.sphr_tpw_palette = self.sphr_tpw_palette
retv.sphr_cape = coverage.project_array(self.sphr_cape)
# no sphr_cape_palette
retv.sphr_quality = coverage.project_array(self.sphr_quality)
# no sphr_quality_palette
retv.sphr_sflag = coverage.project_array(self.sphr_sflag)
# no sphr_sflag_palette
retv.sphr_shw = coverage.project_array(self.sphr_shw)
retv.sphr_shw_palette = self.sphr_shw_palette
retv.sphr_diffbl = coverage.project_array(self.sphr_diffbl)
retv.sphr_diffbl_palette = self.sphr_diffbl_palette
retv.sphr_diffml = coverage.project_array(self.sphr_diffml)
retv.sphr_diffml_palette = self.sphr_diffml_palette
retv.sphr_diffhl = coverage.project_array(self.sphr_diffhl)
retv.sphr_diffhl_palette = self.sphr_diffhl_palette
retv.sphr_diffki = coverage.project_array(self.sphr_diffki)
retv.sphr_diffki_palette = self.sphr_diffki_palette
retv.sphr_diffli = coverage.project_array(self.sphr_diffli)
retv.sphr_diffli_palette = self.sphr_diffli_palette
retv.sphr_difftpw = coverage.project_array(self.sphr_difftpw)
retv.sphr_difftpw_palette = self.sphr_difftpw_palette
retv.sphr_diffshw = coverage.project_array(self.sphr_diffshw)
retv.sphr_diffshw_palette = self.sphr_diffshw_palette
# retv.processing_flags = \
# coverage.project_array(self.processing_flags)
retv.qc_straylight = self.qc_straylight
retv.region_name = dest_area
retv.area = region
retv.projection_name = region.proj_id
retv.pcs_def = pcs_def_from_region(region)
retv.num_of_columns = region.x_size
retv.num_of_lines = region.y_size
retv.xscale = region.pixel_size_x
retv.yscale = region.pixel_size_y
import pyproj
prj = pyproj.Proj(region.proj4_string)
aex = region.area_extent
lonur, latur = prj(aex[2], aex[3], inverse=True)
lonll, latll = prj(aex[0], aex[1], inverse=True)
retv.ll_lon = lonll
retv.ll_lat = latll
retv.ur_lon = lonur
retv.ur_lat = latur
self.shape = region.shape
retv.filled = True
retv.resolution = self.resolution
return retv
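# Illustrative sketch (not part of the original module): the project()
# methods in this file recover the corner longitudes/latitudes of the
# target area by inverse-projecting its extent with pyproj, as done just
# above. A standalone version of the pattern:
import pyproj

def corners_lonlat(proj4_string, area_extent):
    """Return ((ll_lon, ll_lat), (ur_lon, ur_lat)) for an area extent."""
    prj = pyproj.Proj(proj4_string)
    xll, yll, xur, yur = area_extent
    return prj(xll, yll, inverse=True), prj(xur, yur, inverse=True)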
class MsgPCPhData(object):
"""NWCSAF/MSG PCPh data layer
"""
def __init__(self):
self.data = None
self.scaling_factor = 1
self.offset = 0
self.num_of_lines = 0
self.num_of_columns = 0
self.product = ""
self.id = ""
class MsgPCPh(mpop.channel.GenericChannel):
"""NWCSAF/MSG PCPh data structure as retrieved from HDF5
file. Resolution sets the nominal resolution of the data.
The palette is currently missing.
"""
def __init__(self):
mpop.channel.GenericChannel.__init__(self, "PCPh")
self.filled = False
self.name = "PCPh"
# self.resolution = resolution
self.package = ""
self.saf = ""
self.product_name = ""
self.num_of_columns = 0
self.num_of_lines = 0
self.projection_name = ""
self.pcs_def = ""
self.xscale = 0
self.yscale = 0
self.ll_lon = 0.0
self.ll_lat = 0.0
self.ur_lon = 0.0
self.ur_lat = 0.0
self.region_name = ""
self.cfac = 0
self.lfac = 0
self.coff = 0
self.loff = 0
self.nb_param = 0
self.gp_sc_id = 0
self.image_acquisition_time = 0
self.spectral_channel_id = 0
self.nominal_product_time = 0
self.sgs_product_quality = 0
self.sgs_product_completeness = 0
self.product_algorithm_version = ""
self.pcph = None
self.pcph_pc = None
self.pcph_quality = None
self.pcph_dataflag = None
self.processing_flags = None
self.shape = None
self.satid = ""
self.qc_straylight = -1
self.pcph_pc_palette = None
self.pcph_quality_palette = None
self.pcph_sflag_palette = None
def __str__(self):
return ("'%s: shape %s, resolution %sm'" %
(self.name,
self.pcph_pc.shape,
self.resolution))
def is_loaded(self):
"""Tells if the channel contains loaded data.
"""
return self.filled
# ------------------------------------------------------------------
def read(self, filename, calibrate=True):
"""Reader for the . Use *filename* to read data.
"""
import h5py
# Is the first line really necessary?
self.pcph = MsgPCPhData()
self.pcph_pc = MsgPCPhData()
self.pcph_quality = MsgPCPhData()
self.pcph_dataflag = MsgPCPhData()
self.processing_flags = MsgPCPhData()
LOG.debug("Filename = <" + str(filename) + ">")
h5f = h5py.File(filename, 'r')
# pylint: disable-msg=W0212
self.package = h5f.attrs["PACKAGE"]
self.saf = h5f.attrs["SAF"]
self.product_name = h5f.attrs["PRODUCT_NAME"]
self.num_of_columns = h5f.attrs["NC"]
self.num_of_lines = h5f.attrs["NL"]
self.projection_name = h5f.attrs["PROJECTION_NAME"]
self.region_name = h5f.attrs["REGION_NAME"]
self.cfac = h5f.attrs["CFAC"]
self.lfac = h5f.attrs["LFAC"]
self.coff = h5f.attrs["COFF"]
self.loff = h5f.attrs["LOFF"]
self.nb_param = h5f.attrs["NB_PARAMETERS"]
self.gp_sc_id = h5f.attrs["GP_SC_ID"]
self.image_acquisition_time = h5f.attrs["IMAGE_ACQUISITION_TIME"]
self.spectral_channel_id = h5f.attrs["SPECTRAL_CHANNEL_ID"]
self.nominal_product_time = h5f.attrs["NOMINAL_PRODUCT_TIME"]
self.sgs_product_quality = h5f.attrs["SGS_PRODUCT_QUALITY"]
self.sgs_product_completeness = h5f.attrs["SGS_PRODUCT_COMPLETENESS"]
self.product_algorithm_version = h5f.attrs["PRODUCT_ALGORITHM_VERSION"]
# pylint: enable-msg=W0212
# ------------------------
# The PCPh PC data
h5d = h5f['PCPh_PC']
self.pcph_pc.data = h5d[:, :]
self.pcph_pc.scaling_factor = h5d.attrs["SCALING_FACTOR"]
self.pcph_pc.offset = h5d.attrs["OFFSET"]
self.pcph_pc.num_of_lines = h5d.attrs["N_LINES"]
self.pcph_pc.num_of_columns = h5d.attrs["N_COLS"]
self.shape = (self.pcph_pc.num_of_lines,
self.pcph_pc.num_of_columns)
self.pcph_pc.product = h5d.attrs["PRODUCT"]
self.pcph_pc.id = h5d.attrs["ID"]
if calibrate:
self.pcph_pc = (self.pcph_pc.data *
self.pcph_pc.scaling_factor +
self.pcph_pc.offset)
else:
self.pcph_pc = self.pcph_pc.data
self.pcph_pc_palette = _get_palette(h5f, 'PCPh_PC') / 255.0
# The PCPh QUALITY data
h5d = h5f['PCPh_QUALITY']
self.pcph_quality.data = h5d[:, :]
self.pcph_quality.scaling_factor = h5d.attrs["SCALING_FACTOR"]
self.pcph_quality.offset = h5d.attrs["OFFSET"]
self.pcph_quality.num_of_lines = h5d.attrs["N_LINES"]
self.pcph_quality.num_of_columns = h5d.attrs["N_COLS"]
self.shape = (self.pcph_quality.num_of_lines,
self.pcph_quality.num_of_columns)
self.pcph_quality.product = h5d.attrs["PRODUCT"]
self.pcph_quality.id = h5d.attrs["ID"]
# The PPh DATA FLAG data
h5d = h5f['PCPh_DATAFLAG']
self.pcph_dataflag.data = h5d[:, :]
self.pcph_dataflag.scaling_factor = h5d.attrs["SCALING_FACTOR"]
self.pcph_dataflag.offset = h5d.attrs["OFFSET"]
self.pcph_dataflag.num_of_lines = h5d.attrs["N_LINES"]
self.pcph_dataflag.num_of_columns = h5d.attrs["N_COLS"]
self.shape = (self.pcph_dataflag.num_of_lines,
self.pcph_dataflag.num_of_columns)
self.pcph_dataflag.product = h5d.attrs["PRODUCT"]
self.pcph_dataflag.id = h5d.attrs["ID"]
# ------------------------
h5f.close()
self.processing_flags = self.processing_flags.data
self.area = get_area_from_file(filename)
self.filled = True
def project(self, coverage):
"""Remaps the NWCSAF/MSG PCPh to cartographic map-projection on
area give by a pre-registered area-id. Faster version of msg_remap!
"""
LOG.info("Projecting channel %s..." % (self.name))
region = coverage.out_area
dest_area = region.area_id
retv = MsgPCPh()
retv.name = self.name
retv.package = self.package
retv.saf = self.saf
retv.product_name = self.product_name
retv.region_name = dest_area
retv.cfac = self.cfac
retv.lfac = self.lfac
retv.coff = self.coff
retv.loff = self.loff
retv.nb_param = self.nb_param
retv.gp_sc_id = self.gp_sc_id
retv.image_acquisition_time = self.image_acquisition_time
retv.spectral_channel_id = self.spectral_channel_id
retv.nominal_product_time = self.nominal_product_time
retv.sgs_product_quality = self.sgs_product_quality
retv.sgs_product_completeness = self.sgs_product_completeness
retv.product_algorithm_version = self.product_algorithm_version
retv.pcph_pc = coverage.project_array(self.pcph_pc)
retv.pcph_pc_palette = self.pcph_pc_palette
#retv.processing_flags = \
# coverage.project_array(self.processing_flags)
retv.qc_straylight = self.qc_straylight
retv.region_name = dest_area
retv.area = region
retv.projection_name = region.proj_id
retv.pcs_def = pcs_def_from_region(region)
retv.num_of_columns = region.x_size
retv.num_of_lines = region.y_size
retv.xscale = region.pixel_size_x
retv.yscale = region.pixel_size_y
import pyproj
prj = pyproj.Proj(region.proj4_string)
aex = region.area_extent
lonur, latur = prj(aex[2], aex[3], inverse=True)
lonll, latll = prj(aex[0], aex[1], inverse=True)
retv.ll_lon = lonll
retv.ll_lat = latll
retv.ur_lon = lonur
retv.ur_lat = latur
self.shape = region.shape
retv.filled = True
retv.resolution = self.resolution
return retv
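# Illustrative sketch (not part of the original module): every calibrated
# dataset in these readers is converted from raw counts to physical values
# with the HDF5 SCALING_FACTOR and OFFSET attributes, i.e.
# value = counts * scaling_factor + offset. As a standalone helper:
import numpy as np

def calibrate(counts, scaling_factor, offset):
    """Convert raw product counts to physical values."""
    return counts * scaling_factor + offset

# e.g. calibrate(np.array([10, 20]), 0.5, -5.0) -> array([ 0.,  5.])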
class MsgCRPhData(object):
"""NWCSAF/MSG CRPh layer
"""
def __init__(self):
self.data = None
self.scaling_factor = 1
self.offset = 0
self.num_of_lines = 0
self.num_of_columns = 0
self.product = ""
self.id = ""
class MsgCRPh(mpop.channel.GenericChannel):
"""NWCSAF/MSG CRPh data structure as retrieved from HDF5
file. Resolution sets the nominal resolution of the data.
The palette is currently missing.
"""
def __init__(self):
mpop.channel.GenericChannel.__init__(self, "CRPh")
self.filled = False
self.name = "CRPh"
# self.resolution = resolution
self.package = ""
self.saf = ""
self.product_name = ""
self.num_of_columns = 0
self.num_of_lines = 0
self.projection_name = ""
self.pcs_def = ""
self.xscale = 0
self.yscale = 0
self.ll_lon = 0.0
self.ll_lat = 0.0
self.ur_lon = 0.0
self.ur_lat = 0.0
self.region_name = ""
self.cfac = 0
self.lfac = 0
self.coff = 0
self.loff = 0
self.nb_param = 0
self.gp_sc_id = 0
self.image_acquisition_time = 0
self.spectral_channel_id = 0
self.nominal_product_time = 0
self.sgs_product_quality = 0
self.sgs_product_completeness = 0
self.product_algorithm_version = ""
self.crph = None
self.crph_crr = None
self.crph_accum = None
self.crph_iqf = None
self.crph_quality = None
self.crph_dataflag = None
self.processing_flags = None
self.shape = None
self.satid = ""
self.qc_straylight = -1
self.crph_crr_palette = None
self.crph_accum_palette = None
self.crph_quality_palette = None
self.crph_sflag_palette = None
def __str__(self):
return ("'%s: shape %s, resolution %sm'" %
(self.name,
self.crph_crr.shape,
self.resolution))
def is_loaded(self):
"""Tells if the channel contains loaded data.
"""
return self.filled
# ------------------------------------------------------------------
def read(self, filename, calibrate=True):
"""Reader for the . Use *filename* to read data.
"""
import h5py
# Is the first line really necessary?
self.crph = MsgCRPhData()
self.crph_crr = MsgCRPhData()
self.crph_accum = MsgCRPhData()
self.crph_iqf = MsgCRPhData()
self.crph_quality = MsgCRPhData()
self.crph_dataflag = MsgCRPhData()
self.processing_flags = MsgCRPhData()
LOG.debug("Filename = <" + str(filename) + ">")
h5f = h5py.File(filename, 'r')
# pylint: disable-msg=W0212
self.package = h5f.attrs["PACKAGE"]
self.saf = h5f.attrs["SAF"]
self.product_name = h5f.attrs["PRODUCT_NAME"]
self.num_of_columns = h5f.attrs["NC"]
self.num_of_lines = h5f.attrs["NL"]
self.projection_name = h5f.attrs["PROJECTION_NAME"]
self.region_name = h5f.attrs["REGION_NAME"]
self.cfac = h5f.attrs["CFAC"]
self.lfac = h5f.attrs["LFAC"]
self.coff = h5f.attrs["COFF"]
self.loff = h5f.attrs["LOFF"]
self.nb_param = h5f.attrs["NB_PARAMETERS"]
self.gp_sc_id = h5f.attrs["GP_SC_ID"]
self.image_acquisition_time = h5f.attrs["IMAGE_ACQUISITION_TIME"]
self.spectral_channel_id = h5f.attrs["SPECTRAL_CHANNEL_ID"]
self.nominal_product_time = h5f.attrs["NOMINAL_PRODUCT_TIME"]
self.sgs_product_quality = h5f.attrs["SGS_PRODUCT_QUALITY"]
self.sgs_product_completeness = h5f.attrs["SGS_PRODUCT_COMPLETENESS"]
self.product_algorithm_version = h5f.attrs["PRODUCT_ALGORITHM_VERSION"]
# pylint: enable-msg=W0212
# ------------------------
# The CRPh CRR data
h5d = h5f['CRPh_CRR']
self.crph_crr.data = h5d[:, :]
self.crph_crr.scaling_factor = h5d.attrs["SCALING_FACTOR"]
self.crph_crr.offset = h5d.attrs["OFFSET"]
self.crph_crr.num_of_lines = h5d.attrs["N_LINES"]
self.crph_crr.num_of_columns = h5d.attrs["N_COLS"]
self.shape = (self.crph_crr.num_of_lines,
self.crph_crr.num_of_columns)
self.crph_crr.product = h5d.attrs["PRODUCT"]
self.crph_crr.id = h5d.attrs["ID"]
if calibrate:
self.crph_crr = (self.crph_crr.data *
self.crph_crr.scaling_factor +
self.crph_crr.offset)
else:
self.crph_crr = self.crph_crr.data
self.crph_crr_palette = _get_palette(h5f, 'CRPh_CRR') / 255.0
# The CRPh ACCUM data
h5d = h5f['CRPh_ACUM']
self.crph_accum.data = h5d[:, :]
self.crph_accum.scaling_factor = h5d.attrs["SCALING_FACTOR"]
self.crph_accum.offset = h5d.attrs["OFFSET"]
self.crph_accum.num_of_lines = h5d.attrs["N_LINES"]
self.crph_accum.num_of_columns = h5d.attrs["N_COLS"]
self.shape = (self.crph_accum.num_of_lines,
self.crph_accum.num_of_columns)
self.crph_accum.product = h5d.attrs["PRODUCT"]
self.crph_accum.id = h5d.attrs["ID"]
if calibrate:
self.crph_accum = (self.crph_accum.data *
self.crph_accum.scaling_factor +
self.crph_accum.offset)
else:
self.crph_accum = self.crph_accum.data
self.crph_accum_palette = _get_palette(h5f, 'CRPh_ACUM') / 255.0
# The CRPh IQF data
h5d = h5f['CRPh_IQF']
self.crph_iqf.data = h5d[:, :]
self.crph_iqf.scaling_factor = h5d.attrs["SCALING_FACTOR"]
self.crph_iqf.offset = h5d.attrs["OFFSET"]
self.crph_iqf.num_of_lines = h5d.attrs["N_LINES"]
self.crph_iqf.num_of_columns = h5d.attrs["N_COLS"]
self.shape = (self.crph_iqf.num_of_lines,
self.crph_iqf.num_of_columns)
self.crph_iqf.product = h5d.attrs["PRODUCT"]
self.crph_iqf.id = h5d.attrs["ID"]
# The CRPh QUALITY data
h5d = h5f['CRPh_QUALITY']
self.crph_quality.data = h5d[:, :]
self.crph_quality.scaling_factor = h5d.attrs["SCALING_FACTOR"]
self.crph_quality.offset = h5d.attrs["OFFSET"]
self.crph_quality.num_of_lines = h5d.attrs["N_LINES"]
self.crph_quality.num_of_columns = h5d.attrs["N_COLS"]
self.shape = (self.crph_quality.num_of_lines,
self.crph_quality.num_of_columns)
self.crph_quality.product = h5d.attrs["PRODUCT"]
self.crph_quality.id = h5d.attrs["ID"]
# The CRPh DATA FLAG data
h5d = h5f['CRPh_DATAFLAG']
self.crph_dataflag.data = h5d[:, :]
self.crph_dataflag.scaling_factor = h5d.attrs["SCALING_FACTOR"]
self.crph_dataflag.offset = h5d.attrs["OFFSET"]
self.crph_dataflag.num_of_lines = h5d.attrs["N_LINES"]
self.crph_dataflag.num_of_columns = h5d.attrs["N_COLS"]
self.shape = (self.crph_dataflag.num_of_lines,
self.crph_dataflag.num_of_columns)
self.crph_dataflag.product = h5d.attrs["PRODUCT"]
self.crph_dataflag.id = h5d.attrs["ID"]
# ------------------------
h5f.close()
self.processing_flags = self.processing_flags.data
self.area = get_area_from_file(filename)
self.filled = True
def project(self, coverage):
"""Remaps the NWCSAF/MSG CRPh to cartographic map-projection on
area give by a pre-registered area-id. Faster version of msg_remap!
"""
LOG.info("Projecting channel %s..." % (self.name))
region = coverage.out_area
dest_area = region.area_id
retv = MsgCRPh()
retv.name = self.name
retv.package = self.package
retv.saf = self.saf
retv.product_name = self.product_name
retv.region_name = dest_area
retv.cfac = self.cfac
retv.lfac = self.lfac
retv.coff = self.coff
retv.loff = self.loff
retv.nb_param = self.nb_param
retv.gp_sc_id = self.gp_sc_id
retv.image_acquisition_time = self.image_acquisition_time
retv.spectral_channel_id = self.spectral_channel_id
retv.nominal_product_time = self.nominal_product_time
retv.sgs_product_quality = self.sgs_product_quality
retv.sgs_product_completeness = self.sgs_product_completeness
retv.product_algorithm_version = self.product_algorithm_version
retv.crph_crr = coverage.project_array(self.crph_crr)
retv.crph_crr_palette = self.crph_crr_palette
retv.crph_accum = coverage.project_array(self.crph_accum)
retv.crph_accum_palette = self.crph_accum_palette
# retv.processing_flags = \
# coverage.project_array(self.processing_flags)
retv.qc_straylight = self.qc_straylight
retv.region_name = dest_area
retv.area = region
retv.projection_name = region.proj_id
retv.pcs_def = pcs_def_from_region(region)
retv.num_of_columns = region.x_size
retv.num_of_lines = region.y_size
retv.xscale = region.pixel_size_x
retv.yscale = region.pixel_size_y
import pyproj
prj = pyproj.Proj(region.proj4_string)
aex = region.area_extent
lonur, latur = prj(aex[2], aex[3], inverse=True)
lonll, latll = prj(aex[0], aex[1], inverse=True)
retv.ll_lon = lonll
retv.ll_lat = latll
retv.ur_lon = lonur
retv.ur_lat = latur
self.shape = region.shape
retv.filled = True
retv.resolution = self.resolution
return retv
""" NEU ENDE """
MSG_PGE_EXTENTIONS = ["PLAX.CTTH.0.h5", "PLAX.CLIM.0.h5", "h5"]
def get_best_product(filename, area_extent):
"""Get the best of the available products for the *filename* template.
"""
for ext in MSG_PGE_EXTENTIONS:
match_str = filename + "." + ext
LOG.debug("glob-string for filename: " + str(match_str))
flist = glob.glob(match_str)
if len(flist) == 0:
LOG.warning("No matching %s.%s input MSG file."
% (filename, ext))
else:
# File found:
if area_extent is None:
LOG.warning("Didn't specify an area, taking " + flist[0])
return flist[0]
for fname in flist:
aex = get_area_extent(fname)
if np.all(np.max(np.abs(np.array(aex) -
np.array(area_extent))) < 1000):
LOG.info("MSG file found: %s" % fname)
return fname
LOG.info("Did not find any MSG file for specified area")
def get_best_products(filename, area_extent):
"""Get the best of the available products for the *filename* template.
"""
filenames = []
for ext in MSG_PGE_EXTENTIONS:
match_str = filename + "." + ext
LOG.debug('Match string = ' + str(match_str))
flist = glob.glob(match_str)
if len(flist) == 0:
LOG.warning("No matching %s.%s input MSG file."
% (filename, ext))
else:
# File found:
if area_extent is None:
LOG.warning("Didn't specify an area, taking " + flist[0])
filenames.append(flist[0])
else:
found = False
for fname in flist:
aex = get_area_extent(fname)
if np.all(np.max(np.abs(np.array(aex) -
np.array(area_extent))) < 1000):
found = True
LOG.info("MSG file found: %s" % fname)
filenames.append(fname)
if not found:
LOG.info(
"Did not find any MSG file for specified area")
LOG.debug("Sorted filenames: %s", str(sorted(filenames)))
return sorted(filenames)
def get_area_from_file(filename):
"""Get the area from the h5 file.
"""
from pyresample.geometry import AreaDefinition
import h5py
aex = get_area_extent(filename)
h5f = h5py.File(filename, 'r')
pname = h5f.attrs["PROJECTION_NAME"]
proj = {}
if pname.startswith("GEOS"):
proj["proj"] = "geos"
proj["a"] = "6378169.0"
proj["b"] = "6356583.8"
proj["h"] = "35785831.0"
proj["lon_0"] = str(float(pname.split("<")[1][:-1]))
else:
raise NotImplementedError("Only geos projection supported yet.")
#h5f.attrs["REGION_NAME"] # <type 'numpy.string_'> alps
#pname # <type 'numpy.string_'> GEOS<+009.5>
#proj # <type 'dict'> {'a': '6378169.0', 'h': '35785831.0', 'b': '6356583.8', 'lon_0': '9.5', 'proj': 'geos'}
#int(h5f.attrs["NC"]) # <type 'int'> 349
#int(h5f.attrs["NL"]) # <type 'int'> 151
#aex # <type 'tuple'> (-613578.17189778585, 4094060.208733994, 433553.97518292483, 4547101.2335793395)
area_def = AreaDefinition(h5f.attrs["REGION_NAME"],
h5f.attrs["REGION_NAME"],
pname,
proj,
int(h5f.attrs["NC"]),
int(h5f.attrs["NL"]),
aex)
h5f.close()
return area_def
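# Illustrative sketch (not part of the original module): the GEOS projection
# name stored in the HDF5 attributes embeds the sub-satellite longitude,
# e.g. "GEOS<+009.5>". get_area_from_file() above extracts it like this:
def lon0_from_projection_name(pname):
    """Return the sub-satellite longitude from e.g. 'GEOS<+009.5>'."""
    return float(pname.split("<")[1][:-1])

# lon0_from_projection_name("GEOS<+009.5>") -> 9.5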
def load(scene, **kwargs):
"""Load data into the *channels*. *Channels* is a list or a tuple
containing channels we will load data into. If None, all channels are
loaded.
"""
print "*** read NWC-SAF data with nwcsaf_msg.py", scene.channels_to_load
area_extent = kwargs.get("area_extent")
calibrate = kwargs.get("calibrate", True)
conf = ConfigParser.ConfigParser()
conf.read(os.path.join(CONFIG_PATH, scene.fullname + ".cfg"))
directory = conf.get(scene.instrument_name + "-level3", "dir", raw=True)
filename_raw = conf.get(scene.instrument_name + "-level3", "filename", raw=True)
pathname = os.path.join(directory, filename_raw)
LOG.debug("Inside load: " + str(scene.channels_to_load))
if "CloudMask" in scene.channels_to_load:
filename_wildcards = (scene.time_slot.strftime(pathname)
% {"number": "01",
"product": "CMa__"})
filename = get_best_product(filename_wildcards, area_extent)
if filename is not None:
ct_chan = MsgCloudMask()
ct_chan.read(filename, calibrate)
ct_chan.satid = (scene.satname.capitalize() +
str(scene.sat_nr()).rjust(2))
ct_chan.resolution = ct_chan.area.pixel_size_x
scene.channels.append(ct_chan)
if "CloudType" in scene.channels_to_load:
filename_wildcards = (scene.time_slot.strftime(pathname)
% {"number": "02",
"product": "CT___"})
filenames = get_best_products(filename_wildcards, area_extent)
if len(filenames) > 0:
filename = filenames[-1]
else:
LOG.info("Did not find any MSG file for specified area")
return
ct_chan = MsgCloudType()
ct_chan.read(filenames[-1])
LOG.debug("Uncorrected file: %s", filename)
ct_chan.name = "CloudType"
ct_chan.satid = (scene.satname.capitalize() +
str(scene.sat_nr()).rjust(2))
ct_chan.resolution = ct_chan.area.pixel_size_x
scene.channels.append(ct_chan)
if "CloudType_plax" in scene.channels_to_load:
filename_wildcards = (scene.time_slot.strftime(pathname)
% {"number": "02",
"product": "CT___"})
filenames = get_best_products(filename_wildcards, area_extent)
if len(filenames) > 0:
filename = filenames[0]
else:
LOG.info("Did not find any MSG file for specified area")
return
ct_chan_plax = MsgCloudType()
if filename is not None:
LOG.debug("Parallax corrected file: %s", filename)
ct_chan_plax.read(filename)
ct_chan_plax.name = "CloudType_plax"
ct_chan_plax.satid = (scene.satname.capitalize() +
str(scene.sat_nr()).rjust(2))
ct_chan_plax.resolution = ct_chan_plax.area.pixel_size_x
scene.channels.append(ct_chan_plax)
print "*** hallo world***"
if "CTTH" in scene.channels_to_load:
filename_wildcards = (scene.time_slot.strftime(pathname)
% {"number": "03",
"product": "CTTH_"})
filename = get_best_product(filename_wildcards, area_extent)
if filename is not None:
ct_chan = MsgCTTH()
ct_chan.read(filename, calibrate)
print "CCC", scene.sat_nr()
ct_chan.satid = (scene.satname[0:8].capitalize() +
str(scene.sat_nr()).rjust(2))
print "bullshit (nwcsat_msg.py) ", ct_chan.satid # "Meteosat 9"
ct_chan.resolution = ct_chan.area.pixel_size_x
scene.channels.append(ct_chan)
if "CRR" in scene.channels_to_load:
filename_wildcards = (scene.time_slot.strftime(pathname)
% {"number": "05",
"product": "CRR__"})
filename = get_best_product(filename_wildcards, area_extent)
if filename is not None:
ct_chan = MsgCRR()
ct_chan.read(filename, calibrate)
ct_chan.name = "CRR_" # !!!!! changed as we create another channel named 'CRR' when transforming the format
ct_chan.satid = (scene.satname.capitalize() +
str(scene.sat_nr()).rjust(2))
ct_chan.resolution = ct_chan.area.pixel_size_x
scene.channels.append(ct_chan)
if "PC" in scene.channels_to_load:
filename_wildcards = (scene.time_slot.strftime(pathname)
% {"number": "04",
"product": "PC___"})
filename = get_best_product(filename_wildcards, area_extent)
if filename is not None:
ct_chan = MsgPC()
ct_chan.read(filename, calibrate)
ct_chan.name = "PC"
ct_chan.satid = (scene.satname.capitalize() +
str(scene.sat_nr()).rjust(2))
ct_chan.resolution = ct_chan.area.pixel_size_x
scene.channels.append(ct_chan)
if "SPhR" in scene.channels_to_load:
filename_wildcards = (scene.time_slot.strftime(pathname)
% {"number": "13",
"product": "SPhR_"})
filename = get_best_product(filename_wildcards, area_extent)
if filename is not None:
ct_chan = MsgSPhR()
ct_chan.read(filename, calibrate)
ct_chan.name = "SPhR"
ct_chan.satid = (scene.satname.capitalize() +
str(scene.sat_nr()).rjust(2))
ct_chan.resolution = ct_chan.area.pixel_size_x
scene.channels.append(ct_chan)
if "PCPh" in scene.channels_to_load:
filename_wildcards = (scene.time_slot.strftime(pathname)
% {"number": "14",
"product": "PCPh_"})
filename = get_best_product(filename_wildcards, area_extent)
if filename is not None:
ct_chan = MsgPCPh()
ct_chan.read(filename, calibrate)
ct_chan.name = "PCPh_"
ct_chan.satid = (scene.satname.capitalize() +
str(scene.sat_nr()).rjust(2))
ct_chan.resolution = ct_chan.area.pixel_size_x
scene.channels.append(ct_chan)
if "CRPh" in scene.channels_to_load:
filename_wildcards = (scene.time_slot.strftime(pathname)
% {"number": "14",
"product": "CRPh_"})
filename = get_best_product(filename_wildcards, area_extent)
if filename is not None:
ct_chan = MsgCRPh()
ct_chan.read(filename, calibrate)
ct_chan.name = "CRPh_"
ct_chan.satid = (scene.satname.capitalize() +
str(scene.sat_nr()).rjust(2))
ct_chan.resolution = ct_chan.area.pixel_size_x
scene.channels.append(ct_chan)
if 'filename' in locals() and filename is not None:
if len(filename) > 12:
sat_nr = int(basename(filename)[10:11]) + 7
if int(scene.sat_nr()) != int(sat_nr):
LOG.warning("Changing Meteosat number to " + str(sat_nr) + " (input: " + scene.sat_nr() + ")")
# !!! update number !!!
scene.number = str(sat_nr)
LOG.info("Loading channels done.")
| gpl-3.0 |
Cito/sqlalchemy | lib/sqlalchemy/connectors/pyodbc.py | 80 | 5896 | # connectors/pyodbc.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from . import Connector
from .. import util
import sys
import re
class PyODBCConnector(Connector):
driver = 'pyodbc'
supports_sane_multi_rowcount = False
if util.py2k:
# PyODBC unicode is broken on UCS-4 builds
supports_unicode = sys.maxunicode == 65535
supports_unicode_statements = supports_unicode
supports_native_decimal = True
default_paramstyle = 'named'
# for non-DSN connections, this should
# hold the desired driver name
pyodbc_driver_name = None
# will be set to True after initialize()
# if the freetds.so is detected
freetds = False
# will be set to the string version of
# the FreeTDS driver if freetds is detected
freetds_driver_version = None
# will be set to True after initialize()
# if the libessqlsrv.so is detected
easysoft = False
def __init__(self, supports_unicode_binds=None, **kw):
super(PyODBCConnector, self).__init__(**kw)
self._user_supports_unicode_binds = supports_unicode_binds
@classmethod
def dbapi(cls):
return __import__('pyodbc')
def create_connect_args(self, url):
opts = url.translate_connect_args(username='user')
opts.update(url.query)
keys = opts
query = url.query
connect_args = {}
for param in ('ansi', 'unicode_results', 'autocommit'):
if param in keys:
connect_args[param] = util.asbool(keys.pop(param))
if 'odbc_connect' in keys:
connectors = [util.unquote_plus(keys.pop('odbc_connect'))]
else:
dsn_connection = 'dsn' in keys or \
('host' in keys and 'database' not in keys)
if dsn_connection:
connectors = ['dsn=%s' % (keys.pop('host', '') or \
keys.pop('dsn', ''))]
else:
port = ''
if 'port' in keys and 'port' not in query:
port = ',%d' % int(keys.pop('port'))
connectors = ["DRIVER={%s}" %
keys.pop('driver', self.pyodbc_driver_name),
'Server=%s%s' % (keys.pop('host', ''), port),
'Database=%s' % keys.pop('database', '')]
user = keys.pop("user", None)
if user:
connectors.append("UID=%s" % user)
connectors.append("PWD=%s" % keys.pop('password', ''))
else:
connectors.append("Trusted_Connection=Yes")
# if set to 'Yes', the ODBC layer will try to automagically
# convert textual data from your database encoding to your
# client encoding. This should obviously be set to 'No' if
# you query a cp1253 encoded database from a latin1 client...
if 'odbc_autotranslate' in keys:
connectors.append("AutoTranslate=%s" %
keys.pop("odbc_autotranslate"))
connectors.extend(['%s=%s' % (k, v) for k, v in keys.items()])
return [[";".join(connectors)], connect_args]
def is_disconnect(self, e, connection, cursor):
if isinstance(e, self.dbapi.ProgrammingError):
return "The cursor's connection has been closed." in str(e) or \
'Attempt to use a closed connection.' in str(e)
elif isinstance(e, self.dbapi.Error):
return '[08S01]' in str(e)
else:
return False
def initialize(self, connection):
# determine FreeTDS first. can't issue SQL easily
# without getting unicode_statements/binds set up.
pyodbc = self.dbapi
dbapi_con = connection.connection
_sql_driver_name = dbapi_con.getinfo(pyodbc.SQL_DRIVER_NAME)
self.freetds = bool(re.match(r".*libtdsodbc.*\.so", _sql_driver_name))
self.easysoft = bool(re.match(r".*libessqlsrv.*\.so", _sql_driver_name))
if self.freetds:
self.freetds_driver_version = dbapi_con.getinfo(
pyodbc.SQL_DRIVER_VER)
self.supports_unicode_statements = (
not util.py2k or
(not self.freetds and not self.easysoft)
)
if self._user_supports_unicode_binds is not None:
self.supports_unicode_binds = self._user_supports_unicode_binds
elif util.py2k:
self.supports_unicode_binds = (
not self.freetds or self.freetds_driver_version >= '0.91'
) and not self.easysoft
else:
self.supports_unicode_binds = True
# run other initialization which asks for user name, etc.
super(PyODBCConnector, self).initialize(connection)
def _dbapi_version(self):
if not self.dbapi:
return ()
return self._parse_dbapi_version(self.dbapi.version)
def _parse_dbapi_version(self, vers):
m = re.match(
r'(?:py.*-)?([\d\.]+)(?:-(\w+))?',
vers
)
if not m:
return ()
vers = tuple([int(x) for x in m.group(1).split(".")])
if m.group(2):
vers += (m.group(2),)
return vers
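# Illustrative examples (not part of the original module) of the version
# parsing above:
#   _parse_dbapi_version("3.0.7")           -> (3, 0, 7)
#   _parse_dbapi_version("py3-3.0.1-beta4") -> (3, 0, 1, 'beta4')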
def _get_server_version_info(self, connection):
dbapi_con = connection.connection
version = []
r = re.compile(r'[.\-]')
for n in r.split(dbapi_con.getinfo(self.dbapi.SQL_DBMS_VER)):
try:
version.append(int(n))
except ValueError:
version.append(n)
return tuple(version)
| mit |
vjpai/grpc | test/cpp/qps/qps_json_driver_scenario_gen.py | 5 | 2296 | #!/usr/bin/env python2.7
# Copyright 2018 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gen_build_yaml as gen
import json
COPYRIGHT = """
# Copyright 2021 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
def generate_args():
all_scenario_set = gen.generate_yaml()
all_scenario_set = all_scenario_set['tests']
qps_json_driver_scenario_set = \
[item for item in all_scenario_set if item['name'] == 'qps_json_driver']
qps_json_driver_arg_set = \
[item['args'][2] for item in qps_json_driver_scenario_set \
if 'args' in item and len(item['args']) > 2]
deserialized_scenarios = [json.loads(item)['scenarios'][0] \
for item in qps_json_driver_arg_set]
all_scenarios = {scenario['name'].encode('ascii', 'ignore'): \
'\'{\'scenarios\' : [' + json.dumps(scenario) + ']}\'' \
for scenario in deserialized_scenarios}
serialized_scenarios_str = str(all_scenarios).encode('ascii', 'ignore')
with open('qps_json_driver_scenarios.bzl', 'w') as f:
f.write(COPYRIGHT)
f.write('"""Scenarios of qps driver."""\n\n')
f.write('QPS_JSON_DRIVER_SCENARIOS = ' + serialized_scenarios_str +
'\n')
generate_args()
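# Illustrative sketch (hypothetical scenario name, not real generator
# output): the generated qps_json_driver_scenarios.bzl maps each scenario
# name to a shell-quoted JSON document, roughly:
#
#   QPS_JSON_DRIVER_SCENARIOS = {'cpp_example_scenario':
#       "'{'scenarios' : [{...}]}'", ...}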
| apache-2.0 |
kawamon/hue | desktop/core/ext-py/lxml-3.3.6/src/lxml/tests/test_htmlparser.py | 16 | 18693 | # -*- coding: utf-8 -*-
"""
HTML parser test cases for etree
"""
import unittest
import tempfile, os, os.path, sys
this_dir = os.path.dirname(__file__)
if this_dir not in sys.path:
sys.path.insert(0, this_dir) # needed for Py3
from common_imports import etree, StringIO, BytesIO, fileInTestDir, _bytes, _str
from common_imports import SillyFileLike, HelperTestCase, write_to_file, next
try:
unicode
except NameError:
unicode = str
class HtmlParserTestCase(HelperTestCase):
"""HTML parser test cases
"""
etree = etree
html_str = _bytes("<html><head><title>test</title></head><body><h1>page title</h1></body></html>")
html_str_pretty = _bytes("""\
<html>
<head><title>test</title></head>
<body><h1>page title</h1></body>
</html>
""")
broken_html_str = _bytes("<html><head><title>test"
"<body><h1>page title</h3></p></html>")
uhtml_str = _bytes(
"<html><head><title>test á</title></head>"
"<body><h1>page á title</h1></body></html>").decode('utf8')
def tearDown(self):
super(HtmlParserTestCase, self).tearDown()
self.etree.set_default_parser()
def test_module_HTML(self):
element = self.etree.HTML(self.html_str)
self.assertEqual(self.etree.tostring(element, method="html"),
self.html_str)
def test_module_HTML_unicode(self):
element = self.etree.HTML(self.uhtml_str)
self.assertEqual(
self.etree.tostring(element, method="html", encoding='unicode'),
self.uhtml_str)
self.assertEqual(element.findtext('.//h1'),
_bytes("page á title").decode('utf8'))
def test_wide_unicode_xml(self):
if sys.maxunicode < 1114111:
return # skip test
element = self.etree.HTML(_bytes(
'<html><body><p>\\U00026007</p></body></html>'
).decode('unicode_escape'))
p_text = element.findtext('.//p')
self.assertEqual(1, len(p_text))
self.assertEqual(_bytes('\\U00026007').decode('unicode_escape'),
p_text)
def test_module_HTML_pretty_print(self):
element = self.etree.HTML(self.html_str)
self.assertEqual(self.etree.tostring(element, method="html", pretty_print=True),
self.html_str_pretty)
def test_module_parse_html_error(self):
parser = self.etree.HTMLParser(recover=False)
parse = self.etree.parse
f = BytesIO("<html></body>")
self.assertRaises(self.etree.XMLSyntaxError,
parse, f, parser)
def test_html_element_name_empty(self):
parser = self.etree.HTMLParser()
Element = parser.makeelement
el = Element('name')
self.assertRaises(ValueError, Element, '{}')
self.assertRaises(ValueError, setattr, el, 'tag', '{}')
self.assertRaises(ValueError, Element, '{test}')
self.assertRaises(ValueError, setattr, el, 'tag', '{test}')
def test_html_element_name_colon(self):
parser = self.etree.HTMLParser()
Element = parser.makeelement
pname = Element('p:name')
self.assertEqual(pname.tag, 'p:name')
pname = Element('{test}p:name')
self.assertEqual(pname.tag, '{test}p:name')
pname = Element('name')
pname.tag = 'p:name'
self.assertEqual(pname.tag, 'p:name')
def test_html_element_name_quote(self):
parser = self.etree.HTMLParser()
Element = parser.makeelement
self.assertRaises(ValueError, Element, 'p"name')
self.assertRaises(ValueError, Element, "na'me")
self.assertRaises(ValueError, Element, '{test}"name')
self.assertRaises(ValueError, Element, "{test}name'")
el = Element('name')
self.assertRaises(ValueError, setattr, el, 'tag', "pname'")
self.assertRaises(ValueError, setattr, el, 'tag', '"pname')
self.assertEqual(el.tag, "name")
def test_html_element_name_space(self):
parser = self.etree.HTMLParser()
Element = parser.makeelement
self.assertRaises(ValueError, Element, ' name ')
self.assertRaises(ValueError, Element, 'na me')
self.assertRaises(ValueError, Element, '{test} name')
el = Element('name')
self.assertRaises(ValueError, setattr, el, 'tag', ' name ')
self.assertEqual(el.tag, "name")
def test_html_subelement_name_empty(self):
parser = self.etree.HTMLParser()
Element = parser.makeelement
SubElement = self.etree.SubElement
el = Element('name')
self.assertRaises(ValueError, SubElement, el, '{}')
self.assertRaises(ValueError, SubElement, el, '{test}')
def test_html_subelement_name_colon(self):
parser = self.etree.HTMLParser()
Element = parser.makeelement
SubElement = self.etree.SubElement
el = Element('name')
pname = SubElement(el, 'p:name')
self.assertEqual(pname.tag, 'p:name')
pname = SubElement(el, '{test}p:name')
self.assertEqual(pname.tag, '{test}p:name')
def test_html_subelement_name_quote(self):
parser = self.etree.HTMLParser()
Element = parser.makeelement
SubElement = self.etree.SubElement
el = Element('name')
self.assertRaises(ValueError, SubElement, el, "name'")
self.assertRaises(ValueError, SubElement, el, 'na"me')
self.assertRaises(ValueError, SubElement, el, "{test}na'me")
self.assertRaises(ValueError, SubElement, el, '{test}"name')
def test_html_subelement_name_space(self):
parser = self.etree.HTMLParser()
Element = parser.makeelement
SubElement = self.etree.SubElement
el = Element('name')
self.assertRaises(ValueError, SubElement, el, ' name ')
self.assertRaises(ValueError, SubElement, el, 'na me')
self.assertRaises(ValueError, SubElement, el, '{test} name')
def test_module_parse_html_norecover(self):
parser = self.etree.HTMLParser(recover=False)
parse = self.etree.parse
f = BytesIO(self.broken_html_str)
self.assertRaises(self.etree.XMLSyntaxError,
parse, f, parser)
def test_parse_encoding_8bit_explicit(self):
text = _str('Søk på nettet')
html_latin1 = (_str('<p>%s</p>') % text).encode('iso-8859-1')
tree = self.etree.parse(
BytesIO(html_latin1),
self.etree.HTMLParser(encoding="iso-8859-1"))
p = tree.find("//p")
self.assertEqual(p.text, text)
def test_parse_encoding_8bit_override(self):
text = _str('Søk på nettet')
wrong_head = _str('''
<head>
<meta http-equiv="Content-Type"
content="text/html; charset=UTF-8" />
</head>''')
html_latin1 = (_str('<html>%s<body><p>%s</p></body></html>') % (wrong_head,
text)
).encode('iso-8859-1')
self.assertRaises(self.etree.ParseError,
self.etree.parse,
BytesIO(html_latin1))
tree = self.etree.parse(
BytesIO(html_latin1),
self.etree.HTMLParser(encoding="iso-8859-1"))
p = tree.find("//p")
self.assertEqual(p.text, text)
def test_module_HTML_broken(self):
element = self.etree.HTML(self.broken_html_str)
self.assertEqual(self.etree.tostring(element, method="html"),
self.html_str)
def test_module_HTML_cdata(self):
# by default, libxml2 generates CDATA nodes for <script> and <style> content
html = _bytes('<html><head><style>foo</style></head></html>')
element = self.etree.HTML(html)
self.assertEqual(element[0][0].text, "foo")
def test_module_HTML_access(self):
element = self.etree.HTML(self.html_str)
self.assertEqual(element[0][0].tag, 'title')
def test_module_parse_html(self):
parser = self.etree.HTMLParser()
filename = tempfile.mktemp(suffix=".html")
write_to_file(filename, self.html_str, 'wb')
try:
f = open(filename, 'rb')
tree = self.etree.parse(f, parser)
f.close()
self.assertEqual(self.etree.tostring(tree.getroot(), method="html"),
self.html_str)
finally:
os.remove(filename)
def test_module_parse_html_filelike(self):
parser = self.etree.HTMLParser()
f = SillyFileLike(self.html_str)
tree = self.etree.parse(f, parser)
html = self.etree.tostring(tree.getroot(),
method="html", encoding='UTF-8')
self.assertEqual(html, self.html_str)
## def test_module_parse_html_filelike_unicode(self):
## parser = self.etree.HTMLParser()
## f = SillyFileLike(self.uhtml_str)
## tree = self.etree.parse(f, parser)
## html = self.etree.tostring(tree.getroot(), encoding='UTF-8')
## self.assertEqual(unicode(html, 'UTF-8'), self.uhtml_str)
def test_html_file_error(self):
parser = self.etree.HTMLParser()
parse = self.etree.parse
self.assertRaises(IOError,
parse, "__some_hopefully_nonexisting_file__.html",
parser)
def test_default_parser_HTML_broken(self):
self.assertRaises(self.etree.XMLSyntaxError,
self.etree.parse, BytesIO(self.broken_html_str))
self.etree.set_default_parser( self.etree.HTMLParser() )
tree = self.etree.parse(BytesIO(self.broken_html_str))
self.assertEqual(self.etree.tostring(tree.getroot(), method="html"),
self.html_str)
self.etree.set_default_parser()
self.assertRaises(self.etree.XMLSyntaxError,
self.etree.parse, BytesIO(self.broken_html_str))
def test_html_iterparse(self):
iterparse = self.etree.iterparse
f = BytesIO(
'<html><head><title>TITLE</title><body><p>P</p></body></html>')
iterator = iterparse(f, html=True)
self.assertEqual(None, iterator.root)
events = list(iterator)
root = iterator.root
self.assertTrue(root is not None)
self.assertEqual(
[('end', root[0][0]), ('end', root[0]), ('end', root[1][0]),
('end', root[1]), ('end', root)],
events)
def test_html_iterparse_stop_short(self):
iterparse = self.etree.iterparse
f = BytesIO(
'<html><head><title>TITLE</title><body><p>P</p></body></html>')
iterator = iterparse(f, html=True)
self.assertEqual(None, iterator.root)
event, element = next(iterator)
self.assertEqual('end', event)
self.assertEqual('title', element.tag)
self.assertEqual(None, iterator.root)
del element
event, element = next(iterator)
self.assertEqual('end', event)
self.assertEqual('head', element.tag)
self.assertEqual(None, iterator.root)
del element
del iterator
def test_html_iterparse_broken(self):
iterparse = self.etree.iterparse
f = BytesIO('<head><title>TEST></head><p>P<br></div>')
iterator = iterparse(f, html=True)
self.assertEqual(None, iterator.root)
events = list(iterator)
root = iterator.root
self.assertTrue(root is not None)
self.assertEqual('html', root.tag)
self.assertEqual('head', root[0].tag)
self.assertEqual('body', root[1].tag)
self.assertEqual('p', root[1][0].tag)
self.assertEqual('br', root[1][0][0].tag)
self.assertEqual(
[('end', root[0][0]), ('end', root[0]), ('end', root[1][0][0]),
('end', root[1][0]), ('end', root[1]), ('end', root)],
events)
def test_html_iterparse_broken_no_recover(self):
iterparse = self.etree.iterparse
f = BytesIO('<p>P<br></div>')
iterator = iterparse(f, html=True, recover=False)
self.assertRaises(self.etree.XMLSyntaxError, list, iterator)
def test_html_iterparse_file(self):
iterparse = self.etree.iterparse
iterator = iterparse(fileInTestDir("shakespeare.html"),
html=True)
self.assertEqual(None, iterator.root)
events = list(iterator)
root = iterator.root
self.assertTrue(root is not None)
self.assertEqual(249, len(events))
self.assertFalse(
[event for (event, element) in events if event != 'end'])
def test_html_iterparse_start(self):
iterparse = self.etree.iterparse
f = BytesIO(
'<html><head><title>TITLE</title><body><p>P</p></body></html>')
iterator = iterparse(f, html=True, events=('start',))
self.assertEqual(None, iterator.root)
events = list(iterator)
root = iterator.root
self.assertNotEqual(None, root)
self.assertEqual(
[('start', root), ('start', root[0]), ('start', root[0][0]),
('start', root[1]), ('start', root[1][0])],
events)
def test_html_feed_parser(self):
parser = self.etree.HTMLParser()
parser.feed("<html><body></")
parser.feed("body></html>")
root = parser.close()
self.assertEqual('html', root.tag)
# test that we find all names in the parser dict
self.assertEqual([root], list(root.iter('html')))
self.assertEqual([root[0]], list(root.iter('body')))
def test_html_feed_parser_chunky(self):
parser = self.etree.HTMLParser()
parser.feed("<htm")
parser.feed("l><body")
parser.feed("><")
parser.feed("p><")
parser.feed("strong")
parser.feed(">some ")
parser.feed("text</strong></p><")
parser.feed("/body></html>")
root = parser.close()
self.assertEqual('html', root.tag)
# test that we find all names in the parser dict
self.assertEqual([root], list(root.iter('html')))
self.assertEqual([root[0]], list(root.iter('body')))
self.assertEqual([root[0][0]], list(root.iter('p')))
self.assertEqual([root[0][0][0]], list(root.iter('strong')))
def test_html_feed_parser_more_tags(self):
parser = self.etree.HTMLParser()
parser.feed('<html><head>')
parser.feed('<title>TITLE</title><body><p>P</p></body><')
parser.feed("/html>")
root = parser.close()
self.assertEqual('html', root.tag)
# test that we find all names in the parser dict
self.assertEqual([root], list(root.iter('html')))
self.assertEqual([root[0]], list(root.iter('head')))
self.assertEqual([root[0][0]], list(root.iter('title')))
self.assertEqual([root[1]], list(root.iter('body')))
self.assertEqual([root[1][0]], list(root.iter('p')))
def test_html_parser_target_tag(self):
assertFalse = self.assertFalse
events = []
class Target(object):
def start(self, tag, attrib):
events.append(("start", tag))
assertFalse(attrib)
def end(self, tag):
events.append(("end", tag))
def close(self):
return "DONE"
parser = self.etree.HTMLParser(target=Target())
parser.feed("<html><body></body></html>")
done = parser.close()
self.assertEqual("DONE", done)
self.assertEqual([
("start", "html"), ("start", "body"),
("end", "body"), ("end", "html")], events)
def test_html_parser_target_doctype_empty(self):
assertFalse = self.assertFalse
events = []
class Target(object):
def start(self, tag, attrib):
events.append(("start", tag))
assertFalse(attrib)
def end(self, tag):
events.append(("end", tag))
def doctype(self, *args):
events.append(("doctype", args))
def close(self):
return "DONE"
parser = self.etree.HTMLParser(target=Target())
parser.feed("<!DOCTYPE><html><body></body></html>")
done = parser.close()
self.assertEqual("DONE", done)
self.assertEqual([
("doctype", (None, None, None)),
("start", "html"), ("start", "body"),
("end", "body"), ("end", "html")], events)
def test_html_parser_target_doctype_html(self):
assertFalse = self.assertFalse
events = []
class Target(object):
def start(self, tag, attrib):
events.append(("start", tag))
assertFalse(attrib)
def end(self, tag):
events.append(("end", tag))
def doctype(self, *args):
events.append(("doctype", args))
def close(self):
return "DONE"
parser = self.etree.HTMLParser(target=Target())
parser.feed("<!DOCTYPE html><html><body></body></html>")
done = parser.close()
self.assertEqual("DONE", done)
self.assertEqual([
("doctype", ("html", None, None)),
("start", "html"), ("start", "body"),
("end", "body"), ("end", "html")], events)
def test_html_parser_target_doctype_html_full(self):
assertFalse = self.assertFalse
events = []
class Target(object):
def start(self, tag, attrib):
events.append(("start", tag))
assertFalse(attrib)
def end(self, tag):
events.append(("end", tag))
def doctype(self, *args):
events.append(("doctype", args))
def close(self):
return "DONE"
parser = self.etree.HTMLParser(target=Target())
parser.feed('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "sys.dtd">'
'<html><body></body></html>')
done = parser.close()
self.assertEqual("DONE", done)
self.assertEqual([
("doctype", ("html", "-//W3C//DTD HTML 4.01//EN", "sys.dtd")),
("start", "html"), ("start", "body"),
("end", "body"), ("end", "html")], events)
def test_suite():
suite = unittest.TestSuite()
suite.addTests([unittest.makeSuite(HtmlParserTestCase)])
return suite
if __name__ == '__main__':
print('to test use test.py %s' % __file__)
| apache-2.0 |
codepantry/django | tests/multiple_database/tests.py | 107 | 93762 | from __future__ import unicode_literals
import datetime
import pickle
import warnings
from operator import attrgetter
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.core import management
from django.db import DEFAULT_DB_ALIAS, connections, router, transaction
from django.db.models import signals
from django.db.utils import ConnectionRouter
from django.test import SimpleTestCase, TestCase, override_settings
from django.utils.encoding import force_text
from django.utils.six import StringIO
from .models import Book, Person, Pet, Review, UserProfile
from .routers import AuthRouter, TestRouter, WriteRouter
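# Illustrative sketch (not one of the routers imported above; the class name
# is hypothetical): a database router implements some of Django's routing
# hooks, returning a database alias or None to defer the decision.
class ExampleReplicaRouter(object):
    def db_for_read(self, model, **hints):
        # Send reads to the 'other' alias used throughout these tests.
        return 'other'

    def db_for_write(self, model, **hints):
        return 'default'

    def allow_relation(self, obj1, obj2, **hints):
        return None  # no opinion

    def allow_migrate(self, db, app_label, model_name=None, **hints):
        return None  # no opinion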
class QueryTestCase(TestCase):
multi_db = True
def test_db_selection(self):
"Check that querysets will use the default database by default"
self.assertEqual(Book.objects.db, DEFAULT_DB_ALIAS)
self.assertEqual(Book.objects.all().db, DEFAULT_DB_ALIAS)
self.assertEqual(Book.objects.using('other').db, 'other')
self.assertEqual(Book.objects.db_manager('other').db, 'other')
self.assertEqual(Book.objects.db_manager('other').all().db, 'other')
def test_default_creation(self):
"Objects created on the default database don't leak onto other databases"
# Create a book on the default database using create()
Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
# Create a book on the default database using a save
dive = Book()
dive.title = "Dive into Python"
dive.published = datetime.date(2009, 5, 4)
dive.save()
# Check that the book exists on the default database, but not on the other database
try:
Book.objects.get(title="Pro Django")
Book.objects.using('default').get(title="Pro Django")
except Book.DoesNotExist:
self.fail('"Pro Django" should exist on default database')
self.assertRaises(
Book.DoesNotExist,
Book.objects.using('other').get,
title="Pro Django"
)
try:
Book.objects.get(title="Dive into Python")
Book.objects.using('default').get(title="Dive into Python")
except Book.DoesNotExist:
self.fail('"Dive into Python" should exist on default database')
self.assertRaises(
Book.DoesNotExist,
Book.objects.using('other').get,
title="Dive into Python"
)
def test_other_creation(self):
"Objects created on another database don't leak onto the default database"
# Create a book on the second database
Book.objects.using('other').create(title="Pro Django",
published=datetime.date(2008, 12, 16))
# Create a book on the default database using a save
dive = Book()
dive.title = "Dive into Python"
dive.published = datetime.date(2009, 5, 4)
dive.save(using='other')
# Check that the book exists on the other database, but not on the default database
try:
Book.objects.using('other').get(title="Pro Django")
except Book.DoesNotExist:
self.fail('"Pro Django" should exist on other database')
self.assertRaises(
Book.DoesNotExist,
Book.objects.get,
title="Pro Django"
)
self.assertRaises(
Book.DoesNotExist,
Book.objects.using('default').get,
title="Pro Django"
)
try:
Book.objects.using('other').get(title="Dive into Python")
except Book.DoesNotExist:
self.fail('"Dive into Python" should exist on other database')
self.assertRaises(
Book.DoesNotExist,
Book.objects.get,
title="Dive into Python"
)
self.assertRaises(
Book.DoesNotExist,
Book.objects.using('default').get,
title="Dive into Python"
)
def test_refresh(self):
dive = Book()
dive.title = "Dive into Python"
dive.published = datetime.date(2009, 5, 4)
dive.save(using='other')
dive2 = Book.objects.using('other').get()
dive2.title = "Dive into Python (on default)"
dive2.save(using='default')
dive.refresh_from_db()
self.assertEqual(dive.title, "Dive into Python")
dive.refresh_from_db(using='default')
self.assertEqual(dive.title, "Dive into Python (on default)")
self.assertEqual(dive._state.db, "default")
def test_basic_queries(self):
"Queries are constrained to a single database"
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
dive = Book.objects.using('other').get(published=datetime.date(2009, 5, 4))
self.assertEqual(dive.title, "Dive into Python")
self.assertRaises(Book.DoesNotExist, Book.objects.using('default').get, published=datetime.date(2009, 5, 4))
dive = Book.objects.using('other').get(title__icontains="dive")
self.assertEqual(dive.title, "Dive into Python")
self.assertRaises(Book.DoesNotExist, Book.objects.using('default').get, title__icontains="dive")
dive = Book.objects.using('other').get(title__iexact="dive INTO python")
self.assertEqual(dive.title, "Dive into Python")
self.assertRaises(Book.DoesNotExist, Book.objects.using('default').get, title__iexact="dive INTO python")
dive = Book.objects.using('other').get(published__year=2009)
self.assertEqual(dive.title, "Dive into Python")
self.assertEqual(dive.published, datetime.date(2009, 5, 4))
self.assertRaises(Book.DoesNotExist, Book.objects.using('default').get, published__year=2009)
years = Book.objects.using('other').dates('published', 'year')
self.assertEqual([o.year for o in years], [2009])
years = Book.objects.using('default').dates('published', 'year')
self.assertEqual([o.year for o in years], [])
months = Book.objects.using('other').dates('published', 'month')
self.assertEqual([o.month for o in months], [5])
months = Book.objects.using('default').dates('published', 'month')
self.assertEqual([o.month for o in months], [])
def test_m2m_separation(self):
"M2M fields are constrained to a single database"
# Create a book and author on the default database
pro = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
marty = Person.objects.create(name="Marty Alchin")
# Create a book and author on the other database
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
mark = Person.objects.using('other').create(name="Mark Pilgrim")
# Save the author relations
pro.authors = [marty]
dive.authors = [mark]
# Inspect the m2m tables directly.
# There should be 1 entry in each database
self.assertEqual(Book.authors.through.objects.using('default').count(), 1)
self.assertEqual(Book.authors.through.objects.using('other').count(), 1)
# Check that queries work across m2m joins
self.assertEqual(list(Book.objects.using('default').filter(authors__name='Marty Alchin').values_list('title', flat=True)),
['Pro Django'])
self.assertEqual(list(Book.objects.using('other').filter(authors__name='Marty Alchin').values_list('title', flat=True)),
[])
self.assertEqual(list(Book.objects.using('default').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)),
[])
self.assertEqual(list(Book.objects.using('other').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)),
['Dive into Python'])
# Reget the objects to clear caches
dive = Book.objects.using('other').get(title="Dive into Python")
mark = Person.objects.using('other').get(name="Mark Pilgrim")
# Retrieve related object by descriptor. Related objects should be database-bound
self.assertEqual(list(dive.authors.all().values_list('name', flat=True)),
['Mark Pilgrim'])
self.assertEqual(list(mark.book_set.all().values_list('title', flat=True)),
['Dive into Python'])
def test_m2m_forward_operations(self):
"M2M forward manipulations are all constrained to a single DB"
# Create a book and author on the other database
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
mark = Person.objects.using('other').create(name="Mark Pilgrim")
# Save the author relations
dive.authors = [mark]
# Add a second author
john = Person.objects.using('other').create(name="John Smith")
self.assertEqual(list(Book.objects.using('other').filter(authors__name='John Smith').values_list('title', flat=True)),
[])
dive.authors.add(john)
self.assertEqual(list(Book.objects.using('other').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)),
['Dive into Python'])
self.assertEqual(list(Book.objects.using('other').filter(authors__name='John Smith').values_list('title', flat=True)),
['Dive into Python'])
# Remove the second author
dive.authors.remove(john)
self.assertEqual(list(Book.objects.using('other').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)),
['Dive into Python'])
self.assertEqual(list(Book.objects.using('other').filter(authors__name='John Smith').values_list('title', flat=True)),
[])
# Clear all authors
dive.authors.clear()
self.assertEqual(list(Book.objects.using('other').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)),
[])
self.assertEqual(list(Book.objects.using('other').filter(authors__name='John Smith').values_list('title', flat=True)),
[])
# Create an author through the m2m interface
dive.authors.create(name='Jane Brown')
self.assertEqual(list(Book.objects.using('other').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)),
[])
self.assertEqual(list(Book.objects.using('other').filter(authors__name='Jane Brown').values_list('title', flat=True)),
['Dive into Python'])
def test_m2m_reverse_operations(self):
"M2M reverse manipulations are all constrained to a single DB"
# Create a book and author on the other database
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
mark = Person.objects.using('other').create(name="Mark Pilgrim")
# Save the author relations
dive.authors = [mark]
# Create a second book on the other database
grease = Book.objects.using('other').create(title="Greasemonkey Hacks",
published=datetime.date(2005, 11, 1))
# Add a book to the m2m
mark.book_set.add(grease)
self.assertEqual(list(Person.objects.using('other').filter(book__title='Dive into Python').values_list('name', flat=True)),
['Mark Pilgrim'])
self.assertEqual(list(Person.objects.using('other').filter(book__title='Greasemonkey Hacks').values_list('name', flat=True)),
['Mark Pilgrim'])
# Remove a book from the m2m
mark.book_set.remove(grease)
self.assertEqual(list(Person.objects.using('other').filter(book__title='Dive into Python').values_list('name', flat=True)),
['Mark Pilgrim'])
self.assertEqual(list(Person.objects.using('other').filter(book__title='Greasemonkey Hacks').values_list('name', flat=True)),
[])
# Clear the books associated with mark
mark.book_set.clear()
self.assertEqual(list(Person.objects.using('other').filter(book__title='Dive into Python').values_list('name', flat=True)),
[])
self.assertEqual(list(Person.objects.using('other').filter(book__title='Greasemonkey Hacks').values_list('name', flat=True)),
[])
# Create a book through the m2m interface
mark.book_set.create(title="Dive into HTML5", published=datetime.date(2020, 1, 1))
self.assertEqual(list(Person.objects.using('other').filter(book__title='Dive into Python').values_list('name', flat=True)),
[])
self.assertEqual(list(Person.objects.using('other').filter(book__title='Dive into HTML5').values_list('name', flat=True)),
['Mark Pilgrim'])
def test_m2m_cross_database_protection(self):
"Operations that involve sharing M2M objects across databases raise an error"
# Create a book and author on the default database
pro = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
marty = Person.objects.create(name="Marty Alchin")
# Create a book and author on the other database
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
mark = Person.objects.using('other').create(name="Mark Pilgrim")
# Set a foreign key set with an object from a different database
with self.assertRaises(ValueError):
with transaction.atomic(using='default'):
marty.edited = [pro, dive]
# Add to an m2m with an object from a different database
with self.assertRaises(ValueError):
with transaction.atomic(using='default'):
marty.book_set.add(dive)
# Set a m2m with an object from a different database
with self.assertRaises(ValueError):
with transaction.atomic(using='default'):
marty.book_set = [pro, dive]
# Add to a reverse m2m with an object from a different database
with self.assertRaises(ValueError):
with transaction.atomic(using='other'):
dive.authors.add(marty)
# Set a reverse m2m with an object from a different database
with self.assertRaises(ValueError):
with transaction.atomic(using='other'):
dive.authors = [mark, marty]
def test_m2m_deletion(self):
"Cascaded deletions of m2m relations issue queries on the right database"
# Create a book and author on the other database
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
mark = Person.objects.using('other').create(name="Mark Pilgrim")
dive.authors = [mark]
# Check the initial state
self.assertEqual(Person.objects.using('default').count(), 0)
self.assertEqual(Book.objects.using('default').count(), 0)
self.assertEqual(Book.authors.through.objects.using('default').count(), 0)
self.assertEqual(Person.objects.using('other').count(), 1)
self.assertEqual(Book.objects.using('other').count(), 1)
self.assertEqual(Book.authors.through.objects.using('other').count(), 1)
# Delete the object on the other database
dive.delete(using='other')
self.assertEqual(Person.objects.using('default').count(), 0)
self.assertEqual(Book.objects.using('default').count(), 0)
self.assertEqual(Book.authors.through.objects.using('default').count(), 0)
# The person still exists ...
self.assertEqual(Person.objects.using('other').count(), 1)
# ... but the book has been deleted
self.assertEqual(Book.objects.using('other').count(), 0)
# ... and the relationship object has also been deleted.
self.assertEqual(Book.authors.through.objects.using('other').count(), 0)
# Now try deletion in the reverse direction. Set up the relation again
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
dive.authors = [mark]
# Check the initial state
self.assertEqual(Person.objects.using('default').count(), 0)
self.assertEqual(Book.objects.using('default').count(), 0)
self.assertEqual(Book.authors.through.objects.using('default').count(), 0)
self.assertEqual(Person.objects.using('other').count(), 1)
self.assertEqual(Book.objects.using('other').count(), 1)
self.assertEqual(Book.authors.through.objects.using('other').count(), 1)
# Delete the object on the other database
mark.delete(using='other')
self.assertEqual(Person.objects.using('default').count(), 0)
self.assertEqual(Book.objects.using('default').count(), 0)
self.assertEqual(Book.authors.through.objects.using('default').count(), 0)
# The person has been deleted ...
self.assertEqual(Person.objects.using('other').count(), 0)
# ... but the book still exists
self.assertEqual(Book.objects.using('other').count(), 1)
# ... and the relationship object has been deleted.
self.assertEqual(Book.authors.through.objects.using('other').count(), 0)
def test_foreign_key_separation(self):
"FK fields are constrained to a single database"
# Create a book and author on the default database
pro = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
george = Person.objects.create(name="George Vilches")
# Create a book and author on the other database
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
chris = Person.objects.using('other').create(name="Chris Mills")
        # Set the editor for each book
pro.editor = george
pro.save()
dive.editor = chris
dive.save()
pro = Book.objects.using('default').get(title="Pro Django")
self.assertEqual(pro.editor.name, "George Vilches")
dive = Book.objects.using('other').get(title="Dive into Python")
self.assertEqual(dive.editor.name, "Chris Mills")
# Check that queries work across foreign key joins
self.assertEqual(list(Person.objects.using('default').filter(edited__title='Pro Django').values_list('name', flat=True)),
['George Vilches'])
self.assertEqual(list(Person.objects.using('other').filter(edited__title='Pro Django').values_list('name', flat=True)),
[])
self.assertEqual(list(Person.objects.using('default').filter(edited__title='Dive into Python').values_list('name', flat=True)),
[])
self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into Python').values_list('name', flat=True)),
['Chris Mills'])
        # Re-fetch the objects to clear caches
chris = Person.objects.using('other').get(name="Chris Mills")
dive = Book.objects.using('other').get(title="Dive into Python")
# Retrieve related object by descriptor. Related objects should be database-bound
self.assertEqual(list(chris.edited.values_list('title', flat=True)),
['Dive into Python'])
def test_foreign_key_reverse_operations(self):
"FK reverse manipulations are all constrained to a single DB"
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
chris = Person.objects.using('other').create(name="Chris Mills")
        # Save the editor relation
dive.editor = chris
dive.save()
# Add a second book edited by chris
html5 = Book.objects.using('other').create(title="Dive into HTML5", published=datetime.date(2010, 3, 15))
self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into HTML5').values_list('name', flat=True)),
[])
chris.edited.add(html5)
self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into HTML5').values_list('name', flat=True)),
['Chris Mills'])
self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into Python').values_list('name', flat=True)),
['Chris Mills'])
        # Remove the second edited book
chris.edited.remove(html5)
self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into HTML5').values_list('name', flat=True)),
[])
self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into Python').values_list('name', flat=True)),
['Chris Mills'])
# Clear all edited books
chris.edited.clear()
self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into HTML5').values_list('name', flat=True)),
[])
self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into Python').values_list('name', flat=True)),
[])
        # Create a book through the reverse FK interface
chris.edited.create(title='Dive into Water', published=datetime.date(2010, 3, 15))
self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into HTML5').values_list('name', flat=True)),
[])
self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into Water').values_list('name', flat=True)),
['Chris Mills'])
self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into Python').values_list('name', flat=True)),
[])
def test_foreign_key_cross_database_protection(self):
"Operations that involve sharing FK objects across databases raise an error"
# Create a book and author on the default database
pro = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
marty = Person.objects.create(name="Marty Alchin")
# Create a book and author on the other database
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
# Set a foreign key with an object from a different database
with self.assertRaises(ValueError):
dive.editor = marty
# Set a foreign key set with an object from a different database
with self.assertRaises(ValueError):
with transaction.atomic(using='default'):
marty.edited = [pro, dive]
# Add to a foreign key set with an object from a different database
with self.assertRaises(ValueError):
with transaction.atomic(using='default'):
marty.edited.add(dive)
def test_foreign_key_deletion(self):
"Cascaded deletions of Foreign Key relations issue queries on the right database"
mark = Person.objects.using('other').create(name="Mark Pilgrim")
Pet.objects.using('other').create(name="Fido", owner=mark)
# Check the initial state
self.assertEqual(Person.objects.using('default').count(), 0)
self.assertEqual(Pet.objects.using('default').count(), 0)
self.assertEqual(Person.objects.using('other').count(), 1)
self.assertEqual(Pet.objects.using('other').count(), 1)
# Delete the person object, which will cascade onto the pet
mark.delete(using='other')
self.assertEqual(Person.objects.using('default').count(), 0)
self.assertEqual(Pet.objects.using('default').count(), 0)
# Both the pet and the person have been deleted from the right database
self.assertEqual(Person.objects.using('other').count(), 0)
self.assertEqual(Pet.objects.using('other').count(), 0)
def test_foreign_key_validation(self):
"ForeignKey.validate() uses the correct database"
mickey = Person.objects.using('other').create(name="Mickey")
pluto = Pet.objects.using('other').create(name="Pluto", owner=mickey)
self.assertIsNone(pluto.full_clean())
def test_o2o_separation(self):
"OneToOne fields are constrained to a single database"
# Create a user and profile on the default database
alice = User.objects.db_manager('default').create_user('alice', 'alice@example.com')
alice_profile = UserProfile.objects.using('default').create(user=alice, flavor='chocolate')
# Create a user and profile on the other database
bob = User.objects.db_manager('other').create_user('bob', 'bob@example.com')
bob_profile = UserProfile.objects.using('other').create(user=bob, flavor='crunchy frog')
# Retrieve related objects; queries should be database constrained
alice = User.objects.using('default').get(username="alice")
self.assertEqual(alice.userprofile.flavor, "chocolate")
bob = User.objects.using('other').get(username="bob")
self.assertEqual(bob.userprofile.flavor, "crunchy frog")
# Check that queries work across joins
self.assertEqual(list(User.objects.using('default').filter(userprofile__flavor='chocolate').values_list('username', flat=True)),
['alice'])
self.assertEqual(list(User.objects.using('other').filter(userprofile__flavor='chocolate').values_list('username', flat=True)),
[])
self.assertEqual(list(User.objects.using('default').filter(userprofile__flavor='crunchy frog').values_list('username', flat=True)),
[])
self.assertEqual(list(User.objects.using('other').filter(userprofile__flavor='crunchy frog').values_list('username', flat=True)),
['bob'])
        # Re-fetch the objects to clear caches
alice_profile = UserProfile.objects.using('default').get(flavor='chocolate')
bob_profile = UserProfile.objects.using('other').get(flavor='crunchy frog')
# Retrieve related object by descriptor. Related objects should be database-bound
self.assertEqual(alice_profile.user.username, 'alice')
self.assertEqual(bob_profile.user.username, 'bob')
def test_o2o_cross_database_protection(self):
"Operations that involve sharing FK objects across databases raise an error"
# Create a user and profile on the default database
alice = User.objects.db_manager('default').create_user('alice', 'alice@example.com')
# Create a user and profile on the other database
bob = User.objects.db_manager('other').create_user('bob', 'bob@example.com')
# Set a one-to-one relation with an object from a different database
alice_profile = UserProfile.objects.using('default').create(user=alice, flavor='chocolate')
with self.assertRaises(ValueError):
bob.userprofile = alice_profile
# BUT! if you assign a FK object when the base object hasn't
# been saved yet, you implicitly assign the database for the
# base object.
bob_profile = UserProfile.objects.using('other').create(user=bob, flavor='crunchy frog')
new_bob_profile = UserProfile(flavor="spring surprise")
# assigning a profile requires an explicit pk as the object isn't saved
charlie = User(pk=51, username='charlie', email='charlie@example.com')
charlie.set_unusable_password()
# initially, no db assigned
self.assertEqual(new_bob_profile._state.db, None)
self.assertEqual(charlie._state.db, None)
# old object comes from 'other', so the new object is set to use 'other'...
new_bob_profile.user = bob
charlie.userprofile = bob_profile
self.assertEqual(new_bob_profile._state.db, 'other')
self.assertEqual(charlie._state.db, 'other')
# ... but it isn't saved yet
self.assertEqual(list(User.objects.using('other').values_list('username', flat=True)),
['bob'])
self.assertEqual(list(UserProfile.objects.using('other').values_list('flavor', flat=True)),
['crunchy frog'])
        # When saved (no using required), new objects go to 'other'
charlie.save()
bob_profile.save()
new_bob_profile.save()
self.assertEqual(list(User.objects.using('default').values_list('username', flat=True)),
['alice'])
self.assertEqual(list(User.objects.using('other').values_list('username', flat=True)),
['bob', 'charlie'])
self.assertEqual(list(UserProfile.objects.using('default').values_list('flavor', flat=True)),
['chocolate'])
self.assertEqual(list(UserProfile.objects.using('other').values_list('flavor', flat=True)),
['crunchy frog', 'spring surprise'])
# This also works if you assign the O2O relation in the constructor
denise = User.objects.db_manager('other').create_user('denise', 'denise@example.com')
denise_profile = UserProfile(flavor="tofu", user=denise)
self.assertEqual(denise_profile._state.db, 'other')
# ... but it isn't saved yet
self.assertEqual(list(UserProfile.objects.using('default').values_list('flavor', flat=True)),
['chocolate'])
self.assertEqual(list(UserProfile.objects.using('other').values_list('flavor', flat=True)),
['crunchy frog', 'spring surprise'])
# When saved, the new profile goes to 'other'
denise_profile.save()
self.assertEqual(list(UserProfile.objects.using('default').values_list('flavor', flat=True)),
['chocolate'])
self.assertEqual(list(UserProfile.objects.using('other').values_list('flavor', flat=True)),
['crunchy frog', 'spring surprise', 'tofu'])
def test_generic_key_separation(self):
"Generic fields are constrained to a single database"
        # Create a book and a review on the default database
pro = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
review1 = Review.objects.create(source="Python Monthly", content_object=pro)
        # Create a book and a review on the other database
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
review2 = Review.objects.using('other').create(source="Python Weekly", content_object=dive)
review1 = Review.objects.using('default').get(source="Python Monthly")
self.assertEqual(review1.content_object.title, "Pro Django")
review2 = Review.objects.using('other').get(source="Python Weekly")
self.assertEqual(review2.content_object.title, "Dive into Python")
        # Re-fetch the objects to clear caches
dive = Book.objects.using('other').get(title="Dive into Python")
# Retrieve related object by descriptor. Related objects should be database-bound
self.assertEqual(list(dive.reviews.all().values_list('source', flat=True)),
['Python Weekly'])
def test_generic_key_reverse_operations(self):
"Generic reverse manipulations are all constrained to a single DB"
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
temp = Book.objects.using('other').create(title="Temp",
published=datetime.date(2009, 5, 4))
review1 = Review.objects.using('other').create(source="Python Weekly", content_object=dive)
review2 = Review.objects.using('other').create(source="Python Monthly", content_object=temp)
self.assertEqual(list(Review.objects.using('default').filter(object_id=dive.pk).values_list('source', flat=True)),
[])
self.assertEqual(list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
['Python Weekly'])
# Add a second review
dive.reviews.add(review2)
self.assertEqual(list(Review.objects.using('default').filter(object_id=dive.pk).values_list('source', flat=True)),
[])
self.assertEqual(list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
['Python Monthly', 'Python Weekly'])
        # Remove the first review
dive.reviews.remove(review1)
self.assertEqual(list(Review.objects.using('default').filter(object_id=dive.pk).values_list('source', flat=True)),
[])
self.assertEqual(list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
['Python Monthly'])
# Clear all reviews
dive.reviews.clear()
self.assertEqual(list(Review.objects.using('default').filter(object_id=dive.pk).values_list('source', flat=True)),
[])
self.assertEqual(list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
[])
        # Create a review through the generic interface
dive.reviews.create(source='Python Daily')
self.assertEqual(list(Review.objects.using('default').filter(object_id=dive.pk).values_list('source', flat=True)),
[])
self.assertEqual(list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
['Python Daily'])
def test_generic_key_cross_database_protection(self):
"Operations that involve sharing generic key objects across databases raise an error"
        # Create a book and a review on the default database
pro = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
review1 = Review.objects.create(source="Python Monthly", content_object=pro)
        # Create a book and a review on the other database
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
Review.objects.using('other').create(source="Python Weekly", content_object=dive)
# Set a foreign key with an object from a different database
with self.assertRaises(ValueError):
review1.content_object = dive
# Add to a foreign key set with an object from a different database
with self.assertRaises(ValueError):
with transaction.atomic(using='other'):
dive.reviews.add(review1)
# BUT! if you assign a FK object when the base object hasn't
# been saved yet, you implicitly assign the database for the
# base object.
review3 = Review(source="Python Daily")
# initially, no db assigned
self.assertEqual(review3._state.db, None)
# Dive comes from 'other', so review3 is set to use 'other'...
review3.content_object = dive
self.assertEqual(review3._state.db, 'other')
# ... but it isn't saved yet
self.assertEqual(list(Review.objects.using('default').filter(object_id=pro.pk).values_list('source', flat=True)),
['Python Monthly'])
self.assertEqual(list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
['Python Weekly'])
        # When saved, the new review goes to 'other'
review3.save()
self.assertEqual(list(Review.objects.using('default').filter(object_id=pro.pk).values_list('source', flat=True)),
['Python Monthly'])
self.assertEqual(list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
['Python Daily', 'Python Weekly'])
def test_generic_key_deletion(self):
"Cascaded deletions of Generic Key relations issue queries on the right database"
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
Review.objects.using('other').create(source="Python Weekly", content_object=dive)
# Check the initial state
self.assertEqual(Book.objects.using('default').count(), 0)
self.assertEqual(Review.objects.using('default').count(), 0)
self.assertEqual(Book.objects.using('other').count(), 1)
self.assertEqual(Review.objects.using('other').count(), 1)
        # Delete the Book object, which will cascade onto the review
dive.delete(using='other')
self.assertEqual(Book.objects.using('default').count(), 0)
self.assertEqual(Review.objects.using('default').count(), 0)
        # Both the book and the review have been deleted from the right database
self.assertEqual(Book.objects.using('other').count(), 0)
self.assertEqual(Review.objects.using('other').count(), 0)
def test_ordering(self):
"get_next_by_XXX commands stick to a single database"
Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
learn = Book.objects.using('other').create(title="Learning Python",
published=datetime.date(2008, 7, 16))
self.assertEqual(learn.get_next_by_published().title, "Dive into Python")
self.assertEqual(dive.get_previous_by_published().title, "Learning Python")
def test_raw(self):
"test the raw() method across databases"
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
val = Book.objects.db_manager("other").raw('SELECT id FROM multiple_database_book')
self.assertQuerysetEqual(val, [dive.pk], attrgetter("pk"))
val = Book.objects.raw('SELECT id FROM multiple_database_book').using('other')
self.assertQuerysetEqual(val, [dive.pk], attrgetter("pk"))
def test_select_related(self):
"Database assignment is retained if an object is retrieved with select_related()"
# Create a book and author on the other database
mark = Person.objects.using('other').create(name="Mark Pilgrim")
Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4),
editor=mark)
# Retrieve the Person using select_related()
book = Book.objects.using('other').select_related('editor').get(title="Dive into Python")
# The editor instance should have a db state
self.assertEqual(book.editor._state.db, 'other')
def test_subquery(self):
"""Make sure as_sql works with subqueries and primary/replica."""
sub = Person.objects.using('other').filter(name='fff')
qs = Book.objects.filter(editor__in=sub)
# When you call __str__ on the query object, it doesn't know about using
# so it falls back to the default. If the subquery explicitly uses a
# different database, an error should be raised.
self.assertRaises(ValueError, str, qs.query)
# Evaluating the query shouldn't work, either
with self.assertRaises(ValueError):
for obj in qs:
pass
def test_related_manager(self):
"Related managers return managers, not querysets"
mark = Person.objects.using('other').create(name="Mark Pilgrim")
# extra_arg is removed by the BookManager's implementation of
# create(); but the BookManager's implementation won't get called
        # unless the related descriptors return a Manager, not a queryset
mark.book_set.create(title="Dive into Python",
published=datetime.date(2009, 5, 4),
extra_arg=True)
mark.book_set.get_or_create(title="Dive into Python",
published=datetime.date(2009, 5, 4),
extra_arg=True)
mark.edited.create(title="Dive into Water",
published=datetime.date(2009, 5, 4),
extra_arg=True)
mark.edited.get_or_create(title="Dive into Water",
published=datetime.date(2009, 5, 4),
extra_arg=True)
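# For orientation: the WriteRouter named in the router chains below is defined
# earlier in this module. Judging from test_partial_router further down, which
# expects db_for_write() to return 'writer' when WriteRouter heads the chain,
# a minimal sketch (an assumption, not the module's actual definition) is a
# router that only expresses an opinion on writes:
class _WriteRouterSketch(object):
    def db_for_write(self, model, **hints):
        # Route every write to a hypothetical 'writer' alias; all other
        # routing questions fall through to the next router in the chain.
        return 'writer'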
class ConnectionRouterTestCase(SimpleTestCase):
@override_settings(DATABASE_ROUTERS=[
'multiple_database.tests.TestRouter',
'multiple_database.tests.WriteRouter'])
def test_router_init_default(self):
connection_router = ConnectionRouter()
self.assertListEqual([r.__class__.__name__ for r in connection_router.routers],
['TestRouter', 'WriteRouter'])
def test_router_init_arg(self):
connection_router = ConnectionRouter([
'multiple_database.tests.TestRouter',
'multiple_database.tests.WriteRouter'
])
self.assertListEqual([r.__class__.__name__ for r in connection_router.routers],
['TestRouter', 'WriteRouter'])
# Init with instances instead of strings
connection_router = ConnectionRouter([TestRouter(), WriteRouter()])
self.assertListEqual([r.__class__.__name__ for r in connection_router.routers],
['TestRouter', 'WriteRouter'])
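# TestRouter is likewise defined earlier in this module. The assertions below
# (reads routed to 'other', writes to 'default', relations and migrations
# allowed) are consistent with a primary/replica-style sketch roughly like the
# hypothetical router here; the real definition may also consult instance
# hints:
class _PrimaryReplicaRouterSketch(object):
    def db_for_read(self, model, **hints):
        # Send all reads to the replica.
        return 'other'
    def db_for_write(self, model, **hints):
        # Send all writes to the primary.
        return 'default'
    def allow_relation(self, obj1, obj2, **hints):
        # Permit relations across the primary/replica pair.
        return True
    def allow_migrate(self, db, app_label, **hints):
        # Migrate every app on every database.
        return True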
# Make the 'other' database appear to be a replica of the 'default'
@override_settings(DATABASE_ROUTERS=[TestRouter()])
class RouterTestCase(TestCase):
multi_db = True
def test_db_selection(self):
"Check that querysets obey the router for db suggestions"
self.assertEqual(Book.objects.db, 'other')
self.assertEqual(Book.objects.all().db, 'other')
self.assertEqual(Book.objects.using('default').db, 'default')
self.assertEqual(Book.objects.db_manager('default').db, 'default')
self.assertEqual(Book.objects.db_manager('default').all().db, 'default')
def test_migrate_selection(self):
"Synchronization behavior is predictable"
self.assertTrue(router.allow_migrate_model('default', User))
self.assertTrue(router.allow_migrate_model('default', Book))
self.assertTrue(router.allow_migrate_model('other', User))
self.assertTrue(router.allow_migrate_model('other', Book))
with override_settings(DATABASE_ROUTERS=[TestRouter(), AuthRouter()]):
# Add the auth router to the chain. TestRouter is a universal
# synchronizer, so it should have no effect.
self.assertTrue(router.allow_migrate_model('default', User))
self.assertTrue(router.allow_migrate_model('default', Book))
self.assertTrue(router.allow_migrate_model('other', User))
self.assertTrue(router.allow_migrate_model('other', Book))
with override_settings(DATABASE_ROUTERS=[AuthRouter(), TestRouter()]):
# Now check what happens if the router order is reversed.
self.assertFalse(router.allow_migrate_model('default', User))
self.assertTrue(router.allow_migrate_model('default', Book))
self.assertTrue(router.allow_migrate_model('other', User))
self.assertTrue(router.allow_migrate_model('other', Book))
def test_migrate_legacy_router(self):
class LegacyRouter(object):
def allow_migrate(self, db, model):
"""
Deprecated allow_migrate signature should trigger
RemovedInDjango110Warning.
"""
assert db == 'default'
assert model is User
return True
with override_settings(DATABASE_ROUTERS=[LegacyRouter()]):
with warnings.catch_warnings(record=True) as recorded:
warnings.filterwarnings('always')
msg = (
"The signature of allow_migrate has changed from "
"allow_migrate(self, db, model) to "
"allow_migrate(self, db, app_label, model_name=None, **hints). "
"Support for the old signature will be removed in Django 1.10."
)
self.assertTrue(router.allow_migrate_model('default', User))
self.assertEqual(force_text(recorded.pop().message), msg)
self.assertEqual(recorded, [])
self.assertTrue(router.allow_migrate('default', 'app_label'))
self.assertEqual(force_text(recorded.pop().message), msg)
def test_partial_router(self):
"A router can choose to implement a subset of methods"
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
# First check the baseline behavior.
self.assertEqual(router.db_for_read(User), 'other')
self.assertEqual(router.db_for_read(Book), 'other')
self.assertEqual(router.db_for_write(User), 'default')
self.assertEqual(router.db_for_write(Book), 'default')
self.assertTrue(router.allow_relation(dive, dive))
self.assertTrue(router.allow_migrate_model('default', User))
self.assertTrue(router.allow_migrate_model('default', Book))
with override_settings(DATABASE_ROUTERS=[WriteRouter(), AuthRouter(), TestRouter()]):
self.assertEqual(router.db_for_read(User), 'default')
self.assertEqual(router.db_for_read(Book), 'other')
self.assertEqual(router.db_for_write(User), 'writer')
self.assertEqual(router.db_for_write(Book), 'writer')
self.assertTrue(router.allow_relation(dive, dive))
self.assertFalse(router.allow_migrate_model('default', User))
self.assertTrue(router.allow_migrate_model('default', Book))
def test_database_routing(self):
marty = Person.objects.using('default').create(name="Marty Alchin")
pro = Book.objects.using('default').create(title="Pro Django",
published=datetime.date(2008, 12, 16),
editor=marty)
pro.authors = [marty]
# Create a book and author on the other database
Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
# An update query will be routed to the default database
Book.objects.filter(title='Pro Django').update(pages=200)
with self.assertRaises(Book.DoesNotExist):
# By default, the get query will be directed to 'other'
Book.objects.get(title='Pro Django')
# But the same query issued explicitly at a database will work.
pro = Book.objects.using('default').get(title='Pro Django')
# Check that the update worked.
self.assertEqual(pro.pages, 200)
# An update query with an explicit using clause will be routed
# to the requested database.
Book.objects.using('other').filter(title='Dive into Python').update(pages=300)
self.assertEqual(Book.objects.get(title='Dive into Python').pages, 300)
# Related object queries stick to the same database
# as the original object, regardless of the router
self.assertEqual(list(pro.authors.values_list('name', flat=True)), ['Marty Alchin'])
self.assertEqual(pro.editor.name, 'Marty Alchin')
# get_or_create is a special case. The get needs to be targeted at
# the write database in order to avoid potential transaction
# consistency problems
book, created = Book.objects.get_or_create(title="Pro Django")
self.assertFalse(created)
book, created = Book.objects.get_or_create(title="Dive Into Python",
defaults={'published': datetime.date(2009, 5, 4)})
self.assertTrue(created)
# Check the head count of objects
self.assertEqual(Book.objects.using('default').count(), 2)
self.assertEqual(Book.objects.using('other').count(), 1)
# If a database isn't specified, the read database is used
self.assertEqual(Book.objects.count(), 1)
# A delete query will also be routed to the default database
Book.objects.filter(pages__gt=150).delete()
# The default database has lost the book.
self.assertEqual(Book.objects.using('default').count(), 1)
self.assertEqual(Book.objects.using('other').count(), 1)
def test_invalid_set_foreign_key_assignment(self):
marty = Person.objects.using('default').create(name="Marty Alchin")
dive = Book.objects.using('other').create(
title="Dive into Python",
published=datetime.date(2009, 5, 4),
)
# Set a foreign key set with an object from a different database
msg = "<Book: Dive into Python> instance isn't saved. Use bulk=False or save the object first."
with self.assertRaisesMessage(ValueError, msg):
marty.edited.set([dive])
def test_foreign_key_cross_database_protection(self):
"Foreign keys can cross databases if they two databases have a common source"
# Create a book and author on the default database
pro = Book.objects.using('default').create(title="Pro Django",
published=datetime.date(2008, 12, 16))
marty = Person.objects.using('default').create(name="Marty Alchin")
# Create a book and author on the other database
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
mark = Person.objects.using('other').create(name="Mark Pilgrim")
# Set a foreign key with an object from a different database
try:
dive.editor = marty
except ValueError:
self.fail("Assignment across primary/replica databases with a common source should be ok")
# Database assignments of original objects haven't changed...
self.assertEqual(marty._state.db, 'default')
self.assertEqual(pro._state.db, 'default')
self.assertEqual(dive._state.db, 'other')
self.assertEqual(mark._state.db, 'other')
# ... but they will when the affected object is saved.
dive.save()
self.assertEqual(dive._state.db, 'default')
# ...and the source database now has a copy of any object saved
try:
Book.objects.using('default').get(title='Dive into Python').delete()
except Book.DoesNotExist:
self.fail('Source database should have a copy of saved object')
# This isn't a real primary/replica database, so restore the original from other
dive = Book.objects.using('other').get(title='Dive into Python')
self.assertEqual(dive._state.db, 'other')
# Set a foreign key set with an object from a different database
try:
marty.edited.set([pro, dive], bulk=False)
except ValueError:
self.fail("Assignment across primary/replica databases with a common source should be ok")
# Assignment implies a save, so database assignments of original objects have changed...
self.assertEqual(marty._state.db, 'default')
self.assertEqual(pro._state.db, 'default')
self.assertEqual(dive._state.db, 'default')
self.assertEqual(mark._state.db, 'other')
# ...and the source database now has a copy of any object saved
try:
Book.objects.using('default').get(title='Dive into Python').delete()
except Book.DoesNotExist:
self.fail('Source database should have a copy of saved object')
# This isn't a real primary/replica database, so restore the original from other
dive = Book.objects.using('other').get(title='Dive into Python')
self.assertEqual(dive._state.db, 'other')
# Add to a foreign key set with an object from a different database
try:
marty.edited.add(dive, bulk=False)
except ValueError:
self.fail("Assignment across primary/replica databases with a common source should be ok")
# Add implies a save, so database assignments of original objects have changed...
self.assertEqual(marty._state.db, 'default')
self.assertEqual(pro._state.db, 'default')
self.assertEqual(dive._state.db, 'default')
self.assertEqual(mark._state.db, 'other')
# ...and the source database now has a copy of any object saved
try:
Book.objects.using('default').get(title='Dive into Python').delete()
except Book.DoesNotExist:
self.fail('Source database should have a copy of saved object')
# This isn't a real primary/replica database, so restore the original from other
dive = Book.objects.using('other').get(title='Dive into Python')
# If you assign a FK object when the base object hasn't
# been saved yet, you implicitly assign the database for the
# base object.
chris = Person(name="Chris Mills")
html5 = Book(title="Dive into HTML5", published=datetime.date(2010, 3, 15))
# initially, no db assigned
self.assertEqual(chris._state.db, None)
self.assertEqual(html5._state.db, None)
# old object comes from 'other', so the new object is set to use the
# source of 'other'...
self.assertEqual(dive._state.db, 'other')
chris.save()
dive.editor = chris
html5.editor = mark
self.assertEqual(dive._state.db, 'other')
self.assertEqual(mark._state.db, 'other')
self.assertEqual(chris._state.db, 'default')
self.assertEqual(html5._state.db, 'default')
# This also works if you assign the FK in the constructor
water = Book(title="Dive into Water", published=datetime.date(2001, 1, 1), editor=mark)
self.assertEqual(water._state.db, 'default')
# For the remainder of this test, create a copy of 'mark' in the
# 'default' database to prevent integrity errors on backends that
        # don't defer constraint checks until the end of the transaction
mark.save(using='default')
        # This moved 'mark' to the 'default' database; move it back to 'other'
mark.save(using='other')
self.assertEqual(mark._state.db, 'other')
# If you create an object through a FK relation, it will be
# written to the write database, even if the original object
# was on the read database
cheesecake = mark.edited.create(title='Dive into Cheesecake', published=datetime.date(2010, 3, 15))
self.assertEqual(cheesecake._state.db, 'default')
# Same goes for get_or_create, regardless of whether getting or creating
cheesecake, created = mark.edited.get_or_create(title='Dive into Cheesecake', published=datetime.date(2010, 3, 15))
self.assertEqual(cheesecake._state.db, 'default')
puddles, created = mark.edited.get_or_create(title='Dive into Puddles', published=datetime.date(2010, 3, 15))
self.assertEqual(puddles._state.db, 'default')
def test_m2m_cross_database_protection(self):
"M2M relations can cross databases if the database share a source"
        # Create books and authors on the opposite of their usual databases
pro = Book.objects.using('other').create(pk=1, title="Pro Django",
published=datetime.date(2008, 12, 16))
marty = Person.objects.using('other').create(pk=1, name="Marty Alchin")
dive = Book.objects.using('default').create(pk=2, title="Dive into Python",
published=datetime.date(2009, 5, 4))
mark = Person.objects.using('default').create(pk=2, name="Mark Pilgrim")
# Now save back onto the usual database.
        # This simulates primary/replica - the objects exist on both databases,
# but the _state.db is as it is for all other tests.
pro.save(using='default')
marty.save(using='default')
dive.save(using='other')
mark.save(using='other')
# Check that we have 2 of both types of object on both databases
self.assertEqual(Book.objects.using('default').count(), 2)
self.assertEqual(Book.objects.using('other').count(), 2)
self.assertEqual(Person.objects.using('default').count(), 2)
self.assertEqual(Person.objects.using('other').count(), 2)
# Set a m2m set with an object from a different database
try:
marty.book_set = [pro, dive]
except ValueError:
self.fail("Assignment across primary/replica databases with a common source should be ok")
# Database assignments don't change
self.assertEqual(marty._state.db, 'default')
self.assertEqual(pro._state.db, 'default')
self.assertEqual(dive._state.db, 'other')
self.assertEqual(mark._state.db, 'other')
# All m2m relations should be saved on the default database
self.assertEqual(Book.authors.through.objects.using('default').count(), 2)
self.assertEqual(Book.authors.through.objects.using('other').count(), 0)
# Reset relations
Book.authors.through.objects.using('default').delete()
# Add to an m2m with an object from a different database
try:
marty.book_set.add(dive)
except ValueError:
self.fail("Assignment across primary/replica databases with a common source should be ok")
# Database assignments don't change
self.assertEqual(marty._state.db, 'default')
self.assertEqual(pro._state.db, 'default')
self.assertEqual(dive._state.db, 'other')
self.assertEqual(mark._state.db, 'other')
# All m2m relations should be saved on the default database
self.assertEqual(Book.authors.through.objects.using('default').count(), 1)
self.assertEqual(Book.authors.through.objects.using('other').count(), 0)
# Reset relations
Book.authors.through.objects.using('default').delete()
# Set a reverse m2m with an object from a different database
try:
dive.authors = [mark, marty]
except ValueError:
self.fail("Assignment across primary/replica databases with a common source should be ok")
# Database assignments don't change
self.assertEqual(marty._state.db, 'default')
self.assertEqual(pro._state.db, 'default')
self.assertEqual(dive._state.db, 'other')
self.assertEqual(mark._state.db, 'other')
# All m2m relations should be saved on the default database
self.assertEqual(Book.authors.through.objects.using('default').count(), 2)
self.assertEqual(Book.authors.through.objects.using('other').count(), 0)
# Reset relations
Book.authors.through.objects.using('default').delete()
self.assertEqual(Book.authors.through.objects.using('default').count(), 0)
self.assertEqual(Book.authors.through.objects.using('other').count(), 0)
# Add to a reverse m2m with an object from a different database
try:
dive.authors.add(marty)
except ValueError:
self.fail("Assignment across primary/replica databases with a common source should be ok")
# Database assignments don't change
self.assertEqual(marty._state.db, 'default')
self.assertEqual(pro._state.db, 'default')
self.assertEqual(dive._state.db, 'other')
self.assertEqual(mark._state.db, 'other')
# All m2m relations should be saved on the default database
self.assertEqual(Book.authors.through.objects.using('default').count(), 1)
self.assertEqual(Book.authors.through.objects.using('other').count(), 0)
# If you create an object through a M2M relation, it will be
# written to the write database, even if the original object
# was on the read database
alice = dive.authors.create(name='Alice')
self.assertEqual(alice._state.db, 'default')
# Same goes for get_or_create, regardless of whether getting or creating
alice, created = dive.authors.get_or_create(name='Alice')
self.assertEqual(alice._state.db, 'default')
bob, created = dive.authors.get_or_create(name='Bob')
self.assertEqual(bob._state.db, 'default')
def test_o2o_cross_database_protection(self):
"Operations that involve sharing FK objects across databases raise an error"
# Create a user and profile on the default database
alice = User.objects.db_manager('default').create_user('alice', 'alice@example.com')
# Create a user and profile on the other database
bob = User.objects.db_manager('other').create_user('bob', 'bob@example.com')
# Set a one-to-one relation with an object from a different database
alice_profile = UserProfile.objects.create(user=alice, flavor='chocolate')
try:
bob.userprofile = alice_profile
except ValueError:
self.fail("Assignment across primary/replica databases with a common source should be ok")
# Database assignments of original objects haven't changed...
self.assertEqual(alice._state.db, 'default')
self.assertEqual(alice_profile._state.db, 'default')
self.assertEqual(bob._state.db, 'other')
# ... but they will when the affected object is saved.
bob.save()
self.assertEqual(bob._state.db, 'default')
def test_generic_key_cross_database_protection(self):
"Generic Key operations can span databases if they share a source"
# Create a book and author on the default database
pro = Book.objects.using(
'default').create(title="Pro Django", published=datetime.date(2008, 12, 16))
review1 = Review.objects.using(
'default').create(source="Python Monthly", content_object=pro)
# Create a book and author on the other database
dive = Book.objects.using(
'other').create(title="Dive into Python", published=datetime.date(2009, 5, 4))
review2 = Review.objects.using(
'other').create(source="Python Weekly", content_object=dive)
# Set a generic foreign key with an object from a different database
try:
review1.content_object = dive
except ValueError:
self.fail("Assignment across primary/replica databases with a common source should be ok")
# Database assignments of original objects haven't changed...
self.assertEqual(pro._state.db, 'default')
self.assertEqual(review1._state.db, 'default')
self.assertEqual(dive._state.db, 'other')
self.assertEqual(review2._state.db, 'other')
# ... but they will when the affected object is saved.
dive.save()
self.assertEqual(review1._state.db, 'default')
self.assertEqual(dive._state.db, 'default')
# ...and the source database now has a copy of any object saved
try:
Book.objects.using('default').get(title='Dive into Python').delete()
except Book.DoesNotExist:
self.fail('Source database should have a copy of saved object')
# This isn't a real primary/replica database, so restore the original from other
dive = Book.objects.using('other').get(title='Dive into Python')
self.assertEqual(dive._state.db, 'other')
# Add to a generic foreign key set with an object from a different database
try:
dive.reviews.add(review1)
except ValueError:
self.fail("Assignment across primary/replica databases with a common source should be ok")
# Database assignments of original objects haven't changed...
self.assertEqual(pro._state.db, 'default')
self.assertEqual(review1._state.db, 'default')
self.assertEqual(dive._state.db, 'other')
self.assertEqual(review2._state.db, 'other')
# ... but they will when the affected object is saved.
dive.save()
self.assertEqual(dive._state.db, 'default')
# ...and the source database now has a copy of any object saved
try:
Book.objects.using('default').get(title='Dive into Python').delete()
except Book.DoesNotExist:
self.fail('Source database should have a copy of saved object')
# BUT! if you assign a FK object when the base object hasn't
# been saved yet, you implicitly assign the database for the
# base object.
review3 = Review(source="Python Daily")
# initially, no db assigned
self.assertEqual(review3._state.db, None)
# Dive comes from 'other', so review3 is set to use the source of 'other'...
review3.content_object = dive
self.assertEqual(review3._state.db, 'default')
        # If you create an object through a generic key relation, it will be
# written to the write database, even if the original object
# was on the read database
dive = Book.objects.using('other').get(title='Dive into Python')
nyt = dive.reviews.create(source="New York Times", content_object=dive)
self.assertEqual(nyt._state.db, 'default')
def test_m2m_managers(self):
"M2M relations are represented by managers, and can be controlled like managers"
pro = Book.objects.using('other').create(pk=1, title="Pro Django",
published=datetime.date(2008, 12, 16))
marty = Person.objects.using('other').create(pk=1, name="Marty Alchin")
self.assertEqual(pro.authors.db, 'other')
self.assertEqual(pro.authors.db_manager('default').db, 'default')
self.assertEqual(pro.authors.db_manager('default').all().db, 'default')
self.assertEqual(marty.book_set.db, 'other')
self.assertEqual(marty.book_set.db_manager('default').db, 'default')
self.assertEqual(marty.book_set.db_manager('default').all().db, 'default')
def test_foreign_key_managers(self):
"FK reverse relations are represented by managers, and can be controlled like managers"
marty = Person.objects.using('other').create(pk=1, name="Marty Alchin")
Book.objects.using('other').create(pk=1, title="Pro Django",
published=datetime.date(2008, 12, 16),
editor=marty)
self.assertEqual(marty.edited.db, 'other')
self.assertEqual(marty.edited.db_manager('default').db, 'default')
self.assertEqual(marty.edited.db_manager('default').all().db, 'default')
def test_generic_key_managers(self):
"Generic key relations are represented by managers, and can be controlled like managers"
pro = Book.objects.using('other').create(title="Pro Django",
published=datetime.date(2008, 12, 16))
Review.objects.using('other').create(source="Python Monthly",
content_object=pro)
self.assertEqual(pro.reviews.db, 'other')
self.assertEqual(pro.reviews.db_manager('default').db, 'default')
self.assertEqual(pro.reviews.db_manager('default').all().db, 'default')
def test_subquery(self):
"""Make sure as_sql works with subqueries and primary/replica."""
# Create a book and author on the other database
mark = Person.objects.using('other').create(name="Mark Pilgrim")
Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4),
editor=mark)
sub = Person.objects.filter(name='Mark Pilgrim')
qs = Book.objects.filter(editor__in=sub)
# When you call __str__ on the query object, it doesn't know about using
# so it falls back to the default. Don't let routing instructions
# force the subquery to an incompatible database.
str(qs.query)
# If you evaluate the query, it should work, running on 'other'
self.assertEqual(list(qs.values_list('title', flat=True)), ['Dive into Python'])
def test_deferred_models(self):
mark_def = Person.objects.using('default').create(name="Mark Pilgrim")
mark_other = Person.objects.using('other').create(name="Mark Pilgrim")
orig_b = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4),
editor=mark_other)
b = Book.objects.using('other').only('title').get(pk=orig_b.pk)
self.assertEqual(b.published, datetime.date(2009, 5, 4))
b = Book.objects.using('other').only('title').get(pk=orig_b.pk)
b.editor = mark_def
b.save(using='default')
self.assertEqual(Book.objects.using('default').get(pk=b.pk).published,
datetime.date(2009, 5, 4))
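# AuthRouter, used by the test case below, is defined earlier in this module.
# Its behavior can be inferred from the assertions in this file (auth reads go
# to 'default' in test_partial_router, create_user() writes land on 'other',
# and migrating auth is only allowed on 'other'); a rough sketch, not the
# actual definition, follows:
class _AuthRouterSketch(object):
    def db_for_read(self, model, **hints):
        # Reads of auth models are pointed at 'default'.
        if model._meta.app_label == 'auth':
            return 'default'
        return None
    def db_for_write(self, model, **hints):
        # Writes of auth models are pointed at 'other'.
        if model._meta.app_label == 'auth':
            return 'other'
        return None
    def allow_migrate(self, db, app_label, **hints):
        # The auth app may only be migrated on 'other'.
        if app_label == 'auth':
            return db == 'other'
        return None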
@override_settings(DATABASE_ROUTERS=[AuthRouter()])
class AuthTestCase(TestCase):
multi_db = True
def test_auth_manager(self):
"The methods on the auth manager obey database hints"
# Create one user using default allocation policy
User.objects.create_user('alice', 'alice@example.com')
# Create another user, explicitly specifying the database
User.objects.db_manager('default').create_user('bob', 'bob@example.com')
        # The first user only exists on the other database
alice = User.objects.using('other').get(username='alice')
self.assertEqual(alice.username, 'alice')
self.assertEqual(alice._state.db, 'other')
self.assertRaises(User.DoesNotExist, User.objects.using('default').get, username='alice')
# The second user only exists on the default database
bob = User.objects.using('default').get(username='bob')
self.assertEqual(bob.username, 'bob')
self.assertEqual(bob._state.db, 'default')
self.assertRaises(User.DoesNotExist, User.objects.using('other').get, username='bob')
# That is... there is one user on each database
self.assertEqual(User.objects.using('default').count(), 1)
self.assertEqual(User.objects.using('other').count(), 1)
def test_dumpdata(self):
"Check that dumpdata honors allow_migrate restrictions on the router"
User.objects.create_user('alice', 'alice@example.com')
User.objects.db_manager('default').create_user('bob', 'bob@example.com')
# Check that dumping the default database doesn't try to include auth
# because allow_migrate prohibits auth on default
new_io = StringIO()
management.call_command('dumpdata', 'auth', format='json', database='default', stdout=new_io)
command_output = new_io.getvalue().strip()
self.assertEqual(command_output, '[]')
# Check that dumping the other database does include auth
new_io = StringIO()
management.call_command('dumpdata', 'auth', format='json', database='other', stdout=new_io)
command_output = new_io.getvalue().strip()
self.assertIn('"email": "alice@example.com"', command_output)
class AntiPetRouter(object):
# A router that only expresses an opinion on migrate,
# passing pets to the 'other' database
def allow_migrate(self, db, app_label, model_name=None, **hints):
if db == 'other':
return model_name == 'pet'
else:
return model_name != 'pet'
class FixtureTestCase(TestCase):
multi_db = True
fixtures = ['multidb-common', 'multidb']
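    # Note: with multiple databases, loaddata also picks up fixtures whose
    # names are qualified with a database alias (for example, hypothetical
    # multidb.default.json and multidb.other.json files alongside
    # multidb-common.json), which is how one fixture label can seed each
    # database with different rows.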
@override_settings(DATABASE_ROUTERS=[AntiPetRouter()])
def test_fixture_loading(self):
"Multi-db fixtures are loaded correctly"
# Check that "Pro Django" exists on the default database, but not on other database
try:
Book.objects.get(title="Pro Django")
Book.objects.using('default').get(title="Pro Django")
except Book.DoesNotExist:
self.fail('"Pro Django" should exist on default database')
self.assertRaises(
Book.DoesNotExist,
Book.objects.using('other').get,
title="Pro Django"
)
# Check that "Dive into Python" exists on the default database, but not on other database
try:
Book.objects.using('other').get(title="Dive into Python")
except Book.DoesNotExist:
self.fail('"Dive into Python" should exist on other database')
self.assertRaises(
Book.DoesNotExist,
Book.objects.get,
title="Dive into Python"
)
self.assertRaises(
Book.DoesNotExist,
Book.objects.using('default').get,
title="Dive into Python"
)
# Check that "Definitive Guide" exists on the both databases
try:
Book.objects.get(title="The Definitive Guide to Django")
Book.objects.using('default').get(title="The Definitive Guide to Django")
Book.objects.using('other').get(title="The Definitive Guide to Django")
except Book.DoesNotExist:
self.fail('"The Definitive Guide to Django" should exist on both databases')
@override_settings(DATABASE_ROUTERS=[AntiPetRouter()])
def test_pseudo_empty_fixtures(self):
"A fixture can contain entries, but lead to nothing in the database; this shouldn't raise an error (ref #14068)"
new_io = StringIO()
management.call_command('loaddata', 'pets', stdout=new_io, stderr=new_io)
command_output = new_io.getvalue().strip()
# No objects will actually be loaded
self.assertEqual(command_output, "Installed 0 object(s) (of 2) from 1 fixture(s)")
class PickleQuerySetTestCase(TestCase):
multi_db = True
def test_pickling(self):
for db in connections:
Book.objects.using(db).create(title='Dive into Python', published=datetime.date(2009, 5, 4))
qs = Book.objects.all()
self.assertEqual(qs.db, pickle.loads(pickle.dumps(qs)).db)
class DatabaseReceiver(object):
"""
Used in the tests for the database argument in signals (#13552)
"""
def __call__(self, signal, sender, **kwargs):
self._database = kwargs['using']
class WriteToOtherRouter(object):
"""
A router that sends all writes to the other database.
"""
def db_for_write(self, model, **hints):
return "other"
class SignalTests(TestCase):
multi_db = True
def override_router(self):
return override_settings(DATABASE_ROUTERS=[WriteToOtherRouter()])
def test_database_arg_save_and_delete(self):
"""
Tests that the pre/post_save signal contains the correct database.
(#13552)
"""
# Make some signal receivers
pre_save_receiver = DatabaseReceiver()
post_save_receiver = DatabaseReceiver()
pre_delete_receiver = DatabaseReceiver()
post_delete_receiver = DatabaseReceiver()
# Make model and connect receivers
signals.pre_save.connect(sender=Person, receiver=pre_save_receiver)
signals.post_save.connect(sender=Person, receiver=post_save_receiver)
signals.pre_delete.connect(sender=Person, receiver=pre_delete_receiver)
signals.post_delete.connect(sender=Person, receiver=post_delete_receiver)
p = Person.objects.create(name='Darth Vader')
# Save and test receivers got calls
p.save()
self.assertEqual(pre_save_receiver._database, DEFAULT_DB_ALIAS)
self.assertEqual(post_save_receiver._database, DEFAULT_DB_ALIAS)
# Delete, and test
p.delete()
self.assertEqual(pre_delete_receiver._database, DEFAULT_DB_ALIAS)
self.assertEqual(post_delete_receiver._database, DEFAULT_DB_ALIAS)
# Save again to a different database
p.save(using="other")
self.assertEqual(pre_save_receiver._database, "other")
self.assertEqual(post_save_receiver._database, "other")
# Delete, and test
p.delete(using="other")
self.assertEqual(pre_delete_receiver._database, "other")
self.assertEqual(post_delete_receiver._database, "other")
signals.pre_save.disconnect(sender=Person, receiver=pre_save_receiver)
signals.post_save.disconnect(sender=Person, receiver=post_save_receiver)
signals.pre_delete.disconnect(sender=Person, receiver=pre_delete_receiver)
signals.post_delete.disconnect(sender=Person, receiver=post_delete_receiver)
def test_database_arg_m2m(self):
"""
Test that the m2m_changed signal has a correct database arg (#13552)
"""
# Make a receiver
receiver = DatabaseReceiver()
# Connect it
signals.m2m_changed.connect(receiver=receiver)
# Create the models that will be used for the tests
b = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
p = Person.objects.create(name="Marty Alchin")
# Create a copy of the models on the 'other' database to prevent
        # integrity errors on backends that don't defer constraint checks
Book.objects.using('other').create(pk=b.pk, title=b.title,
published=b.published)
Person.objects.using('other').create(pk=p.pk, name=p.name)
# Test addition
b.authors.add(p)
self.assertEqual(receiver._database, DEFAULT_DB_ALIAS)
with self.override_router():
b.authors.add(p)
self.assertEqual(receiver._database, "other")
# Test removal
b.authors.remove(p)
self.assertEqual(receiver._database, DEFAULT_DB_ALIAS)
with self.override_router():
b.authors.remove(p)
self.assertEqual(receiver._database, "other")
# Test addition in reverse
p.book_set.add(b)
self.assertEqual(receiver._database, DEFAULT_DB_ALIAS)
with self.override_router():
p.book_set.add(b)
self.assertEqual(receiver._database, "other")
# Test clearing
b.authors.clear()
self.assertEqual(receiver._database, DEFAULT_DB_ALIAS)
with self.override_router():
b.authors.clear()
self.assertEqual(receiver._database, "other")
class AttributeErrorRouter(object):
"A router to test the exception handling of ConnectionRouter"
def db_for_read(self, model, **hints):
raise AttributeError
def db_for_write(self, model, **hints):
raise AttributeError
class RouterAttributeErrorTestCase(TestCase):
multi_db = True
def override_router(self):
return override_settings(DATABASE_ROUTERS=[AttributeErrorRouter()])
def test_attribute_error_read(self):
"Check that the AttributeError from AttributeErrorRouter bubbles up"
b = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
with self.override_router():
self.assertRaises(AttributeError, Book.objects.get, pk=b.pk)
def test_attribute_error_save(self):
"Check that the AttributeError from AttributeErrorRouter bubbles up"
dive = Book()
dive.title = "Dive into Python"
dive.published = datetime.date(2009, 5, 4)
with self.override_router():
self.assertRaises(AttributeError, dive.save)
def test_attribute_error_delete(self):
"Check that the AttributeError from AttributeErrorRouter bubbles up"
b = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
p = Person.objects.create(name="Marty Alchin")
b.authors = [p]
b.editor = p
with self.override_router():
self.assertRaises(AttributeError, b.delete)
def test_attribute_error_m2m(self):
"Check that the AttributeError from AttributeErrorRouter bubbles up"
b = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
p = Person.objects.create(name="Marty Alchin")
with self.override_router():
self.assertRaises(AttributeError, setattr, b, 'authors', [p])
class ModelMetaRouter(object):
"A router to ensure model arguments are real model classes"
def db_for_write(self, model, **hints):
if not hasattr(model, '_meta'):
raise ValueError
@override_settings(DATABASE_ROUTERS=[ModelMetaRouter()])
class RouterModelArgumentTestCase(TestCase):
multi_db = True
def test_m2m_collection(self):
b = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
p = Person.objects.create(name="Marty Alchin")
# test add
b.authors.add(p)
# test remove
b.authors.remove(p)
# test clear
b.authors.clear()
# test setattr
b.authors = [p]
        # test delete, which cascades through the M2M collection
b.delete()
def test_foreignkey_collection(self):
person = Person.objects.create(name='Bob')
Pet.objects.create(owner=person, name='Wart')
# test related FK collection
person.delete()
class SyncOnlyDefaultDatabaseRouter(object):
def allow_migrate(self, db, app_label, **hints):
return db == DEFAULT_DB_ALIAS
class MigrateTestCase(TestCase):
available_apps = [
'multiple_database',
'django.contrib.auth',
'django.contrib.contenttypes'
]
multi_db = True
def test_migrate_to_other_database(self):
"""Regression test for #16039: migrate with --database option."""
cts = ContentType.objects.using('other').filter(app_label='multiple_database')
count = cts.count()
self.assertGreater(count, 0)
cts.delete()
management.call_command('migrate', verbosity=0, interactive=False, database='other')
self.assertEqual(cts.count(), count)
def test_migrate_to_other_database_with_router(self):
"""Regression test for #16039: migrate with --database option."""
cts = ContentType.objects.using('other').filter(app_label='multiple_database')
cts.delete()
with override_settings(DATABASE_ROUTERS=[SyncOnlyDefaultDatabaseRouter()]):
management.call_command('migrate', verbosity=0, interactive=False, database='other')
self.assertEqual(cts.count(), 0)
class RouterUsed(Exception):
WRITE = 'write'
def __init__(self, mode, model, hints):
self.mode = mode
self.model = model
self.hints = hints
class RouteForWriteTestCase(TestCase):
multi_db = True
class WriteCheckRouter(object):
def db_for_write(self, model, **hints):
raise RouterUsed(mode=RouterUsed.WRITE, model=model, hints=hints)
def override_router(self):
return override_settings(DATABASE_ROUTERS=[RouteForWriteTestCase.WriteCheckRouter()])
def test_fk_delete(self):
owner = Person.objects.create(name='Someone')
pet = Pet.objects.create(name='fido', owner=owner)
try:
with self.override_router():
pet.owner.delete()
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Person)
self.assertEqual(e.hints, {'instance': owner})
def test_reverse_fk_delete(self):
owner = Person.objects.create(name='Someone')
to_del_qs = owner.pet_set.all()
try:
with self.override_router():
to_del_qs.delete()
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Pet)
self.assertEqual(e.hints, {'instance': owner})
def test_reverse_fk_get_or_create(self):
owner = Person.objects.create(name='Someone')
try:
with self.override_router():
owner.pet_set.get_or_create(name='fido')
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Pet)
self.assertEqual(e.hints, {'instance': owner})
def test_reverse_fk_update(self):
owner = Person.objects.create(name='Someone')
Pet.objects.create(name='fido', owner=owner)
try:
with self.override_router():
owner.pet_set.update(name='max')
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Pet)
self.assertEqual(e.hints, {'instance': owner})
def test_m2m_add(self):
auth = Person.objects.create(name='Someone')
book = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
try:
with self.override_router():
book.authors.add(auth)
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Book.authors.through)
self.assertEqual(e.hints, {'instance': book})
def test_m2m_clear(self):
auth = Person.objects.create(name='Someone')
book = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
book.authors.add(auth)
try:
with self.override_router():
book.authors.clear()
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Book.authors.through)
self.assertEqual(e.hints, {'instance': book})
def test_m2m_delete(self):
auth = Person.objects.create(name='Someone')
book = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
book.authors.add(auth)
try:
with self.override_router():
book.authors.all().delete()
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Person)
self.assertEqual(e.hints, {'instance': book})
def test_m2m_get_or_create(self):
Person.objects.create(name='Someone')
book = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
try:
with self.override_router():
book.authors.get_or_create(name='Someone else')
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Book)
self.assertEqual(e.hints, {'instance': book})
def test_m2m_remove(self):
auth = Person.objects.create(name='Someone')
book = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
book.authors.add(auth)
try:
with self.override_router():
book.authors.remove(auth)
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Book.authors.through)
self.assertEqual(e.hints, {'instance': book})
def test_m2m_update(self):
auth = Person.objects.create(name='Someone')
book = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
book.authors.add(auth)
try:
with self.override_router():
book.authors.all().update(name='Different')
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Person)
self.assertEqual(e.hints, {'instance': book})
def test_reverse_m2m_add(self):
auth = Person.objects.create(name='Someone')
book = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
try:
with self.override_router():
auth.book_set.add(book)
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Book.authors.through)
self.assertEqual(e.hints, {'instance': auth})
def test_reverse_m2m_clear(self):
auth = Person.objects.create(name='Someone')
book = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
book.authors.add(auth)
try:
with self.override_router():
auth.book_set.clear()
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Book.authors.through)
self.assertEqual(e.hints, {'instance': auth})
def test_reverse_m2m_delete(self):
auth = Person.objects.create(name='Someone')
book = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
book.authors.add(auth)
try:
with self.override_router():
auth.book_set.all().delete()
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Book)
self.assertEqual(e.hints, {'instance': auth})
def test_reverse_m2m_get_or_create(self):
auth = Person.objects.create(name='Someone')
Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
try:
with self.override_router():
auth.book_set.get_or_create(title="New Book", published=datetime.datetime.now())
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Person)
self.assertEqual(e.hints, {'instance': auth})
def test_reverse_m2m_remove(self):
auth = Person.objects.create(name='Someone')
book = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
book.authors.add(auth)
try:
with self.override_router():
auth.book_set.remove(book)
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Book.authors.through)
self.assertEqual(e.hints, {'instance': auth})
def test_reverse_m2m_update(self):
auth = Person.objects.create(name='Someone')
book = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
book.authors.add(auth)
try:
with self.override_router():
auth.book_set.all().update(title='Different')
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Book)
self.assertEqual(e.hints, {'instance': auth})
| bsd-3-clause |
elioth010/lugama | venv/lib/python2.7/site-packages/sqlalchemy/util/queue.py | 55 | 6548 | # util/queue.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""An adaptation of Py2.3/2.4's Queue module which supports reentrant
behavior, using RLock instead of Lock for its mutex object. The
Queue object is used exclusively by the sqlalchemy.pool.QueuePool
class.
This is to support the connection pool's usage of weakref callbacks to return
connections to the underlying Queue, which can in extremely
rare cases be invoked within the ``get()`` method of the Queue itself,
producing a ``put()`` inside the ``get()`` and therefore a reentrant
condition.
"""
from collections import deque
from time import time as _time
from .compat import threading
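# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of the upstream module): why the RLock
# described in the module docstring matters.  A weakref callback firing
# inside get() can issue a nested put(); with a plain Lock the nested
# acquire by the owning thread would deadlock, whereas an RLock lets the
# same thread re-enter.  This helper is hypothetical and is never called.
def _reentrancy_sketch():
    def reenter(lock):
        # the non-blocking nested acquire stands in for a reentrant put()
        # happening inside get() while the mutex is already held
        with lock:
            ok = lock.acquire(False)
            if ok:
                lock.release()
            return ok

    assert not reenter(threading.Lock())   # plain Lock: re-entry refused
    assert reenter(threading.RLock())      # RLock: owning thread re-enters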
__all__ = ['Empty', 'Full', 'Queue']
class Empty(Exception):
"Exception raised by Queue.get(block=0)/get_nowait()."
pass
class Full(Exception):
"Exception raised by Queue.put(block=0)/put_nowait()."
pass
class Queue:
def __init__(self, maxsize=0):
"""Initialize a queue object with a given maximum size.
If `maxsize` is <= 0, the queue size is infinite.
"""
self._init(maxsize)
# mutex must be held whenever the queue is mutating. All methods
# that acquire mutex must release it before returning. mutex
# is shared between the two conditions, so acquiring and
# releasing the conditions also acquires and releases mutex.
self.mutex = threading.RLock()
# Notify not_empty whenever an item is added to the queue; a
# thread waiting to get is notified then.
self.not_empty = threading.Condition(self.mutex)
# Notify not_full whenever an item is removed from the queue;
# a thread waiting to put is notified then.
self.not_full = threading.Condition(self.mutex)
def qsize(self):
"""Return the approximate size of the queue (not reliable!)."""
self.mutex.acquire()
n = self._qsize()
self.mutex.release()
return n
def empty(self):
"""Return True if the queue is empty, False otherwise (not
reliable!)."""
self.mutex.acquire()
n = self._empty()
self.mutex.release()
return n
def full(self):
"""Return True if the queue is full, False otherwise (not
reliable!)."""
self.mutex.acquire()
n = self._full()
self.mutex.release()
return n
def put(self, item, block=True, timeout=None):
"""Put an item into the queue.
        If optional arg `block` is True and `timeout` is None (the
default), block if necessary until a free slot is
available. If `timeout` is a positive number, it blocks at
most `timeout` seconds and raises the ``Full`` exception if no
free slot was available within that time. Otherwise (`block`
is false), put an item on the queue if a free slot is
immediately available, else raise the ``Full`` exception
(`timeout` is ignored in that case).
"""
self.not_full.acquire()
try:
if not block:
if self._full():
raise Full
elif timeout is None:
while self._full():
self.not_full.wait()
else:
if timeout < 0:
                    raise ValueError("'timeout' must be a non-negative number")
endtime = _time() + timeout
while self._full():
remaining = endtime - _time()
if remaining <= 0.0:
raise Full
self.not_full.wait(remaining)
self._put(item)
self.not_empty.notify()
finally:
self.not_full.release()
def put_nowait(self, item):
"""Put an item into the queue without blocking.
Only enqueue the item if a free slot is immediately available.
Otherwise raise the ``Full`` exception.
"""
return self.put(item, False)
def get(self, block=True, timeout=None):
"""Remove and return an item from the queue.
        If optional arg `block` is True and `timeout` is None (the
default), block if necessary until an item is available. If
`timeout` is a positive number, it blocks at most `timeout`
seconds and raises the ``Empty`` exception if no item was
available within that time. Otherwise (`block` is false),
return an item if one is immediately available, else raise the
``Empty`` exception (`timeout` is ignored in that case).
"""
self.not_empty.acquire()
try:
if not block:
if self._empty():
raise Empty
elif timeout is None:
while self._empty():
self.not_empty.wait()
else:
if timeout < 0:
                    raise ValueError("'timeout' must be a non-negative number")
endtime = _time() + timeout
while self._empty():
remaining = endtime - _time()
if remaining <= 0.0:
raise Empty
self.not_empty.wait(remaining)
item = self._get()
self.not_full.notify()
return item
finally:
self.not_empty.release()
def get_nowait(self):
"""Remove and return an item from the queue without blocking.
Only get an item if one is immediately available. Otherwise
raise the ``Empty`` exception.
"""
return self.get(False)
    # Override these methods to implement other queue organizations
    # (e.g. stack or priority queue); a minimal LIFO sketch follows the class.
    # These will only be called with the appropriate locks held.
# Initialize the queue representation
def _init(self, maxsize):
self.maxsize = maxsize
self.queue = deque()
def _qsize(self):
return len(self.queue)
# Check whether the queue is empty
def _empty(self):
return not self.queue
# Check whether the queue is full
def _full(self):
return self.maxsize > 0 and len(self.queue) == self.maxsize
# Put a new item in the queue
def _put(self, item):
self.queue.append(item)
# Get an item from the queue
def _get(self):
return self.queue.popleft()
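# ---------------------------------------------------------------------------
# Minimal sketch (not part of the upstream module) of the subclassing hook
# noted above: overriding only the representation methods yields a LIFO
# variant while Queue's locking is reused unchanged.  _LifoQueueSketch is
# hypothetical.
class _LifoQueueSketch(Queue):
    # Get an item from the same end _put() appends to -> stack semantics
    def _get(self):
        return self.queue.pop()
# e.g. q = _LifoQueueSketch(); q.put(1); q.put(2); q.get() returns 2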
| gpl-2.0 |
MattsFleaMarket/python-for-android | python3-alpha/python3-src/Tools/pybench/Exceptions.py | 92 | 13400 | from pybench import Test
class TryRaiseExcept(Test):
version = 2.0
operations = 2 + 3 + 3
rounds = 80000
def test(self):
error = ValueError
for i in range(self.rounds):
try:
raise error
except:
pass
try:
raise error
except:
pass
try:
raise error("something")
except:
pass
try:
raise error("something")
except:
pass
try:
raise error("something")
except:
pass
try:
raise error("something")
except:
pass
try:
raise error("something")
except:
pass
try:
raise error("something")
except:
pass
def calibrate(self):
error = ValueError
for i in range(self.rounds):
pass
class TryExcept(Test):
version = 2.0
operations = 15 * 10
rounds = 150000
def test(self):
for i in range(self.rounds):
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
def calibrate(self):
for i in range(self.rounds):
pass
### Test to make Fredrik happy...
if __name__ == '__main__':
import timeit
timeit.TestClass = TryRaiseExcept
timeit.main(['-s', 'test = TestClass(); test.rounds = 1000',
'test.test()'])
| apache-2.0 |
mmarch/azure-quickstart-templates | cloudera-tableau/scripts/cmxDeployOnIbiza.py | 140 | 88056 | #!/usr/bin/env python
#
__version__ = '0.11.2803'
import socket
import re
import urllib
import urllib2
from optparse import OptionParser
import hashlib
import os
import sys
import random
import paramiko
from paramiko import SSHClient
from time import sleep
from cm_api.api_client import ApiResource, ApiException
from cm_api.endpoints.hosts import *
from cm_api.endpoints.services import ApiServiceSetupInfo, ApiService
LOG_DIR='/log/cloudera'
def getParameterValue(vmsize, parameter):
log("vmsize: "+vmsize+", parameter:"+parameter)
switcher = {
"Standard_DS14:yarn_nodemanager_resource_cpu_vcores": "10",
"Standard_DS14:yarn_nodemanager_resource_memory_mb": "45056",
"Standard_DS14:impalad_memory_limit": "42949672960",
"Standard_DS13:yarn_nodemanager_resource_cpu_vcores": "5",
"Standard_DS13:yarn_nodemanager_resource_memory_mb": "20028",
"Standard_DS13:impalad_memory_limit": "21500000000"
}
return switcher.get(vmsize+":"+parameter, "0")
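# Usage sketch (illustrative only): unknown vmsize/parameter pairs fall
# through to the "0" default string, e.g.
#   getParameterValue("Standard_DS13", "impalad_memory_limit") -> "21500000000"
#   getParameterValue("Standard_DS3", "impalad_memory_limit")  -> "0"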
def getDataDiskCount():
bashCommand="lsblk | grep /data | grep -v /data/ | wc -l"
client=SSHClient()
client.set_missing_host_key_policy(paramiko.client.AutoAddPolicy())
log(socket.getfqdn(cmx.cm_server))
toconnect=socket.getfqdn(cmx.cm_server).replace("-mn0", "-dn0")
log(toconnect)
client.connect(toconnect, username=cmx.ssh_root_user, password=cmx.ssh_root_password)
stdin, stdout, stderr = client.exec_command(bashCommand)
count=stdout.readline().rstrip('\n')
return count
def setZookeeperOwnerDir(HA):
os.system("sudo chown zookeeper:zookeeper "+LOG_DIR+"/zookeeper")
# setup other masters in HA environment
if HA:
client=SSHClient()
client.set_missing_host_key_policy(paramiko.client.AutoAddPolicy())
toconnect=socket.getfqdn(cmx.cm_server).replace("-mn0", "-mn1")
client.connect(toconnect, username=cmx.ssh_root_user, password=cmx.ssh_root_password)
client.exec_command("sudo chown zookeeper:zookeeper "+LOG_DIR+"/zookeeper")
toconnect=socket.getfqdn(cmx.cm_server).replace("-mn0", "-mn2")
client.connect(toconnect, username=cmx.ssh_root_user, password=cmx.ssh_root_password)
client.exec_command("sudo chown zookeeper:zookeeper "+LOG_DIR+"/zookeeper")
def init_cluster():
"""
Initialise Cluster
:return:
"""
    # log in first with the default username/password, create a new admin user
    # based on the provided values, then delete the default admin
api = ApiResource(server_host=cmx.cm_server, username="admin", password="admin")
api.create_user(cmx.username, cmx.password, ['ROLE_ADMIN'])
api = ApiResource(server_host=cmx.cm_server, username=cmx.username, password=cmx.password)
api.delete_user("admin")
# Update Cloudera Manager configuration
cm = api.get_cloudera_manager()
cm.update_config({"REMOTE_PARCEL_REPO_URLS": "http://archive.cloudera.com/cdh5/parcels/{latest_supported}",
"PHONE_HOME": False, "PARCEL_DISTRIBUTE_RATE_LIMIT_KBS_PER_SECOND": "1024000"})
print "> Initialise Cluster"
if cmx.cluster_name in [x.name for x in api.get_all_clusters()]:
print "Cluster name: '%s' already exists" % cmx.cluster_name
else:
print "Creating cluster name '%s'" % cmx.cluster_name
api.create_cluster(name=cmx.cluster_name, version=cmx.cluster_version)
def add_hosts_to_cluster():
"""
Add hosts to cluster
:return:
"""
print "> Add hosts to Cluster: %s" % cmx.cluster_name
api = ApiResource(server_host=cmx.cm_server, username=cmx.username, password=cmx.password)
cluster = api.get_cluster(cmx.cluster_name)
cm = api.get_cloudera_manager()
# deploy agents into host_list
host_list = list(set([socket.getfqdn(x) for x in cmx.host_names] + [socket.getfqdn(cmx.cm_server)]) -
set([x.hostname for x in api.get_all_hosts()]))
if host_list:
cmd = cm.host_install(user_name=cmx.ssh_root_user, host_names=host_list,
password=cmx.ssh_root_password, private_key=cmx.ssh_private_key)
print "Installing host(s) to cluster '%s' - [ http://%s:7180/cmf/command/%s/details ]" % \
(socket.getfqdn(cmx.cm_server), cmx.cm_server, cmd.id)
#check.status_for_command("Hosts: %s " % host_list, cmd)
print "Installing hosts. This might take a while."
        while cmd.success is None:
            sleep(20)
            cmd = cmd.fetch()
            print "Installing hosts... Checking"
        if not cmd.success:
            print "cm_host_install failed: " + cmd.resultMessage
            exit(1)
print "Host install finish, agents installed"
hosts = []
for host in api.get_all_hosts():
if host.hostId not in [x.hostId for x in cluster.list_hosts()]:
print "Adding {'ip': '%s', 'hostname': '%s', 'hostId': '%s'}" % (host.ipAddress, host.hostname, host.hostId)
hosts.append(host.hostId)
print "adding new hosts to cluster"
if hosts:
print "Adding hostId(s) to '%s'" % cmx.cluster_name
print "%s" % hosts
cluster.add_hosts(hosts)
def host_rack():
"""
Add host to rack
:return:
"""
# TODO: Add host to rack
print "> Add host to rack"
api = ApiResource(server_host=cmx.cm_server, username=cmx.username, password=cmx.password)
cluster = api.get_cluster(cmx.cluster_name)
hosts = []
for h in api.get_all_hosts():
# host = api.create_host(h.hostId, h.hostname,
# socket.gethostbyname(h.hostname),
# "/default_rack")
h.set_rack_id("/default_rack")
hosts.append(h)
cluster.add_hosts(hosts)
def deploy_parcel(parcel_product, parcel_version):
"""
Deploy parcels
:return:
"""
api = ApiResource(server_host=cmx.cm_server, username=cmx.username, password=cmx.password)
cluster = api.get_cluster(cmx.cluster_name)
parcel = cluster.get_parcel(parcel_product, parcel_version)
if parcel.stage != 'ACTIVATED':
print "> Deploying parcel: [ %s-%s ]" % (parcel_product, parcel_version)
parcel.start_download()
# unlike other commands, check progress by looking at parcel stage and status
while True:
parcel = cluster.get_parcel(parcel_product, parcel_version)
if parcel.stage == 'DISTRIBUTED' or parcel.stage == 'DOWNLOADED' or parcel.stage == 'ACTIVATED':
break
# if parcel.state.errors:
# raise Exception(str(parcel.state.errors))
msg = " [%s: %s / %s]" % (parcel.stage, parcel.state.progress, parcel.state.totalProgress)
sys.stdout.write(msg + " " * (78 - len(msg)) + "\r")
sys.stdout.flush()
print ""
print "1. Parcel Stage: %s" % parcel.stage
parcel.start_distribution()
while True:
parcel = cluster.get_parcel(parcel_product, parcel_version)
if parcel.stage == 'DISTRIBUTED' or parcel.stage == 'ACTIVATED':
break
# if parcel.state.errors:
# raise Exception(str(parcel.state.errors))
msg = " [%s: %s / %s]" % (parcel.stage, parcel.state.progress, parcel.state.totalProgress)
sys.stdout.write(msg + " " * (78 - len(msg)) + "\r")
sys.stdout.flush()
print "2. Parcel Stage: %s" % parcel.stage
if parcel.stage == 'DISTRIBUTED':
parcel.activate()
while True:
parcel = cluster.get_parcel(parcel_product, parcel_version)
if parcel.stage != 'ACTIVATED':
msg = " [%s: %s / %s]" % (parcel.stage, parcel.state.progress, parcel.state.totalProgress)
sys.stdout.write(msg + " " * (78 - len(msg)) + "\r")
sys.stdout.flush()
# elif parcel.state.errors:
# raise Exception(str(parcel.state.errors))
else:
print "3. Parcel Stage: %s" % parcel.stage
break
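# Sketch (illustrative, not called above): the three polling loops in
# deploy_parcel() share one shape and could be folded into a helper like
# this, using only the cm_api parcel fields already referenced above.
def _wait_for_parcel_stage(cluster, parcel_product, parcel_version, target_stages):
    while True:
        parcel = cluster.get_parcel(parcel_product, parcel_version)
        if parcel.stage in target_stages:
            return parcel
        msg = " [%s: %s / %s]" % (parcel.stage, parcel.state.progress,
                                  parcel.state.totalProgress)
        sys.stdout.write(msg + " " * (78 - len(msg)) + "\r")
        sys.stdout.flush()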
def setup_zookeeper(HA):
"""
Zookeeper
> Waiting for ZooKeeper Service to initialize
Starting ZooKeeper Service
:return:
"""
api = ApiResource(server_host=cmx.cm_server, username=cmx.username, password=cmx.password)
cluster = api.get_cluster(cmx.cluster_name)
service_type = "ZOOKEEPER"
if cdh.get_service_type(service_type) is None:
print "> %s" % service_type
service_name = "zookeeper"
print "Create %s service" % service_name
cluster.create_service(service_name, service_type)
service = cluster.get_service(service_name)
hosts = management.get_hosts()
cmhost= management.get_cmhost()
service.update_config({"zookeeper_datadir_autocreate": True})
# Ensure zookeeper has access to folder
setZookeeperOwnerDir(HA)
# Role Config Group equivalent to Service Default Group
for rcg in [x for x in service.get_all_role_config_groups()]:
if rcg.roleType == "SERVER":
rcg.update_config({"maxClientCnxns": "1024",
"dataLogDir": LOG_DIR+"/zookeeper",
"dataDir": LOG_DIR+"/zookeeper",
"zk_server_log_dir": LOG_DIR+"/zookeeper"})
# Pick 3 hosts and deploy Zookeeper Server role for Zookeeper HA
                # mingrui change: install on the primary, secondary, and CM hosts
if HA:
print cmhost
print [x for x in hosts if x.id == 0 ][0]
print [x for x in hosts if x.id == 1 ][0]
cdh.create_service_role(service, rcg.roleType, cmhost)
cdh.create_service_role(service, rcg.roleType, [x for x in hosts if x.id == 0 ][0])
cdh.create_service_role(service, rcg.roleType, [x for x in hosts if x.id == 1 ][0])
#No HA, using POC setup, all service in one master node aka the cm host
else:
cdh.create_service_role(service, rcg.roleType, cmhost)
# init_zookeeper not required as the API performs this when adding Zookeeper
# check.status_for_command("Waiting for ZooKeeper Service to initialize", service.init_zookeeper())
check.status_for_command("Starting ZooKeeper Service", service.start())
def setup_hdfs(HA):
"""
HDFS
> Checking if the name directories of the NameNode are empty. Formatting HDFS only if empty.
Starting HDFS Service
> Creating HDFS /tmp directory
:return:
"""
api = ApiResource(server_host=cmx.cm_server, username=cmx.username, password=cmx.password)
cluster = api.get_cluster(cmx.cluster_name)
service_type = "HDFS"
if cdh.get_service_type(service_type) is None:
print "> %s" % service_type
service_name = "hdfs"
print "Create %s service" % service_name
cluster.create_service(service_name, service_type)
service = cluster.get_service(service_name)
hosts = management.get_hosts()
# Service-Wide
service_config = cdh.dependencies_for(service)
service_config.update({"dfs_replication": "3",
"dfs_block_local_path_access_user": "impala,hbase,mapred,spark"})
service.update_config(service_config)
# Get Disk Information - assume that all disk configuration is heterogeneous throughout the cluster
default_name_dir_list = ""
default_snn_dir_list = ""
default_data_dir_list = ""
dfs_name_dir_list = default_name_dir_list
dfs_snn_dir_list = default_snn_dir_list
dfs_data_dir_list = default_data_dir_list
        for x in range(int(diskcount)):
            dfs_data_dir_list += ",/data%d/dfs/dn" % (x)
        dfs_name_dir_list += "/data/dfs/nn"
        dfs_snn_dir_list += "/data/dfs/snn"
        # drop the leading comma left by the loop above so CM does not see
        # an empty directory entry
        dfs_data_dir_list = dfs_data_dir_list.lstrip(",")
#No HA, using POC setup, all service in one master node aka the cm host
if not HA:
nn_host_id=management.get_cmhost()
snn_host_id=management.get_cmhost()
else:
nn_host_id = [host for host in hosts if host.id == 0][0]
snn_host_id = [host for host in hosts if host.id == 1][0]
# Role Config Group equivalent to Service Default Group
for rcg in [x for x in service.get_all_role_config_groups()]:
if rcg.roleType == "NAMENODE":
# hdfs-NAMENODE - Default Group
rcg.update_config({"dfs_name_dir_list": dfs_name_dir_list,
"namenode_java_heapsize": "1677058304",
"dfs_namenode_handler_count": "70",
"dfs_namenode_service_handler_count": "70",
"dfs_namenode_servicerpc_address": "8022",
"namenode_log_dir": LOG_DIR+"/hadoop-hdfs"})
cdh.create_service_role(service, rcg.roleType, nn_host_id)
if rcg.roleType == "SECONDARYNAMENODE":
# hdfs-SECONDARYNAMENODE - Default Group
rcg.update_config({"fs_checkpoint_dir_list": dfs_snn_dir_list,
"secondary_namenode_java_heapsize": "1677058304",
"secondarynamenode_log_dir": LOG_DIR+"/hadoop-hdfs"})
            # choose a server that is not the NN; easier to enable HDFS-HA later
cdh.create_service_role(service, rcg.roleType, snn_host_id)
if rcg.roleType == "DATANODE":
# hdfs-DATANODE - Default Group
rcg.update_config({"datanode_java_heapsize": "351272960",
"dfs_data_dir_list": dfs_data_dir_list,
"dfs_datanode_data_dir_perm": "755",
"dfs_datanode_du_reserved": "3508717158",
"dfs_datanode_failed_volumes_tolerated": "0",
"dfs_datanode_max_locked_memory": "1257242624",
"datanode_log_dir": LOG_DIR+"/hadoop-hdfs"})
if rcg.roleType == "FAILOVERCONTROLLER":
rcg.update_config({"failover_controller_log_dir": LOG_DIR+"/hadoop-hdfs"})
if rcg.roleType == "HTTPFS":
rcg.update_config({"httpfs_log_dir": LOG_DIR+"/hadoop-httpfs"})
if rcg.roleType == "GATEWAY":
# hdfs-GATEWAY - Default Group
rcg.update_config({"dfs_client_use_trash": True})
# print nn_host_id.hostId
# print snn_host_id.hostId
for role_type in ['DATANODE']:
for host in management.get_hosts(include_cm_host = False):
if host.hostId != nn_host_id.hostId:
if host.hostId != snn_host_id.hostId:
cdh.create_service_role(service, role_type, host)
for role_type in ['GATEWAY']:
for host in management.get_hosts(include_cm_host=(role_type == 'GATEWAY')):
cdh.create_service_role(service, role_type, host)
nn_role_type = service.get_roles_by_type("NAMENODE")[0]
commands = service.format_hdfs(nn_role_type.name)
for cmd in commands:
check.status_for_command("Format NameNode", cmd)
check.status_for_command("Starting HDFS.", service.start())
check.status_for_command("Creating HDFS /tmp directory", service.create_hdfs_tmp())
# Additional HA setting for yarn
if HA:
setup_hdfs_ha()
def setup_hbase():
"""
HBase
> Creating HBase root directory
Starting HBase Service
:return:
"""
api = ApiResource(server_host=cmx.cm_server, username=cmx.username, password=cmx.password)
cluster = api.get_cluster(cmx.cluster_name)
service_type = "HBASE"
if cdh.get_service_type(service_type) is None:
print "> %s" % service_type
service_name = "hbase"
print "Create %s service" % service_name
cluster.create_service(service_name, service_type)
service = cluster.get_service(service_name)
hosts = management.get_hosts()
# Service-Wide
service.update_config(cdh.dependencies_for(service))
master_host_id = [host for host in hosts if host.id == 0][0]
backup_master_host_id = [host for host in hosts if host.id == 1][0]
cmhost = management.get_cmhost()
for rcg in [x for x in service.get_all_role_config_groups()]:
if rcg.roleType == "MASTER":
cdh.create_service_role(service, rcg.roleType, master_host_id)
cdh.create_service_role(service, rcg.roleType, backup_master_host_id)
cdh.create_service_role(service, rcg.roleType, cmhost)
if rcg.roleType == "REGIONSERVER":
for host in management.get_hosts(include_cm_host = False):
if host.hostId != master_host_id.hostId:
if host.hostId != backup_master_host_id.hostId:
cdh.create_service_role(service, rcg.roleType, host)
#for role_type in ['HBASETHRIFTSERVER', 'HBASERESTSERVER']:
# cdh.create_service_role(service, role_type, random.choice(hosts))
for role_type in ['GATEWAY']:
for host in management.get_hosts(include_cm_host=(role_type == 'GATEWAY')):
cdh.create_service_role(service, role_type, host)
check.status_for_command("Creating HBase root directory", service.create_hbase_root())
check.status_for_command("Starting HBase Service", service.start())
def setup_solr():
"""
Solr
> Initializing Solr in ZooKeeper
> Creating HDFS home directory for Solr
Starting Solr Service
:return:
"""
api = ApiResource(server_host=cmx.cm_server, username=cmx.username, password=cmx.password)
cluster = api.get_cluster(cmx.cluster_name)
service_type = "SOLR"
if cdh.get_service_type(service_type) is None:
print "> %s" % service_type
service_name = "solr"
print "Create %s service" % service_name
cluster.create_service(service_name, service_type)
service = cluster.get_service(service_name)
hosts = management.get_hosts()
# Service-Wide
service.update_config(cdh.dependencies_for(service))
# Role Config Group equivalent to Service Default Group
for rcg in [x for x in service.get_all_role_config_groups()]:
if rcg.roleType == "SOLR_SERVER":
cdh.create_service_role(service, rcg.roleType, [x for x in hosts if x.id == 0][0])
if rcg.roleType == "GATEWAY":
for host in management.get_hosts(include_cm_host=True):
cdh.create_service_role(service, rcg.roleType, host)
# Example of deploy_client_config. Recommended to Deploy Cluster wide client config.
# cdh.deploy_client_config_for(service)
# check.status_for_command("Initializing Solr in ZooKeeper", service._cmd('initSolr'))
# check.status_for_command("Creating HDFS home directory for Solr", service._cmd('createSolrHdfsHomeDir'))
check.status_for_command("Initializing Solr in ZooKeeper", service.init_solr())
check.status_for_command("Creating HDFS home directory for Solr",
service.create_solr_hdfs_home_dir())
# This service is started later on
# check.status_for_command("Starting Solr Service", service.start())
def setup_ks_indexer():
"""
KS_INDEXER
:return:
"""
api = ApiResource(server_host=cmx.cm_server, username=cmx.username, password=cmx.password)
cluster = api.get_cluster(cmx.cluster_name)
service_type = "KS_INDEXER"
if cdh.get_service_type(service_type) is None:
print "> %s" % service_type
service_name = "ks_indexer"
print "Create %s service" % service_name
cluster.create_service(service_name, service_type)
service = cluster.get_service(service_name)
hosts = management.get_hosts()
# Service-Wide
service.update_config(cdh.dependencies_for(service))
# Pick 1 host to deploy Lily HBase Indexer Default Group
cdh.create_service_role(service, "HBASE_INDEXER", random.choice(hosts))
# HBase Service-Wide configuration
hbase = cdh.get_service_type('HBASE')
hbase.stop()
hbase.update_config({"hbase_enable_indexing": True, "hbase_enable_replication": True})
hbase.start()
# This service is started later on
# check.status_for_command("Starting Lily HBase Indexer Service", service.start())
def setup_spark_on_yarn():
"""
Sqoop Client
:return:
"""
api = ApiResource(server_host=cmx.cm_server, username=cmx.username, password=cmx.password)
cluster = api.get_cluster(cmx.cluster_name)
service_type = "SPARK_ON_YARN"
if cdh.get_service_type(service_type) is None:
print "> %s" % service_type
service_name = "spark_on_yarn"
print "Create %s service" % service_name
cluster.create_service(service_name, service_type)
service = cluster.get_service(service_name)
hosts = management.get_hosts()
# Service-Wide
service.update_config(cdh.dependencies_for(service))
cmhost= management.get_cmhost()
soy=service.get_role_config_group("{0}-SPARK_YARN_HISTORY_SERVER-BASE".format(service_name))
soy.update_config({"log_dir": LOG_DIR+"/spark"})
cdh.create_service_role(service, "SPARK_YARN_HISTORY_SERVER",cmhost)
for host in management.get_hosts(include_cm_host=True):
cdh.create_service_role(service, "GATEWAY", host)
# Example of deploy_client_config. Recommended to Deploy Cluster wide client config.
# cdh.deploy_client_config_for(service)
check.status_for_command("Execute command CreateSparkUserDirCommand on service Spark",
service._cmd('CreateSparkUserDirCommand'))
check.status_for_command("Execute command CreateSparkHistoryDirCommand on service Spark",
service._cmd('CreateSparkHistoryDirCommand'))
check.status_for_command("Execute command SparkUploadJarServiceCommand on service Spark",
service._cmd('SparkUploadJarServiceCommand'))
# This service is started later on
# check.status_for_command("Starting Spark Service", service.start())
def setup_yarn(HA):
"""
Yarn
> Creating MR2 job history directory
> Creating NodeManager remote application log directory
Starting YARN (MR2 Included) Service
:return:
"""
api = ApiResource(server_host=cmx.cm_server, username=cmx.username, password=cmx.password)
cluster = api.get_cluster(cmx.cluster_name)
service_type = "YARN"
if cdh.get_service_type(service_type) is None:
print "> %s" % service_type
service_name = "yarn"
print "Create %s service" % service_name
cluster.create_service(service_name, service_type)
service = cluster.get_service(service_name)
hosts = management.get_hosts()
# Service-Wide
service.update_config(cdh.dependencies_for(service))
# empty list so it won't use ephemeral drive
default_yarn_dir_list = ""
yarn_dir_list = default_yarn_dir_list
        for x in range(int(diskcount)):
            yarn_dir_list += ",/data%d/yarn/nm" % (x)
        # drop the leading comma so CM does not see an empty directory entry
        yarn_dir_list = yarn_dir_list.lstrip(",")
cmhost= management.get_cmhost()
rm_host_id = [host for host in hosts if host.id == 0][0]
srm_host_id = [host for host in hosts if host.id == 1][0]
if not HA:
rm_host_id=cmhost
srm_host_id=cmhost
for rcg in [x for x in service.get_all_role_config_groups()]:
if rcg.roleType == "RESOURCEMANAGER":
# yarn-RESOURCEMANAGER - Default Group
rcg.update_config({"resource_manager_java_heapsize": "2000000000",
"yarn_scheduler_maximum_allocation_mb": "2568",
"yarn_scheduler_maximum_allocation_vcores": "2",
"resource_manager_log_dir": LOG_DIR+"/hadoop-yarn"})
cdh.create_service_role(service, rcg.roleType, rm_host_id)
if rcg.roleType == "JOBHISTORY":
# yarn-JOBHISTORY - Default Group
rcg.update_config({"mr2_jobhistory_java_heapsize": "1000000000",
"mr2_jobhistory_log_dir": LOG_DIR+"/hadoop-mapreduce"})
cdh.create_service_role(service, rcg.roleType, cmhost)
if rcg.roleType == "NODEMANAGER":
# yarn-NODEMANAGER - Default Group
rcg.update_config({"yarn_nodemanager_heartbeat_interval_ms": "100",
"node_manager_java_heapsize": "2000000000",
"yarn_nodemanager_local_dirs": yarn_dir_list,
"yarn_nodemanager_resource_cpu_vcores": getParameterValue(cmx.vmsize, "yarn_nodemanager_resource_cpu_vcores"),
"yarn_nodemanager_resource_memory_mb": getParameterValue(cmx.vmsize,"yarn_nodemanager_resource_memory_mb"),
"node_manager_log_dir": LOG_DIR+"/hadoop-yarn",
"yarn_nodemanager_log_dirs": LOG_DIR+"/hadoop-yarn/container"})
# for host in hosts:
# cdh.create_service_role(service, rcg.roleType, host)
if rcg.roleType == "GATEWAY":
# yarn-GATEWAY - Default Group
rcg.update_config({"mapred_submit_replication": "3"})
for host in management.get_hosts(include_cm_host=True):
cdh.create_service_role(service, rcg.roleType, host)
#print rm_host_id.hostId
#print srm_host_id.hostId
for role_type in ['NODEMANAGER']:
for host in management.get_hosts(include_cm_host = False):
#print host.hostId
if host.hostId != rm_host_id.hostId:
if host.hostId != srm_host_id.hostId:
cdh.create_service_role(service, role_type, host)
# Example of deploy_client_config. Recommended to Deploy Cluster wide client config.
# cdh.deploy_client_config_for(service)
check.status_for_command("Creating MR2 job history directory", service.create_yarn_job_history_dir())
check.status_for_command("Creating NodeManager remote application log directory",
service.create_yarn_node_manager_remote_app_log_dir())
# This service is started later on
# check.status_for_command("Starting YARN (MR2 Included) Service", service.start())
# Additional HA setting for yarn
if HA:
setup_yarn_ha()
def setup_mapreduce(HA):
"""
MapReduce
:return:
"""
api = ApiResource(server_host=cmx.cm_server, username=cmx.username, password=cmx.password)
cluster = api.get_cluster(cmx.cluster_name)
service_type = "MAPREDUCE"
if cdh.get_service_type(service_type) is None:
print "> %s" % service_type
service_name = "mapreduce"
print "Create %s service" % service_name
cluster.create_service(service_name, service_type)
service = cluster.get_service(service_name)
hosts = management.get_hosts()
jk=management.get_cmhost()
if HA:
jk=[x for x in hosts if x.id == 0][0]
# Service-Wide
service.update_config(cdh.dependencies_for(service))
for rcg in [x for x in service.get_all_role_config_groups()]:
if rcg.roleType == "JOBTRACKER":
# mapreduce-JOBTRACKER - Default Group
rcg.update_config({"jobtracker_mapred_local_dir_list": "/mapred/jt"})
cdh.create_service_role(service, rcg.roleType, jk)
if rcg.roleType == "TASKTRACKER":
# mapreduce-TASKTRACKER - Default Group
rcg.update_config({"tasktracker_mapred_local_dir_list": "/mapred/local",
"mapred_tasktracker_map_tasks_maximum": "1",
"mapred_tasktracker_reduce_tasks_maximum": "1", })
if rcg.roleType == "GATEWAY":
# mapreduce-GATEWAY - Default Group
rcg.update_config({"mapred_reduce_tasks": "1", "mapred_submit_replication": "1"})
for role_type in ['GATEWAY', 'TASKTRACKER']:
for host in management.get_hosts(include_cm_host=(role_type == 'GATEWAY')):
cdh.create_service_role(service, role_type, host)
# Example of deploy_client_config. Recommended to Deploy Cluster wide client config.
# cdh.deploy_client_config_for(service)
# This service is started later on
# check.status_for_command("Starting MapReduce Service", service.start())
def setup_hive():
"""
Hive
> Creating Hive Metastore Database
> Creating Hive Metastore Database Tables
> Creating Hive user directory
> Creating Hive warehouse directory
Starting Hive Service
:return:
"""
api = ApiResource(server_host=cmx.cm_server, username=cmx.username, password=cmx.password)
cluster = api.get_cluster(cmx.cluster_name)
service_type = "HIVE"
if cdh.get_service_type(service_type) is None:
print "> %s" % service_type
service_name = "hive"
print "Create %s service" % service_name
cluster.create_service(service_name, service_type)
service = cluster.get_service(service_name)
hosts = management.get_hosts()
# Service-Wide
# hive_metastore_database_host: Assuming embedded DB is running from where embedded-db is located.
service_config = {"hive_metastore_database_host": socket.getfqdn(cmx.cm_server),
"hive_metastore_database_user": "hive",
"hive_metastore_database_name": "metastore",
"hive_metastore_database_password": cmx.hive_password,
"hive_metastore_database_port": "5432",
"hive_metastore_database_type": "postgresql"}
service_config.update(cdh.dependencies_for(service))
service.update_config(service_config)
hcat = service.get_role_config_group("{0}-WEBHCAT-BASE".format(service_name))
hcat.update_config({"hcatalog_log_dir": LOG_DIR+"/hcatalog"})
hs2 = service.get_role_config_group("{0}-HIVESERVER2-BASE".format(service_name))
hs2.update_config({"hive_log_dir": LOG_DIR+"/hive"})
hms = service.get_role_config_group("{0}-HIVEMETASTORE-BASE".format(service_name))
hms.update_config({"hive_log_dir": LOG_DIR+"/hive"})
        # install on the CM node (mingrui)
cmhost= management.get_cmhost()
for role_type in ['HIVEMETASTORE', 'HIVESERVER2']:
cdh.create_service_role(service, role_type, cmhost)
for host in management.get_hosts(include_cm_host=True):
cdh.create_service_role(service, "GATEWAY", host)
# Example of deploy_client_config. Recommended to Deploy Cluster wide client config.
# cdh.deploy_client_config_for(service)
check.status_for_command("Creating Hive Metastore Database Tables", service.create_hive_metastore_tables())
check.status_for_command("Creating Hive user directory", service.create_hive_userdir())
check.status_for_command("Creating Hive warehouse directory", service.create_hive_warehouse())
# This service is started later on
# check.status_for_command("Starting Hive Service", service.start())
def setup_sqoop():
"""
Sqoop 2
> Creating Sqoop 2 user directory
Starting Sqoop 2 Service
:return:
"""
api = ApiResource(server_host=cmx.cm_server, username=cmx.username, password=cmx.password)
cluster = api.get_cluster(cmx.cluster_name)
service_type = "SQOOP"
if cdh.get_service_type(service_type) is None:
print "> %s" % service_type
service_name = "sqoop"
print "Create %s service" % service_name
cluster.create_service(service_name, service_type)
service = cluster.get_service(service_name)
hosts = management.get_hosts()
# Service-Wide
service.update_config(cdh.dependencies_for(service))
        # install on the CM node (mingrui)
cmhost= management.get_cmhost()
cdh.create_service_role(service, "SQOOP_SERVER", cmhost)
# check.status_for_command("Creating Sqoop 2 user directory", service._cmd('createSqoopUserDir'))
check.status_for_command("Creating Sqoop 2 user directory", service.create_sqoop_user_dir())
# This service is started later on
# check.status_for_command("Starting Sqoop 2 Service", service.start())
def setup_sqoop_client():
"""
Sqoop Client
:return:
"""
api = ApiResource(server_host=cmx.cm_server, username=cmx.username, password=cmx.password)
cluster = api.get_cluster(cmx.cluster_name)
service_type = "SQOOP_CLIENT"
if cdh.get_service_type(service_type) is None:
print "> %s" % service_type
service_name = "sqoop_client"
print "Create %s service" % service_name
cluster.create_service(service_name, service_type)
service = cluster.get_service(service_name)
# hosts = get_cluster_hosts()
# Service-Wide
service.update_config({})
for host in management.get_hosts(include_cm_host=True):
cdh.create_service_role(service, "GATEWAY", host)
# Example of deploy_client_config. Recommended to Deploy Cluster wide client config.
# cdh.deploy_client_config_for(service)
def setup_impala(HA):
"""
Impala
> Creating Impala user directory
Starting Impala Service
:return:
"""
default_impala_dir_list = ""
impala_dir_list = default_impala_dir_list
for x in range(int(diskcount)):
impala_dir_list+="/data%d/impala/scratch" % (x)
max_count=int(diskcount)-1
if x < max_count:
impala_dir_list+=","
print "x is %d. Adding comma" % (x)
api = ApiResource(server_host=cmx.cm_server, username=cmx.username, password=cmx.password)
cluster = api.get_cluster(cmx.cluster_name)
service_type = "IMPALA"
if cdh.get_service_type(service_type) is None:
print "> %s" % service_type
service_name = "impala"
print "Create %s service" % service_name
cluster.create_service(service_name, service_type)
service = cluster.get_service(service_name)
service_config = {"impala_cmd_args_safety_valve": "-scratch_dirs=%s" % (impala_dir_list) }
service.update_config(service_config)
service = cluster.get_service(service_name)
hosts = management.get_hosts()
# Service-Wide
service.update_config(cdh.dependencies_for(service))
impalad=service.get_role_config_group("{0}-IMPALAD-BASE".format(service_name))
impalad.update_config({"log_dir": LOG_DIR+"/impalad",
"impalad_memory_limit": getParameterValue(cmx.vmsize, "impalad_memory_limit")})
        #llama = service.get_role_config_group("{0}-LLAMA-BASE".format(service_name))
        #llama.update_config({"log_dir": LOG_DIR+"/impala-llama"})
ss = service.get_role_config_group("{0}-STATESTORE-BASE".format(service_name))
ss.update_config({"log_dir": LOG_DIR+"/statestore"})
cs = service.get_role_config_group("{0}-CATALOGSERVER-BASE".format(service_name))
cs.update_config({"log_dir": LOG_DIR+"/catalogd"})
cmhost= management.get_cmhost()
for role_type in ['CATALOGSERVER', 'STATESTORE']:
cdh.create_service_role(service, role_type, cmhost)
if HA:
# Install ImpalaD
head_node_1_host_id = [host for host in hosts if host.id == 0][0]
head_node_2_host_id = [host for host in hosts if host.id == 1][0]
for host in hosts:
# impalad should not be on hn-1 and hn-2
if (host.id!=head_node_1_host_id.id and host.id!=head_node_2_host_id.id):
cdh.create_service_role(service, "IMPALAD", host)
else:
# All master services on CM host, install impalad on datanode host
for host in hosts:
if (host.id!=cmhost.id):
cdh.create_service_role(service, "IMPALAD", host)
check.status_for_command("Creating Impala user directory", service.create_impala_user_dir())
check.status_for_command("Starting Impala Service", service.start())
def setup_oozie():
"""
Oozie
> Creating Oozie database
> Installing Oozie ShareLib in HDFS
Starting Oozie Service
:return:
"""
api = ApiResource(server_host=cmx.cm_server, username=cmx.username, password=cmx.password)
cluster = api.get_cluster(cmx.cluster_name)
service_type = "OOZIE"
if cdh.get_service_type(service_type) is None:
print "> %s" % service_type
service_name = "oozie"
print "Create %s service" % service_name
cluster.create_service(service_name, service_type)
service = cluster.get_service(service_name)
hosts = management.get_hosts()
# Service-Wide
service.update_config(cdh.dependencies_for(service))
# Role Config Group equivalent to Service Default Group
        # install on the CM server (mingrui)
cmhost= management.get_cmhost()
for rcg in [x for x in service.get_all_role_config_groups()]:
if rcg.roleType == "OOZIE_SERVER":
rcg.update_config({"oozie_log_dir": LOG_DIR+"/oozie",
"oozie_data_dir": LOG_DIR+"/lib/oozie/data"})
cdh.create_service_role(service, rcg.roleType, cmhost)
check.status_for_command("Creating Oozie database", service.create_oozie_db())
check.status_for_command("Installing Oozie ShareLib in HDFS", service.install_oozie_sharelib())
# This service is started later on
# check.status_for_command("Starting Oozie Service", service.start())
def setup_hue():
"""
Hue
Starting Hue Service
:return:
"""
api = ApiResource(server_host=cmx.cm_server, username=cmx.username, password=cmx.password)
cluster = api.get_cluster(cmx.cluster_name)
service_type = "HUE"
if cdh.get_service_type(service_type) is None:
print "> %s" % service_type
service_name = "hue"
print "Create %s service" % service_name
cluster.create_service(service_name, service_type)
service = cluster.get_service(service_name)
hosts = management.get_hosts()
# Service-Wide
service.update_config(cdh.dependencies_for(service))
# Role Config Group equivalent to Service Default Group
        # install on the CM host (mingrui)
cmhost= management.get_cmhost()
for rcg in [x for x in service.get_all_role_config_groups()]:
if rcg.roleType == "HUE_SERVER":
rcg.update_config({"hue_server_log_dir": LOG_DIR+"/hue"})
cdh.create_service_role(service, "HUE_SERVER", cmhost)
if rcg.roleType == "KT_RENEWER":
rcg.update_config({"kt_renewer_log_dir": LOG_DIR+"/hue"})
# This service is started later on
# check.status_for_command("Starting Hue Service", service.start())
def setup_flume():
api = ApiResource(server_host=cmx.cm_server, username=cmx.username, password=cmx.password)
cluster = api.get_cluster(cmx.cluster_name)
service_type = "FLUME"
if cdh.get_service_type(service_type) is None:
service_name = "flume"
cluster.create_service(service_name.lower(), service_type)
service = cluster.get_service(service_name)
# Service-Wide
service.update_config(cdh.dependencies_for(service))
hosts = management.get_hosts()
cdh.create_service_role(service, "AGENT", [x for x in hosts if x.id == 0][0])
# This service is started later on
# check.status_for_command("Starting Flume Agent", service.start())
def setup_hdfs_ha():
"""
Setup hdfs-ha
:return:
"""
# api = ApiResource(cmx.cm_server, username=cmx.username, password=cmx.password, version=6)
# cluster = api.get_cluster(cmx.cluster_name)
try:
print "> Setup HDFS-HA"
hdfs = cdh.get_service_type('HDFS')
zookeeper = cdh.get_service_type('ZOOKEEPER')
# Requirement Hive/Hue
hive = cdh.get_service_type('HIVE')
hue = cdh.get_service_type('HUE')
hosts = management.get_hosts()
nn=[x for x in hosts if x.id == 0 ][0]
snn=[x for x in hosts if x.id == 1 ][0]
cm=management.get_cmhost()
if len(hdfs.get_roles_by_type("NAMENODE")) != 2:
# QJM require 3 nodes
jn = random.sample([x.hostRef.hostId for x in hdfs.get_roles_by_type("DATANODE")], 3)
# get NAMENODE and SECONDARYNAMENODE hostId
nn_host_id = hdfs.get_roles_by_type("NAMENODE")[0].hostRef.hostId
sndnn_host_id = hdfs.get_roles_by_type("SECONDARYNAMENODE")[0].hostRef.hostId
# Occasionally SECONDARYNAMENODE is also installed on the NAMENODE
            if nn_host_id == sndnn_host_id:
                # jn already holds hostId strings, so choose among them directly
                standby_host_id = random.choice([x for x in jn if x not in (nn_host_id, sndnn_host_id)])
            else:
                standby_host_id = sndnn_host_id
# hdfs-JOURNALNODE - Default Group
role_group = hdfs.get_role_config_group("%s-JOURNALNODE-BASE" % hdfs.name)
role_group.update_config({"dfs_journalnode_edits_dir": "/data/dfs/jn"})
cmd = hdfs.enable_nn_ha(hdfs.get_roles_by_type("NAMENODE")[0].name, standby_host_id,
"nameservice1", [dict(jnHostId=nn_host_id), dict(jnHostId=sndnn_host_id), dict(jnHostId=cm.hostId)],
zk_service_name=zookeeper.name)
check.status_for_command("Enable HDFS-HA - [ http://%s:7180/cmf/command/%s/details ]" %
(socket.getfqdn(cmx.cm_server), cmd.id), cmd)
# hdfs-HTTPFS
cdh.create_service_role(hdfs, "HTTPFS", [x for x in hosts if x.id == 0][0])
# Configure HUE service dependencies
cdh('HDFS').stop()
cdh('ZOOKEEPER').stop()
if hue is not None:
hue.update_config(cdh.dependencies_for(hue))
if hive is not None:
check.status_for_command("Update Hive Metastore NameNodes", hive.update_metastore_namenodes())
cdh('ZOOKEEPER').start()
cdh('HDFS').start()
except ApiException as err:
print " ERROR: %s" % err.message
def setup_yarn_ha():
"""
Setup yarn-ha
:return:
"""
# api = ApiResource(server_host=cmx.cm_server, username=cmx.username, password=cmx.password)
# cluster = api.get_cluster(cmx.cluster_name)
print "> Setup YARN-HA"
yarn = cdh.get_service_type('YARN')
zookeeper = cdh.get_service_type('ZOOKEEPER')
hosts = management.get_hosts()
# hosts = api.get_all_hosts()
if len(yarn.get_roles_by_type("RESOURCEMANAGER")) != 2:
# Choose secondary name node for standby RM
rm = [x for x in hosts if x.id == 1 ][0]
cmd = yarn.enable_rm_ha(rm.hostId, zookeeper.name)
check.status_for_command("Enable YARN-HA - [ http://%s:7180/cmf/command/%s/details ]" %
(socket.getfqdn(cmx.cm_server), cmd.id), cmd)
def setup_kerberos():
"""
Setup Kerberos - work in progress
:return:
"""
# api = ApiResource(server_host=cmx.cm_server, username=cmx.username, password=cmx.password)
# cluster = api.get_cluster(cmx.cluster_name)
print "> Setup Kerberos"
hdfs = cdh.get_service_type('HDFS')
zookeeper = cdh.get_service_type('ZOOKEEPER')
hue = cdh.get_service_type('HUE')
hosts = management.get_hosts()
# HDFS Service-Wide
hdfs.update_config({"hadoop_security_authentication": "kerberos", "hadoop_security_authorization": True})
# hdfs-DATANODE-BASE - Default Group
role_group = hdfs.get_role_config_group("%s-DATANODE-BASE" % hdfs.name)
role_group.update_config({"dfs_datanode_http_port": "1006", "dfs_datanode_port": "1004",
"dfs_datanode_data_dir_perm": "700"})
# Zookeeper Service-Wide
zookeeper.update_config({"enableSecurity": True})
cdh.create_service_role(hue, "KT_RENEWER", [x for x in hosts if x.id == 0][0])
def setup_sentry():
api = ApiResource(server_host=cmx.cm_server, username=cmx.username, password=cmx.password)
cluster = api.get_cluster(cmx.cluster_name)
service_type = "SENTRY"
if cdh.get_service_type(service_type) is None:
service_name = "sentry"
cluster.create_service(service_name.lower(), service_type)
service = cluster.get_service(service_name)
# Service-Wide
# sentry_server_database_host: Assuming embedded DB is running from where embedded-db is located.
service_config = {"sentry_server_database_host": socket.getfqdn(cmx.cm_server),
"sentry_server_database_user": "sentry",
"sentry_server_database_name": "sentry",
"sentry_server_database_password": "cloudera",
"sentry_server_database_port": "5432",
"sentry_server_database_type": "postgresql"}
service_config.update(cdh.dependencies_for(service))
service.update_config(service_config)
hosts = management.get_hosts()
        # Mingrui: install Sentry on the CM host
cmhost= management.get_cmhost()
cdh.create_service_role(service, "SENTRY_SERVER", cmhost)
check.status_for_command("Creating Sentry Database Tables", service.create_sentry_database_tables())
# Update configuration for Hive service
hive = cdh.get_service_type('HIVE')
hive.update_config(cdh.dependencies_for(hive))
# Disable HiveServer2 Impersonation - hive-HIVESERVER2-BASE - Default Group
role_group = hive.get_role_config_group("%s-HIVESERVER2-BASE" % hive.name)
role_group.update_config({"hiveserver2_enable_impersonation": False})
# This service is started later on
# check.status_for_command("Starting Sentry Server", service.start())
def setup_easy():
"""
An example using auto_assign_roles() and auto_configure()
"""
api = ApiResource(server_host=cmx.cm_server, username=cmx.username, password=cmx.password)
cluster = api.get_cluster(cmx.cluster_name)
print "> Easy setup for cluster: %s" % cmx.cluster_name
# Do not install these services
do_not_install = ['KEYTRUSTEE', 'KMS', 'KS_INDEXER', 'ISILON', 'FLUME', 'MAPREDUCE', 'ACCUMULO',
'ACCUMULO16', 'SPARK_ON_YARN', 'SPARK', 'SOLR', 'SENTRY']
service_types = list(set(cluster.get_service_types()) - set(do_not_install))
for service in service_types:
cluster.create_service(name=service.lower(), service_type=service.upper())
cluster.auto_assign_roles()
cluster.auto_configure()
# Hive Metastore DB and dependencies ['YARN', 'ZOOKEEPER']
service = cdh.get_service_type('HIVE')
service_config = {"hive_metastore_database_host": socket.getfqdn(cmx.cm_server),
"hive_metastore_database_user": "hive",
"hive_metastore_database_name": "metastore",
"hive_metastore_database_password": cmx.hive_password,
"hive_metastore_database_port": "5432",
"hive_metastore_database_type": "postgresql"}
service_config.update(cdh.dependencies_for(service))
service.update_config(service_config)
check.status_for_command("Executing first run command. This might take a while.", cluster.first_run())
def teardown(keep_cluster=True):
"""
Teardown the Cluster
:return:
"""
api = ApiResource(server_host=cmx.cm_server, username=cmx.username, password=cmx.password)
try:
cluster = api.get_cluster(cmx.cluster_name)
service_list = cluster.get_all_services()
print "> Teardown Cluster: %s Services and keep_cluster: %s" % (cmx.cluster_name, keep_cluster)
check.status_for_command("Stop %s" % cmx.cluster_name, cluster.stop())
for service in service_list[::-1]:
try:
check.status_for_command("Stop Service %s" % service.name, service.stop())
except ApiException as err:
print " ERROR: %s" % err.message
print "Processing service %s" % service.name
for role in service.get_all_roles():
print " Delete role %s" % role.name
service.delete_role(role.name)
cluster.delete_service(service.name)
except ApiException as err:
print err.message
exit(1)
# Delete Management Services
try:
mgmt = api.get_cloudera_manager()
check.status_for_command("Stop Management services", mgmt.get_service().stop())
mgmt.delete_mgmt_service()
except ApiException as err:
print " ERROR: %s" % err.message
# cluster.remove_all_hosts()
if not keep_cluster:
# Remove CDH Parcel and GPL Extras Parcel
for x in cmx.parcel:
print "Removing parcel: [ %s-%s ]" % (x['product'], x['version'])
parcel_product = x['product']
parcel_version = x['version']
while True:
parcel = cluster.get_parcel(parcel_product, parcel_version)
if parcel.stage == 'ACTIVATED':
print "Deactivating parcel"
parcel.deactivate()
else:
break
while True:
parcel = cluster.get_parcel(parcel_product, parcel_version)
if parcel.stage == 'DISTRIBUTED':
print "Executing parcel.start_removal_of_distribution()"
parcel.start_removal_of_distribution()
print "Executing parcel.remove_download()"
parcel.remove_download()
elif parcel.stage == 'UNDISTRIBUTING':
msg = " [%s: %s / %s]" % (parcel.stage, parcel.state.progress, parcel.state.totalProgress)
sys.stdout.write(msg + " " * (78 - len(msg)) + "\r")
sys.stdout.flush()
else:
break
print "Deleting cluster: %s" % cmx.cluster_name
api.delete_cluster(cmx.cluster_name)
class ManagementActions:
"""
Example stopping 'ACTIVITYMONITOR', 'REPORTSMANAGER' Management Role
:param role_list:
:param action:
:return:
"""
def __init__(self, *role_list):
self._role_list = role_list
self._api = ApiResource(server_host=cmx.cm_server, username=cmx.username, password=cmx.password)
self._cm = self._api.get_cloudera_manager()
try:
self._service = self._cm.get_service()
except ApiException:
self._service = self._cm.create_mgmt_service(ApiServiceSetupInfo())
self._role_types = [x.type for x in self._service.get_all_roles()]
def stop(self):
self._action('stop_roles')
def start(self):
self._action('start_roles')
def restart(self):
self._action('restart_roles')
def _action(self, action):
state = {'start_roles': ['STOPPED'], 'stop_roles': ['STARTED'], 'restart_roles': ['STARTED', 'STOPPED']}
for mgmt_role in [x for x in self._role_list if x in self._role_types]:
for role in [x for x in self._service.get_roles_by_type(mgmt_role) if x.roleState in state[action]]:
for cmd in getattr(self._service, action)(role.name):
check.status_for_command("%s role %s" % (action.split("_")[0].upper(), mgmt_role), cmd)
def setup(self):
"""
Setup Management Roles
'ACTIVITYMONITOR', 'ALERTPUBLISHER', 'EVENTSERVER', 'HOSTMONITOR', 'SERVICEMONITOR'
Requires License: 'NAVIGATOR', 'NAVIGATORMETASERVER', 'REPORTSMANAGER'
:return:
"""
# api = ApiResource(server_host=cmx.cm_server, username=cmx.username, password=cmx.password)
print "> Setup Management Services"
self._cm.update_config({"TSQUERY_STREAMS_LIMIT": 1000})
hosts = management.get_hosts(include_cm_host=True)
# pick the hostId that matches the ipAddress of cm_server;
# if no match is found, fall back to the 1st host from the -w list
try:
mgmt_host = [x for x in hosts if x.ipAddress == socket.gethostbyname(cmx.cm_server)][0]
except IndexError:
mgmt_host = [x for x in hosts if x.id == 0][0]
for role_type in [x for x in self._service.get_role_types() if x in self._role_list]:
try:
if not [x for x in self._service.get_all_roles() if x.type == role_type]:
print "Creating Management Role %s " % role_type
role_name = "mgmt-%s-%s" % (role_type, mgmt_host.md5host)
for cmd in self._service.create_role(role_name, role_type, mgmt_host.hostId).get_commands():
check.status_for_command("Creating %s" % role_name, cmd)
except ApiException as err:
print "ERROR: %s " % err.message
# now configure each role
for group in [x for x in self._service.get_all_role_config_groups() if x.roleType in self._role_list]:
if group.roleType == "ACTIVITYMONITOR":
group.update_config({"firehose_database_host": "%s:5432" % socket.getfqdn(cmx.cm_server),
"firehose_database_user": "amon",
"firehose_database_password": cmx.amon_password,
"firehose_database_type": "postgresql",
"firehose_database_name": "amon",
"mgmt_log_dir": LOG_DIR+"/cloudera-scm-firehose",
"firehose_heapsize": "215964392"})
elif group.roleType == "ALERTPUBLISHER":
group.update_config({"mgmt_log_dir": LOG_DIR+"/cloudera-scm-alertpublisher"})
elif group.roleType == "EVENTSERVER":
group.update_config({"event_server_heapsize": "215964392",
"mgmt_log_dir": LOG_DIR+"/cloudera-scm-eventserver",
"eventserver_index_dir": LOG_DIR+"/lib/cloudera-scm-eventserver"})
elif group.roleType == "HOSTMONITOR":
group.update_config({"mgmt_log_dir": LOG_DIR+"/cloudera-scm-firehose",
"firehose_storage_dir": LOG_DIR+"/lib/cloudera-host-monitor"})
elif group.roleType == "SERVICEMONITOR":
group.update_config({"mgmt_log_dir": LOG_DIR+"/cloudera-scm-firehose",
"firehose_storage_dir": LOG_DIR+"/lib/cloudera-service-monitor"})
elif group.roleType == "NAVIGATOR" and management.licensed():
group.update_config({})
elif group.roleType == "NAVIGATORMETADATASERVER" and management.licensed():
group.update_config({})
elif group.roleType == "REPORTSMANAGER" and management.licensed():
group.update_config({"headlamp_database_host": "%s:5432" % socket.getfqdn(cmx.cm_server),
"headlamp_database_name": "rman",
"headlamp_database_password": cmx.rman_password,
"headlamp_database_type": "postgresql",
"headlamp_database_user": "rman",
"headlamp_scratch_dir": LOG_DIR+"/lib/cloudera-scm-headlamp",
"mgmt_log_dir": LOG_DIR+"/cloudera-scm-headlamp"})
elif group.roleType == "OOZIE":
group.update_config({"oozie_database_host": "%s:5432" % socket.getfqdn(cmx.cm_server),
"oozie_database_name": "oozie",
"oozie_database_password": cmx.oozie_password,
"oozie_database_type": "postgresql",
"oozie_database_user": "oozie",
"oozie_log_dir": LOG_DIR+"/oozie" })
@classmethod
def licensed(cls):
"""
Check if Cluster is licensed
:return:
"""
api = ApiResource(server_host=cmx.cm_server, username=cmx.username, password=cmx.password)
cm = api.get_cloudera_manager()
try:
return bool(cm.get_license().uuid)
except ApiException as err:
return "Express" not in err.message
@classmethod
def upload_license(cls):
"""
Upload License file
:return:
"""
api = ApiResource(server_host=cmx.cm_server, username=cmx.username, password=cmx.password)
cm = api.get_cloudera_manager()
if cmx.license_file and not management.licensed():
print "Upload license"
with open(cmx.license_file, 'r') as f:
license_contents = f.read()
print "Upload CM License: \n %s " % license_contents
cm.update_license(license_contents)
# REPORTSMANAGER required after applying license
management("REPORTSMANAGER").setup()
management("REPORTSMANAGER").start()
@classmethod
def begin_trial(cls):
"""
Begin Trial
:return:
"""
api = ApiResource(server_host=cmx.cm_server, username=cmx.username, password=cmx.password)
print "def begin_trial"
if not management.licensed():
try:
api.post("/cm/trial/begin")
# REPORTSMANAGER required after applying license
management("REPORTSMANAGER").setup()
management("REPORTSMANAGER").start()
except ApiException as err:
print err.message
@classmethod
def get_mgmt_password(cls, role_type):
"""
Get password for "ACTIVITYMONITOR', 'REPORTSMANAGER', 'NAVIGATOR", "OOZIE", "HIVEMETASTORESERVER"
:param role_type:
:return:
"""
contents = []
mgmt_password = False
if os.path.exists('/etc/cloudera-scm-server'):
file_path = os.path.join('/etc/cloudera-scm-server', 'db.mgmt.properties')
try:
with open(file_path) as f:
contents = f.readlines()
except IOError:
print "Unable to open file %s." % file_path
# role_type expected to be in
# ACTIVITYMONITOR, REPORTSMANAGER, NAVIGATOR, OOZIE, HIVEMETASTORESERVER
if role_type in ['ACTIVITYMONITOR', 'REPORTSMANAGER', 'NAVIGATOR','OOZIE','HIVEMETASTORESERVER']:
idx = "com.cloudera.cmf.%s.db.password=" % role_type
match = [s.rstrip('\n') for s in contents if idx in s][0]
mgmt_password = match[match.index(idx) + len(idx):]
return mgmt_password
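# Illustrative note (hypothetical property value, not from a real install):
# a db.mgmt.properties entry such as
# "com.cloudera.cmf.ACTIVITYMONITOR.db.password=secret" makes
# get_mgmt_password("ACTIVITYMONITOR") return "secret".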
@classmethod
def get_cmhost(cls):
"""
return cm host in the same format as other host
"""
api = ApiResource(server_host=cmx.cm_server, username=cmx.username, password=cmx.password)
idx = len(cmx.host_names)
_host = [x for x in api.get_all_hosts() if x.ipAddress == socket.gethostbyname(cmx.cm_server)][0]
cmhost = {
'id': idx,
'hostId': _host.hostId,
'hostname': _host.hostname,
'md5host': hashlib.md5(_host.hostname).hexdigest(),
'ipAddress': _host.ipAddress,
}
return type('', (), cmhost)
@classmethod
def get_hosts(cls, include_cm_host=False):
"""
api.get_all_hosts() returns all hosts as instances of ApiHost (hostId, hostname, ipAddress),
while cluster.list_hosts() returns the cluster hosts as instances of ApiHostRef (hostId only).
We only need the cluster hosts as ApiHost-style records: hostId, hostname, ipAddress + md5host.
Preserves the host order given in -w.
hashlib.md5(host.hostname).hexdigest()
attributes = {'id': None, 'hostId': None, 'hostname': None, 'md5host': None, 'ipAddress': None, }
return a list of hosts
"""
api = ApiResource(server_host=cmx.cm_server, username=cmx.username, password=cmx.password)
w_hosts = set(enumerate(cmx.host_names))
if include_cm_host and socket.gethostbyname(cmx.cm_server) \
not in [socket.gethostbyname(x) for x in cmx.host_names]:
w_hosts.add((len(w_hosts), cmx.cm_server))
hosts = []
for idx, host in w_hosts:
_host = [x for x in api.get_all_hosts() if x.ipAddress == socket.gethostbyname(host)][0]
hosts.append({
'id': idx,
'hostId': _host.hostId,
'hostname': _host.hostname,
'md5host': hashlib.md5(_host.hostname).hexdigest(),
'ipAddress': _host.ipAddress,
})
return [type('', (), x) for x in hosts]
@classmethod
def restart_management(cls):
"""
Restart Management Services
:return:
"""
api = ApiResource(server_host=cmx.cm_server, username=cmx.username, password=cmx.password)
mgmt = api.get_cloudera_manager().get_service()
check.status_for_command("Stop Management services", mgmt.stop())
check.status_for_command("Start Management services", mgmt.start())
class ServiceActions:
"""
Example stopping/starting services ['HBASE', 'IMPALA', 'SPARK', 'SOLR']
:param service_list:
:param action:
:return:
"""
def __init__(self, *service_list):
self._service_list = service_list
self._api = ApiResource(server_host=cmx.cm_server, username=cmx.username, password=cmx.password)
self._cluster = self._api.get_cluster(cmx.cluster_name)
def stop(self):
self._action('stop')
def start(self):
self._action('start')
def restart(self):
self._action('restart')
def _action(self, action):
state = {'start': ['STOPPED'], 'stop': ['STARTED'], 'restart': ['STARTED', 'STOPPED']}
for services in [x for x in self._cluster.get_all_services()
if x.type in self._service_list and x.serviceState in state[action]]:
check.status_for_command("%s service %s" % (action.upper(), services.type),
getattr(self._cluster.get_service(services.name), action)())
@classmethod
def get_service_type(cls, name):
"""
Returns service based on service type name
:param name:
:return:
"""
api = ApiResource(server_host=cmx.cm_server, username=cmx.username, password=cmx.password)
cluster = api.get_cluster(cmx.cluster_name)
try:
service = [x for x in cluster.get_all_services() if x.type == name][0]
except IndexError:
service = None
return service
@classmethod
def deploy_client_config_for(cls, obj):
"""
Example deploying GATEWAY Client Config on each host
Note: only recommended if you need to deploy on a specific hostId.
Use the cluster.deploy_client_config() for normal use.
example usage:
# hostId
for host in get_cluster_hosts(include_cm_host=True):
deploy_client_config_for(host.hostId)
# cdh service
for service in cluster.get_all_services():
deploy_client_config_for(service)
:param host.hostId, or ApiService:
:return:
"""
api = ApiResource(server_host=cmx.cm_server, username=cmx.username, password=cmx.password)
# cluster = api.get_cluster(cmx.cluster_name)
if isinstance(obj, (str, unicode)):
for role_name in [x.roleName for x in api.get_host(obj).roleRefs if 'GATEWAY' in x.roleName]:
service = cdh.get_service_type('GATEWAY')
print "Deploying client config for service: %s - host: [%s]" % \
(service.type, api.get_host(obj).hostname)
check.status_for_command("Deploy client config for role %s" %
role_name, service.deploy_client_config(role_name))
elif isinstance(obj, ApiService):
for role in obj.get_roles_by_type("GATEWAY"):
check.status_for_command("Deploy client config for role %s" %
role.name, obj.deploy_client_config(role.name))
@classmethod
def create_service_role(cls, service, role_type, host):
"""
Helper function to create a role
:return:
"""
service_name = service.name[:4] + hashlib.md5(service.name).hexdigest()[:8] \
if len(role_type) > 24 else service.name
role_name = "-".join([service_name, role_type, host.md5host])[:64]
print "Creating role: %s on host: [%s]" % (role_name, host.hostname)
for cmd in service.create_role(role_name, role_type, host.hostId).get_commands():
check.status_for_command("Creating role: %s on host: [%s]" % (role_name, host.hostname), cmd)
@classmethod
def restart_cluster(cls):
"""
Restart Cluster and Cluster wide deploy client config
:return:
"""
api = ApiResource(server_host=cmx.cm_server, username=cmx.username, password=cmx.password)
cluster = api.get_cluster(cmx.cluster_name)
print "Restart cluster: %s" % cmx.cluster_name
check.status_for_command("Stop %s" % cmx.cluster_name, cluster.stop())
check.status_for_command("Start %s" % cmx.cluster_name, cluster.start())
# Example deploying cluster wide Client Config
check.status_for_command("Deploy client config for %s" % cmx.cluster_name, cluster.deploy_client_config())
@classmethod
def dependencies_for(cls, service):
"""
Utility function returns dict of service dependencies
:return:
"""
service_config = {}
config_types = {"hue_webhdfs": ['NAMENODE', 'HTTPFS'], "hdfs_service": "HDFS", "sentry_service": "SENTRY",
"zookeeper_service": "ZOOKEEPER", "hbase_service": "HBASE", "solr_service": "SOLR",
"hive_service": "HIVE", "sqoop_service": "SQOOP",
"impala_service": "IMPALA", "oozie_service": "OOZIE",
"mapreduce_yarn_service": ['MAPREDUCE', 'YARN'], "yarn_service": "YARN"}
dependency_list = []
# get required service config
for k, v in service.get_config(view="full")[0].items():
if v.required:
dependency_list.append(k)
# Extended dependence list, adding the optional ones as well
if service.type == 'HUE':
dependency_list.extend(['sqoop_service',
'impala_service'])
if service.type in ['HIVE', 'HDFS', 'HUE', 'HBASE', 'OOZIE', 'MAPREDUCE', 'YARN']:
dependency_list.append('zookeeper_service')
# if service.type in ['HIVE']:
# dependency_list.append('sentry_service')
if service.type == 'OOZIE':
dependency_list.append('hive_service')
# if service.type in ['FLUME', 'IMPALA']:
# dependency_list.append('hbase_service')
if service.type in ['FLUME', 'SPARK', 'SENTRY']:
dependency_list.append('hdfs_service')
# if service.type == 'FLUME':
# dependency_list.append('solr_service')
for key in dependency_list:
if key == "hue_webhdfs":
hdfs = cdh.get_service_type('HDFS')
if hdfs is not None:
service_config[key] = [x.name for x in hdfs.get_roles_by_type('NAMENODE')][0]
# prefer HTTPFS over NAMENODE
if [x.name for x in hdfs.get_roles_by_type('HTTPFS')]:
service_config[key] = [x.name for x in hdfs.get_roles_by_type('HTTPFS')][0]
elif key == "mapreduce_yarn_service":
for _type in config_types[key]:
if cdh.get_service_type(_type) is not None:
service_config[key] = cdh.get_service_type(_type).name
# prefer YARN over MAPREDUCE
if cdh.get_service_type(_type) is not None and _type == 'YARN':
service_config[key] = cdh.get_service_type(_type).name
elif key == "hue_hbase_thrift":
hbase = cdh.get_service_type('HBASE')
if hbase is not None:
service_config[key] = [x.name for x in hbase.get_roles_by_type(config_types[key])][0]
else:
if cdh.get_service_type(config_types[key]) is not None:
service_config[key] = cdh.get_service_type(config_types[key]).name
return service_config
class ActiveCommands:
def __init__(self):
self._api = ApiResource(server_host=cmx.cm_server, username=cmx.username, password=cmx.password)
def status_for_command(self, message, command):
"""
Helper to check active command status
:param message:
:param command:
:return:
"""
_state = 0
_bar = ['[|]', '[/]', '[-]', '[\\]']
while True:
if self._api.get("/commands/%s" % command.id)['active']:
sys.stdout.write(_bar[_state] + ' ' + message + ' ' + ('\b' * (len(message) + 5)))
sys.stdout.flush()
_state += 1
if _state > 3:
_state = 0
time.sleep(2)
else:
print "\n [%s] %s" % (command.id, self._api.get("/commands/%s" % command.id)['resultMessage'])
self._child_cmd(self._api.get("/commands/%s" % command.id)['children']['items'])
break
def _child_cmd(self, cmd):
"""
Helper cmd has child objects
:param cmd:
:return:
"""
if len(cmd) != 0:
print " Sub tasks result(s):"
for resMsg in cmd:
if resMsg.get('resultMessage'):
print " [%s] %s" % (resMsg['id'], resMsg['resultMessage']) if not resMsg.get('roleRef') \
else " [%s] %s - %s" % (resMsg['id'], resMsg['resultMessage'], resMsg['roleRef']['roleName'])
self._child_cmd(self._api.get("/commands/%s" % resMsg['id'])['children']['items'])
def display_eula():
fname = raw_input("Please enter your first name: ")
lname = raw_input("Please enter your last name: ")
company = raw_input("Please enter your company: ")
email = raw_input("Please enter your email: ")
phone = raw_input("Please enter your phone: ")
jobrole = raw_input("Please enter your jobrole: ")
jobfunction = raw_input("Please enter your jobfunction: ")
accepted = raw_input("Please enter yes to accept EULA: ")
if accepted == 'yes' and fname and lname and company and email and phone and jobrole and jobfunction:
postEulaInfo(fname, lname, email, company,
jobrole, jobfunction, phone)
return True
else:
return False
def parse_options():
global cmx
global check, cdh, management
cmx_config_options = {'ssh_root_password': None, 'ssh_root_user': 'root', 'ssh_private_key': None,
'cluster_name': 'Cluster 1', 'cluster_version': 'CDH5',
'username': 'cmadmin', 'password': 'cmpassword', 'cm_server': None,
'host_names': None, 'license_file': None, 'parcel': [], 'company': None,
'email': None, 'phone': None, 'fname': None, 'lname': None, 'jobrole': None,
'jobfunction': None, 'vmsize': None, 'do_post': True}
def cmx_args(option, opt_str, value, *args, **kwargs):
if option.dest == 'host_names':
print "switch %s value check: %s" % (opt_str, value)
for host in value.split(','):
if not hostname_resolves(host):
exit(1)
else:
cmx_config_options[option.dest] = [socket.gethostbyname(x) for x in value.split(',')]
elif option.dest == 'cm_server':
print "switch %s value check: %s" % (opt_str, value)
cmx_config_options[option.dest] = socket.gethostbyname(value) if \
hostname_resolves(value) else exit(1)
retry_count = 5
while retry_count > 0:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if not s.connect_ex((socket.gethostbyname(value), 7180)) == 0:
print "Cloudera Manager Server is not started on %s " % value
s.close()
sleep(60)
else:
break
retry_count -= 1
if retry_count == 0:
print "Couldn't connect to Cloudera Manager after 5 minutes, exiting"
exit(1)
elif option.dest == 'ssh_private_key':
with open(value, 'r') as f:
key_contents = f.read()
cmx_config_options[option.dest] = key_contents
else:
cmx_config_options[option.dest] = value
def hostname_resolves(hostname):
"""
Check if hostname resolves
:param hostname:
:return:
"""
try:
if socket.gethostbyname(hostname) == '0.0.0.0':
print "Error [{'host': '%s', 'fqdn': '%s'}]" % \
(socket.gethostbyname(hostname), socket.getfqdn(hostname))
return False
else:
print "Success [{'host': '%s', 'fqdn': '%s'}]" % \
(socket.gethostbyname(hostname), socket.getfqdn(hostname))
return True
except socket.error:
print "Error 'host': '%s'" % hostname
return False
def manifest_to_dict(manifest_json):
if manifest_json:
dir_list = json.load(
urllib2.urlopen(manifest_json))['parcels'][0]['parcelName']
parcel_part = re.match(r"^(.*?)-(.*)-(.*?)$", dir_list).groups()
return {'product': str(parcel_part[0]).upper(), 'version': str(parcel_part[1]).lower()}
else:
raise Exception("Invalid manifest.json")
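# Illustrative sketch (hypothetical parcel name, not read from a live manifest):
# for a manifest whose first parcelName is
# "CDH-5.3.0-1.cdh5.3.0.p0.30-el6.parcel", the regex above yields the groups
# ('CDH', '5.3.0-1.cdh5.3.0.p0.30', 'el6.parcel'), so manifest_to_dict()
# returns {'product': 'CDH', 'version': '5.3.0-1.cdh5.3.0.p0.30'}.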
parser = OptionParser()
parser.add_option('-m', '--cm-server', dest='cm_server', type="string", action='callback', callback=cmx_args,
help='*Set Cloudera Manager Server Host. '
'Note: This is the host where the Cloudera Management Services get installed.')
parser.add_option('-w', '--host-names', dest='host_names', type="string", action='callback',
callback=cmx_args,
help='*Set target node(s) list, separate with comma eg: -w host1,host2,...,host(n). '
'Note:'
' - enclose in double quote, also avoid leaving spaces between commas.'
' - CM_SERVER excluded in this list, if you want install CDH Services in CM_SERVER'
' add the host to this list.')
parser.add_option('-n', '--cluster-name', dest='cluster_name', type="string", action='callback',
callback=cmx_args, default='Cluster 1',
help='Set Cloudera Manager Cluster name enclosed in double quotes. Default "Cluster 1"')
parser.add_option('-u', '--ssh-root-user', dest='ssh_root_user', type="string", action='callback',
callback=cmx_args, default='root', help='Set target node(s) ssh username. Default root')
parser.add_option('-p', '--ssh-root-password', dest='ssh_root_password', type="string", action='callback',
callback=cmx_args, help='*Set target node(s) ssh password.')
parser.add_option('-k', '--ssh-private-key', dest='ssh_private_key', type="string", action='callback',
callback=cmx_args, help='The private key to authenticate with the hosts. '
'Specify either this or a password.')
parser.add_option('-l', '--license-file', dest='license_file', type="string", action='callback',
callback=cmx_args, help='Cloudera Manager License file name')
parser.add_option('-d', '--teardown', dest='teardown', action="store", type="string",
help='Teardown Cloudera Manager Cluster. Required arguments "keep_cluster" or "remove_cluster".')
parser.add_option('-a', '--highavailable', dest='highAvailability', action="store_true", default=False,
help='Create a High Availability cluster')
parser.add_option('-c', '--cm-user', dest='username', type="string", action='callback',
callback=cmx_args, help='Set Cloudera Manager Username')
parser.add_option('-s', '--cm-password', dest='password', type="string", action='callback',
callback=cmx_args, help='Set Cloudera Manager Password')
parser.add_option('-r', '--email-address', dest='email', type="string", action='callback',
callback=cmx_args, help='Set email address')
parser.add_option('-b', '--business-phone', dest='phone', type="string", action='callback',
callback=cmx_args, help='Set phone')
parser.add_option('-f', '--first-name', dest='fname', type="string", action='callback',
callback=cmx_args, help='Set first name')
parser.add_option('-t', '--last-name', dest='lname', type="string", action='callback',
callback=cmx_args, help='Set last name')
parser.add_option('-o', '--job-role', dest='jobrole', type="string", action='callback',
callback=cmx_args, help='Set job role')
parser.add_option('-i', '--job-function', dest='jobfunction', type="string", action='callback',
callback=cmx_args, help='Set job function')
parser.add_option('-y', '--company', dest='company', type="string", action='callback',
callback=cmx_args, help='Set company')
parser.add_option('-e', '--accept-eula', dest='accepted', action="store_true", default=False,
help='Must accept eula before install')
parser.add_option('-v', '--vmsize', dest='vmsize', type="string", action="callback",
callback=cmx_args, help='provide vmsize for setup')
(options, args) = parser.parse_args()
# Install CDH5 latest version
cmx_config_options['parcel'].append(manifest_to_dict(
'http://archive.cloudera.com/cdh5/parcels/5/manifest.json'))
msg_req_args = "Please specify the required arguments: "
if cmx_config_options['cm_server'] is None:
parser.error(msg_req_args + "-m/--cm-server")
else:
if not (cmx_config_options['ssh_private_key'] or cmx_config_options['ssh_root_password']):
parser.error(msg_req_args + "-p/--ssh-root-password or -k/--ssh-private-key")
elif cmx_config_options['host_names'] is None:
parser.error(msg_req_args + "-w/--host-names")
elif cmx_config_options['ssh_private_key'] and cmx_config_options['ssh_root_password']:
parser.error(msg_req_args + "-p/--ssh-root-password _OR_ -k/--ssh-private-key")
if (cmx_config_options['email'] is None or cmx_config_options['phone'] is None or
cmx_config_options['fname'] is None or cmx_config_options['lname'] is None or
cmx_config_options['jobrole'] is None or cmx_config_options['jobfunction'] is None or
cmx_config_options['company'] is None or
options.accepted is not True):
eula_result = display_eula()
if eula_result:
cmx_config_options['do_post'] = False
else:
parser.error(msg_req_args + 'please provide email, phone, first name, last name, job role, job function, company and accept the EULA: ' +
'-r/--email-address, -b/--business-phone, -f/--first-name, -t/--last-name, -o/--job-role, -i/--job-function, ' +
'-y/--company, -e/--accept-eula')
# Management services password. They are required when adding Management services
management = ManagementActions
if not (bool(management.get_mgmt_password("ACTIVITYMONITOR"))
and bool(management.get_mgmt_password("REPORTSMANAGER"))):
exit(1)
else:
cmx_config_options['amon_password'] = management.get_mgmt_password("ACTIVITYMONITOR")
cmx_config_options['rman_password'] = management.get_mgmt_password("REPORTSMANAGER")
cmx_config_options['oozie_password'] = management.get_mgmt_password("OOZIE")
cmx_config_options['hive_password'] = management.get_mgmt_password("HIVEMETASTORESERVER")
cmx = type('', (), cmx_config_options)
check = ActiveCommands()
cdh = ServiceActions
if cmx_config_options['cm_server'] and options.teardown:
if options.teardown.lower() in ['remove_cluster', 'keep_cluster']:
teardown(keep_cluster=(options.teardown.lower() == 'keep_cluster'))
print "Bye!"
exit(0)
else:
print 'Teardown Cloudera Manager Cluster. Required arguments "keep_cluster" or "remove_cluster".'
exit(1)
# Uncomment here to see cmx configuration options
# print cmx_config_options
return options
def log(msg):
print time.strftime("%X") + ": " + msg
def postEulaInfo(firstName, lastName, emailAddress, company, jobRole, jobFunction, businessPhone):
elqFormName = 'Cloudera_Azure_EULA'
elqSiteID = '1465054361'
cid = '70134000001PsLS'
url = 'https://s1465054361.t.eloqua.com/e/f2'
data = urllib.urlencode({'elqFormName': elqFormName,
'elqSiteID': elqSiteID,
'cid': cid,
'firstName': firstName,
'lastName': lastName,
'company': company,
'emailAddress': emailAddress,
'jobRole': jobRole,
'jobFunction': jobFunction,
'businessPhone': businessPhone
})
results = urllib2.urlopen(url, data)
response = results.read()
# Save the response for later inspection, then log it.
with open('results.html', 'w') as f:
f.write(response)
log(response)
def main():
# Parse user options
log("parse_options")
options = parse_options()
global diskcount
diskcount = getDataDiskCount()
log("data_disk_count: " + str(diskcount))
if cmx.do_post:
postEulaInfo(cmx.fname, cmx.lname, cmx.email, cmx.company,
cmx.jobrole, cmx.jobfunction, cmx.phone)
# Prepare Cloudera Manager Server:
# 1. Initialise Cluster and set Cluster name: 'Cluster 1'
# 2. Add hosts into: 'Cluster 1'
# 3. Deploy latest parcels into: 'Cluster 1'
log("init_cluster")
init_cluster()
log("add_hosts_to_cluster")
add_hosts_to_cluster()
# Deploy CDH Parcel
log("deploy_parcel")
deploy_parcel(parcel_product=cmx.parcel[0]['product'],
parcel_version=cmx.parcel[0]['version'])
log("setup_management")
# Example CM API to setup Cloudera Manager Management services - not installing 'ACTIVITYMONITOR'
mgmt_roles = ['SERVICEMONITOR', 'ALERTPUBLISHER', 'EVENTSERVER', 'HOSTMONITOR']
if management.licensed():
mgmt_roles.append('REPORTSMANAGER')
management(*mgmt_roles).setup()
# "START" Management roles
management(*mgmt_roles).start()
# "STOP" Management roles
# management_roles(*mgmt_services).stop()
# Upload license or Begin Trial
if options.license_file:
management.upload_license()
else:
management.begin_trial()
# Step-Through - Setup services in order of service dependencies
# Zookeeper, hdfs, HBase, Solr, Spark, Yarn,
# Hive, Sqoop, Sqoop Client, Impala, Oozie, Hue
log("setup_components")
setup_zookeeper(options.highAvailability)
setup_hdfs(options.highAvailability)
setup_yarn(options.highAvailability)
setup_spark_on_yarn()
setup_hive()
setup_impala(options.highAvailability)
setup_oozie()
setup_hue()
#setup_mapreduce(options.highAvailability)
# Note: setup_easy() is an alternative to the step-through above.
# It provides an example of an alternative method of
# using the CM API to set up CDH services.
# setup_easy()
# Example setting hdfs-HA and yarn-HA
# You can uncomment below after you've setup the CDH services.
# setup_hdfs_ha()
# setup_yarn_ha()
#if options.highAvailability:
# setup_hdfs_ha()
# setup_yarn_ha()
# Deploy GPL Extra Parcel
# deploy_parcel(parcel_product=cmx.parcel[1]['product'],parcel_version=cmx.parcel[1]['version'])
# Restart Cluster and Deploy Cluster wide client config
log("restart_cluster")
cdh.restart_cluster()
# Other examples of CM API
# eg: "STOP" Services or "START"
# cdh('HBASE', 'IMPALA', 'SPARK', 'SOLR', 'FLUME').stop()
# Example restarting Management Service
# management_role.restart_management()
# or Restart individual Management Roles
management(*mgmt_roles).restart()
# Stop REPORTSMANAGER Management Role
# management("REPORTSMANAGER").stop()
# Example setup Kerberos, Sentry
# setup_kerberos()
# setup_sentry()
print "Enjoy!"
if __name__ == "__main__":
print "%s" % '- ' * 20
print "Version: %s" % __version__
print "%s" % '- ' * 20
main()
# def setup_template():
# api = ApiResource(server_host=cmx.cm_server, username=cmx.username, password=cmx.password)
# cluster = api.get_cluster(cmx.cluster_name)
# service_type = ""
# if cdh.get_service_type(service_type) is None:
# service_name = ""
# cluster.create_service(service_name.lower(), service_type)
# service = cluster.get_service(service_name)
#
# # Service-Wide
# service.update_config(cdh.dependencies_for(service))
#
# hosts = sorted([x for x in api.get_all_hosts()], key=lambda x: x.ipAddress, reverse=False)
#
# # - Default Group
# role_group = service.get_role_config_group("%s-x-BASE" % service.name)
# role_group.update_config({})
# cdh.create_service_role(service, "X", [x for x in hosts if x.id == 0][0])
#
# check.status_for_command("Starting x Service", service.start())
| mit |
benoitsteiner/tensorflow | tensorflow/contrib/slim/python/slim/nets/resnet_v1_test.py | 49 | 18431 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.nets.resnet_v1."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import layers
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.contrib.slim.python.slim.nets import resnet_utils
from tensorflow.contrib.slim.python.slim.nets import resnet_v1
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def create_test_input(batch_size, height, width, channels):
"""Create test input tensor.
Args:
batch_size: The number of images per batch or `None` if unknown.
height: The height of each image or `None` if unknown.
width: The width of each image or `None` if unknown.
channels: The number of channels per image or `None` if unknown.
Returns:
Either a placeholder `Tensor` of dimension
[batch_size, height, width, channels] if any of the inputs are `None` or a
constant `Tensor` with the mesh grid values along the spatial dimensions.
"""
if None in [batch_size, height, width, channels]:
return array_ops.placeholder(dtypes.float32,
(batch_size, height, width, channels))
else:
return math_ops.to_float(
np.tile(
np.reshape(
np.reshape(np.arange(height), [height, 1]) + np.reshape(
np.arange(width), [1, width]), [1, height, width, 1]),
[batch_size, 1, 1, channels]))
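# Worked example (illustrative, not part of the original test): for height=2,
# width=2 the mesh grid above evaluates to [[0, 1], [1, 2]], which is then
# tiled across the batch and channel dimensions.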
class ResnetUtilsTest(test.TestCase):
def testSubsampleThreeByThree(self):
x = array_ops.reshape(math_ops.to_float(math_ops.range(9)), [1, 3, 3, 1])
x = resnet_utils.subsample(x, 2)
expected = array_ops.reshape(
constant_op.constant([0, 2, 6, 8]), [1, 2, 2, 1])
with self.test_session():
self.assertAllClose(x.eval(), expected.eval())
def testSubsampleFourByFour(self):
x = array_ops.reshape(math_ops.to_float(math_ops.range(16)), [1, 4, 4, 1])
x = resnet_utils.subsample(x, 2)
expected = array_ops.reshape(
constant_op.constant([0, 2, 8, 10]), [1, 2, 2, 1])
with self.test_session():
self.assertAllClose(x.eval(), expected.eval())
def testConv2DSameEven(self):
n, n2 = 4, 2
# Input image.
x = create_test_input(1, n, n, 1)
# Convolution kernel.
w = create_test_input(1, 3, 3, 1)
w = array_ops.reshape(w, [3, 3, 1, 1])
variable_scope.get_variable('Conv/weights', initializer=w)
variable_scope.get_variable('Conv/biases', initializer=array_ops.zeros([1]))
variable_scope.get_variable_scope().reuse_variables()
y1 = layers.conv2d(x, 1, [3, 3], stride=1, scope='Conv')
y1_expected = math_ops.to_float([[14, 28, 43, 26], [28, 48, 66, 37],
[43, 66, 84, 46], [26, 37, 46, 22]])
y1_expected = array_ops.reshape(y1_expected, [1, n, n, 1])
y2 = resnet_utils.subsample(y1, 2)
y2_expected = math_ops.to_float([[14, 43], [43, 84]])
y2_expected = array_ops.reshape(y2_expected, [1, n2, n2, 1])
y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')
y3_expected = y2_expected
y4 = layers.conv2d(x, 1, [3, 3], stride=2, scope='Conv')
y4_expected = math_ops.to_float([[48, 37], [37, 22]])
y4_expected = array_ops.reshape(y4_expected, [1, n2, n2, 1])
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
self.assertAllClose(y1.eval(), y1_expected.eval())
self.assertAllClose(y2.eval(), y2_expected.eval())
self.assertAllClose(y3.eval(), y3_expected.eval())
self.assertAllClose(y4.eval(), y4_expected.eval())
def testConv2DSameOdd(self):
n, n2 = 5, 3
# Input image.
x = create_test_input(1, n, n, 1)
# Convolution kernel.
w = create_test_input(1, 3, 3, 1)
w = array_ops.reshape(w, [3, 3, 1, 1])
variable_scope.get_variable('Conv/weights', initializer=w)
variable_scope.get_variable('Conv/biases', initializer=array_ops.zeros([1]))
variable_scope.get_variable_scope().reuse_variables()
y1 = layers.conv2d(x, 1, [3, 3], stride=1, scope='Conv')
y1_expected = math_ops.to_float([[14, 28, 43, 58, 34], [28, 48, 66, 84, 46],
[43, 66, 84, 102, 55],
[58, 84, 102, 120, 64],
[34, 46, 55, 64, 30]])
y1_expected = array_ops.reshape(y1_expected, [1, n, n, 1])
y2 = resnet_utils.subsample(y1, 2)
y2_expected = math_ops.to_float([[14, 43, 34], [43, 84, 55], [34, 55, 30]])
y2_expected = array_ops.reshape(y2_expected, [1, n2, n2, 1])
y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')
y3_expected = y2_expected
y4 = layers.conv2d(x, 1, [3, 3], stride=2, scope='Conv')
y4_expected = y2_expected
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
self.assertAllClose(y1.eval(), y1_expected.eval())
self.assertAllClose(y2.eval(), y2_expected.eval())
self.assertAllClose(y3.eval(), y3_expected.eval())
self.assertAllClose(y4.eval(), y4_expected.eval())
def _resnet_plain(self, inputs, blocks, output_stride=None, scope=None):
"""A plain ResNet without extra layers before or after the ResNet blocks."""
with variable_scope.variable_scope(scope, values=[inputs]):
with arg_scope([layers.conv2d], outputs_collections='end_points'):
net = resnet_utils.stack_blocks_dense(inputs, blocks, output_stride)
end_points = utils.convert_collection_to_dict('end_points')
return net, end_points
def testEndPointsV1(self):
"""Test the end points of a tiny v1 bottleneck network."""
blocks = [
resnet_v1.resnet_v1_block(
'block1', base_depth=1, num_units=2, stride=2),
resnet_v1.resnet_v1_block(
'block2', base_depth=2, num_units=2, stride=1),
]
inputs = create_test_input(2, 32, 16, 3)
with arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_plain(inputs, blocks, scope='tiny')
expected = [
'tiny/block1/unit_1/bottleneck_v1/shortcut',
'tiny/block1/unit_1/bottleneck_v1/conv1',
'tiny/block1/unit_1/bottleneck_v1/conv2',
'tiny/block1/unit_1/bottleneck_v1/conv3',
'tiny/block1/unit_2/bottleneck_v1/conv1',
'tiny/block1/unit_2/bottleneck_v1/conv2',
'tiny/block1/unit_2/bottleneck_v1/conv3',
'tiny/block2/unit_1/bottleneck_v1/shortcut',
'tiny/block2/unit_1/bottleneck_v1/conv1',
'tiny/block2/unit_1/bottleneck_v1/conv2',
'tiny/block2/unit_1/bottleneck_v1/conv3',
'tiny/block2/unit_2/bottleneck_v1/conv1',
'tiny/block2/unit_2/bottleneck_v1/conv2',
'tiny/block2/unit_2/bottleneck_v1/conv3']
self.assertItemsEqual(expected, end_points)
def _stack_blocks_nondense(self, net, blocks):
"""A simplified ResNet Block stacker without output stride control."""
for block in blocks:
with variable_scope.variable_scope(block.scope, 'block', [net]):
for i, unit in enumerate(block.args):
with variable_scope.variable_scope('unit_%d' % (i + 1), values=[net]):
net = block.unit_fn(net, rate=1, **unit)
return net
def testAtrousValuesBottleneck(self):
"""Verify the values of dense feature extraction by atrous convolution.
Make sure that dense feature extraction by stack_blocks_dense() followed by
subsampling gives identical results to feature extraction at the nominal
network output stride using the simple self._stack_blocks_nondense() above.
"""
block = resnet_v1.resnet_v1_block
blocks = [
block('block1', base_depth=1, num_units=2, stride=2),
block('block2', base_depth=2, num_units=2, stride=2),
block('block3', base_depth=4, num_units=2, stride=2),
block('block4', base_depth=8, num_units=2, stride=1),
]
nominal_stride = 8
# Test both odd and even input dimensions.
height = 30
width = 31
with arg_scope(resnet_utils.resnet_arg_scope()):
with arg_scope([layers.batch_norm], is_training=False):
for output_stride in [1, 2, 4, 8, None]:
with ops.Graph().as_default():
with self.test_session() as sess:
random_seed.set_random_seed(0)
inputs = create_test_input(1, height, width, 3)
# Dense feature extraction followed by subsampling.
output = resnet_utils.stack_blocks_dense(inputs, blocks,
output_stride)
if output_stride is None:
factor = 1
else:
factor = nominal_stride // output_stride
output = resnet_utils.subsample(output, factor)
# Make the two networks use the same weights.
variable_scope.get_variable_scope().reuse_variables()
# Feature extraction at the nominal network rate.
expected = self._stack_blocks_nondense(inputs, blocks)
sess.run(variables.global_variables_initializer())
output, expected = sess.run([output, expected])
self.assertAllClose(output, expected, atol=1e-4, rtol=1e-4)
class ResnetCompleteNetworkTest(test.TestCase):
"""Tests with complete small ResNet v1 networks."""
def _resnet_small(self,
inputs,
num_classes=None,
is_training=None,
global_pool=True,
output_stride=None,
include_root_block=True,
reuse=None,
scope='resnet_v1_small'):
"""A shallow and thin ResNet v1 for faster tests."""
block = resnet_v1.resnet_v1_block
blocks = [
block('block1', base_depth=1, num_units=3, stride=2),
block('block2', base_depth=2, num_units=3, stride=2),
block('block3', base_depth=4, num_units=3, stride=2),
block('block4', base_depth=8, num_units=2, stride=1),
]
return resnet_v1.resnet_v1(inputs, blocks, num_classes, is_training,
global_pool, output_stride, include_root_block,
reuse, scope)
def testClassificationEndPoints(self):
global_pool = True
num_classes = 10
inputs = create_test_input(2, 224, 224, 3)
with arg_scope(resnet_utils.resnet_arg_scope()):
logits, end_points = self._resnet_small(
inputs, num_classes, global_pool=global_pool, scope='resnet')
self.assertTrue(logits.op.name.startswith('resnet/logits'))
self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes])
self.assertTrue('predictions' in end_points)
self.assertListEqual(end_points['predictions'].get_shape().as_list(),
[2, 1, 1, num_classes])
def testClassificationShapes(self):
global_pool = True
num_classes = 10
inputs = create_test_input(2, 224, 224, 3)
with arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_small(
inputs, num_classes, global_pool=global_pool, scope='resnet')
endpoint_to_shape = {
'resnet/block1': [2, 28, 28, 4],
'resnet/block2': [2, 14, 14, 8],
'resnet/block3': [2, 7, 7, 16],
'resnet/block4': [2, 7, 7, 32]
}
for endpoint in endpoint_to_shape:
shape = endpoint_to_shape[endpoint]
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testFullyConvolutionalEndpointShapes(self):
global_pool = False
num_classes = 10
inputs = create_test_input(2, 321, 321, 3)
with arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_small(
inputs, num_classes, global_pool=global_pool, scope='resnet')
endpoint_to_shape = {
'resnet/block1': [2, 41, 41, 4],
'resnet/block2': [2, 21, 21, 8],
'resnet/block3': [2, 11, 11, 16],
'resnet/block4': [2, 11, 11, 32]
}
for endpoint in endpoint_to_shape:
shape = endpoint_to_shape[endpoint]
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testRootlessFullyConvolutionalEndpointShapes(self):
global_pool = False
num_classes = 10
inputs = create_test_input(2, 128, 128, 3)
with arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_small(
inputs,
num_classes,
global_pool=global_pool,
include_root_block=False,
scope='resnet')
endpoint_to_shape = {
'resnet/block1': [2, 64, 64, 4],
'resnet/block2': [2, 32, 32, 8],
'resnet/block3': [2, 16, 16, 16],
'resnet/block4': [2, 16, 16, 32]
}
for endpoint in endpoint_to_shape:
shape = endpoint_to_shape[endpoint]
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testAtrousFullyConvolutionalEndpointShapes(self):
global_pool = False
num_classes = 10
output_stride = 8
inputs = create_test_input(2, 321, 321, 3)
with arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_small(
inputs,
num_classes,
global_pool=global_pool,
output_stride=output_stride,
scope='resnet')
endpoint_to_shape = {
'resnet/block1': [2, 41, 41, 4],
'resnet/block2': [2, 41, 41, 8],
'resnet/block3': [2, 41, 41, 16],
'resnet/block4': [2, 41, 41, 32]
}
for endpoint in endpoint_to_shape:
shape = endpoint_to_shape[endpoint]
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testAtrousFullyConvolutionalValues(self):
"""Verify dense feature extraction with atrous convolution."""
nominal_stride = 32
for output_stride in [4, 8, 16, 32, None]:
with arg_scope(resnet_utils.resnet_arg_scope()):
with ops.Graph().as_default():
with self.test_session() as sess:
random_seed.set_random_seed(0)
inputs = create_test_input(2, 81, 81, 3)
# Dense feature extraction followed by subsampling.
output, _ = self._resnet_small(
inputs,
None,
is_training=False,
global_pool=False,
output_stride=output_stride)
if output_stride is None:
factor = 1
else:
factor = nominal_stride // output_stride
output = resnet_utils.subsample(output, factor)
# Make the two networks use the same weights.
variable_scope.get_variable_scope().reuse_variables()
# Feature extraction at the nominal network rate.
expected, _ = self._resnet_small(
inputs, None, is_training=False, global_pool=False)
sess.run(variables.global_variables_initializer())
self.assertAllClose(
output.eval(), expected.eval(), atol=1e-4, rtol=1e-4)
def testUnknownBatchSize(self):
batch = 2
height, width = 65, 65
global_pool = True
num_classes = 10
inputs = create_test_input(None, height, width, 3)
with arg_scope(resnet_utils.resnet_arg_scope()):
logits, _ = self._resnet_small(
inputs, num_classes, global_pool=global_pool, scope='resnet')
self.assertTrue(logits.op.name.startswith('resnet/logits'))
self.assertListEqual(logits.get_shape().as_list(),
[None, 1, 1, num_classes])
images = create_test_input(batch, height, width, 3)
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 1, 1, num_classes))
def testFullyConvolutionalUnknownHeightWidth(self):
batch = 2
height, width = 65, 65
global_pool = False
inputs = create_test_input(batch, None, None, 3)
with arg_scope(resnet_utils.resnet_arg_scope()):
output, _ = self._resnet_small(inputs, None, global_pool=global_pool)
self.assertListEqual(output.get_shape().as_list(), [batch, None, None, 32])
images = create_test_input(batch, height, width, 3)
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
output = sess.run(output, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 3, 3, 32))
def testAtrousFullyConvolutionalUnknownHeightWidth(self):
batch = 2
height, width = 65, 65
global_pool = False
output_stride = 8
inputs = create_test_input(batch, None, None, 3)
with arg_scope(resnet_utils.resnet_arg_scope()):
output, _ = self._resnet_small(
inputs, None, global_pool=global_pool, output_stride=output_stride)
self.assertListEqual(output.get_shape().as_list(), [batch, None, None, 32])
images = create_test_input(batch, height, width, 3)
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
output = sess.run(output, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 9, 9, 32))
if __name__ == '__main__':
test.main()
| apache-2.0 |
berth64/modded_modded_1257ad | source/header_operations.py | 1 | 319871 | # -*- coding: utf-8 -*-
################################################################################
# header_operations expanded v.1.0.1 #
################################################################################
# TABLE OF CONTENTS
################################################################################
#
# [ Z00 ] Introduction and Credits.
# [ Z01 ] Operation Modifiers.
# [ Z02 ] Flow Control.
# [ Z03 ] Mathematical Operations.
# [ Z04 ] Script/Trigger Parameters and Results.
# [ Z05 ] Keyboard and Mouse Input.
# [ Z06 ] World Map.
# [ Z07 ] Game Settings.
# [ Z08 ] Factions.
# [ Z09 ] Parties and Party Templates.
# [ Z10 ] Troops.
# [ Z11 ] Quests.
# [ Z12 ] Items.
# [ Z13 ] Sounds and Music Tracks.
# [ Z14 ] Positions.
# [ Z15 ] Game Notes.
# [ Z16 ] Tableaus and Heraldics.
# [ Z17 ] String Operations.
# [ Z18 ] Output And Messages.
# [ Z19 ] Game Control: Screens, Menus, Dialogs and Encounters.
# [ Z20 ] Scenes and Missions.
# [ Z21 ] Scene Props and Prop Instances.
# [ Z22 ] Agents and Teams.
# [ Z23 ] Presentations.
# [ Z24 ] Multiplayer And Networking.
# [ Z25 ] Remaining Esoteric Stuff.
# [ Z26 ] Hardcoded Compiler-Related Code.
#
################################################################################
################################################################################
# [ Z00 ] INTRODUCTION AND CREDITS
################################################################################
# Everyone who has ever tried to mod Mount&Blade games knows perfectly well,
# that the documentation for it's Module System is severely lacking. Warband
# Module System, while introducing many new and useful operations, did not
# improve considerably in the way of documentation. What's worse, a number of
# outright errors and inconsistencies appeared between what was documented in
# the comments to the header_operations.py file (which was the root source of
# all Warband scripting documentation, whether you like it or not), and what
# was actually implemented in the game engine.
# Sooner or later someone was bound to dedicate some time and effort to fix
# this problem by properly documenting the file. It just so happened that I
# was the first person crazy enough to accept the challenge.
# I have tried to make this file a self-sufficient source of information on
# every operation that the Warband scripting engine knows of. Naturally I
# failed - there are still many operations for which there is simply not
# enough information, or operations with effects that have not yet been
# thoroughly tested and confirmed. But as far as I know, there is currently
# no other reference more exhaustive than this. I tried to make the file
# useful to both seasoned scripters and complete newbies, and to a certain
# degree this file can even serve as a tutorial into Warband scripting -
# though it still won't replace the wealth of tutorials produced by the
# Warband modding community.
# I really hope you will find it useful as well.
# Alexander Lomski AKA Lav. Jan 18th, 2012.
# And the credits.
# First of all, I should credit Taleworlds for the creation of this game and
# its Module System. Without them, I wouldn't be able to work on this file,
# so even though I'm often sceptical about their programming style and quality
# of their code, they still did a damn good job delivering this game to all
# of us.
# And then I should credit many members from the Warband modding community
# who have shared their knowledge and helped me clear out many uncertainties
# and inconsistencies. Special credits (in no particular order) go to
# cmpxchg8b, Caba'drin, SonKidd, MadVader, dunde, Ikaguia, MadocComadrin,
# Cjkjvfnby, shokkueibu.
################################################################################
# [ Z01 ] OPERATION MODIFIERS
################################################################################
neg = 0x80000000 # (neg|<operation_name>, ...),
# Used in combination with conditional operations to invert their results.
this_or_next = 0x40000000 # (this_or_next|<operation_name>, ...),
# Used in combination with conditional operations to group them into OR blocks.
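# Example (an illustrative sketch of how the modifiers combine):
#   (try_begin),
#     (this_or_next|eq, ":value", 1),      # OR block: passes if :value is 1...
#     (eq, ":value", 3),                   # ...or if :value is 3
#     (neg|is_between, ":value", 10, 20),  # AND NOT 10 <= :value < 20
#     # ...operations here run only when all of the above hold...
#   (try_end),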
################################################################################
# [ Z02 ] FLOW CONTROL
################################################################################
call_script = 1 # (call_script, <script_id>, [<script_param>...]),
# Calls specified script with or without parameters.
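# Example (illustrative; the script name and values are made up):
#   (call_script, "script_give_gold_to_troop", "trp_player", 100),
# Inside the called script, retrieve the arguments with
# (store_script_param, ":troop_no", 1) and (store_script_param, ":amount", 2).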
try_begin = 4 # (try_begin),
# Opens a conditional block.
else_try = 5 # (else_try),
# If conditional operations in the conditional block fail, this block of code will be executed.
else_try_begin = 5 # (else_try_begin),
# Deprecated form of (else_try).
try_end = 3 # (try_end),
# Concludes a conditional block or a cycle.
end_try = 3 # (end_try),
# Deprecated form of (try_end),
try_for_range = 6 # (try_for_range, <destination>, <lower_bound>, <upper_bound>),
# Runs a cycle, iterating the value in the <lower_bound>..<upper_bound>-1 range.
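# Example (illustrative): iterate ":cur" over the values 0..9 inclusive.
#   (try_for_range, ":cur", 0, 10),
#     # ...loop body using ":cur"...
#   (try_end),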
try_for_range_backwards = 7 # (try_for_range_backwards, <destination>, <lower_bound>, <upper_bound>),
# Same as above, but iterates the value in the opposite direction (from higher values to lower).
try_for_parties = 11 # (try_for_parties, <destination>),
# Runs a cycle, iterating all parties on the map.
try_for_agents = 12 # (try_for_agents, <destination>),
# Runs a cycle, iterating all agents on the scene.
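# Example (an illustrative sketch): fully heal every living agent on the scene.
#   (try_for_agents, ":agent"),
#     (agent_is_alive, ":agent"),
#     (agent_set_hit_points, ":agent", 100, 0),  # 100% health; last param 0 = relative
#   (try_end),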
try_for_prop_instances = 16 # (try_for_prop_instances, <destination>, [<scene_prop_id>]),
# Version 1.161+. Runs a cycle, iterating all scene prop instances on the scene, or all scene prop instances of specific type if optional parameter is provided.
try_for_players = 17 # (try_for_players, <destination>, [skip_server]),
# Version 1.165+. Iterates through all players in a multiplayer game. Set optional parameter to 1 to skip server player entry.
################################################################################
# [ Z03 ] MATHEMATICAL OPERATIONS
################################################################################
# Mathematical operations deal with numbers. Warband Module System can only
# deal with integers. Floating point numbers are emulated by the so-called
# "fixed point numbers". Wherever you encounter a fixed point parameter for
# some Module System operation, keep in mind that it is actually just a
# regular integer number, HOWEVER it is supposed to represent a floating
# point number equal to fixed_point_number / fixed_point_multiplier. As you
# might have guessed, to convert a floating point number to fixed point, you
# have to multiply it by fixed_point_multiplier. You can change the value of
# multiplier with the operation (set_fixed_point_multiplier), thus influencing
# the precision of all operations dealing with fixed point numbers.
# A notion very important for Warband modding is that you reference all
# Warband objects by their numeric values. In other words, you can do maths
# with your items, troops, agents, scenes, parties et cetera. This is used
# extensively in the code, so don't be surprised to see code looking like
# (store_add, ":value", "itm_pike", 4). This code is just calculating a
# reference to an item which is located 4 positions after "itm_pike" inside
# the module_items.py file.
# Conditional operations
gt = 32 # (gt, <value1>, <value2>),
# Checks that value1 > value2
ge = 30 # (ge, <value1>, <value2>),
# Checks that value1 >= value2
eq = 31 # (eq, <value1>, <value2>),
# Checks that value1 == value2
neq = neg|eq # (neq, <value1>, <value2>),
# Checks that value1 != value2
le = neg|gt # (le, <value1>, <value2>),
# Checks that value1 <= value2
lt = neg|ge # (lt, <value1>, <value2>),
# Checks that value1 < value2
is_between = 33 # (is_between, <value>, <lower_bound>, <upper_bound>),
# Checks that lower_bound <= value < upper_bound
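# Example: (is_between, ":value", 3, 6) succeeds for 3, 4 and 5, but fails for 6.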
# Mathematical and assignment operations
assign = 2133 # (assign, <destination>, <value>),
# Directly assigns a value to a variable or register.
store_add = 2120 # (store_add, <destination>, <value>, <value>),
# Assigns <destination> := <value> + <value>
store_sub = 2121 # (store_sub, <destination>, <value>, <value>),
# Assigns <destination> := <value> - <value>
store_mul = 2122 # (store_mul, <destination>, <value>, <value>),
# Assigns <destination> := <value> * <value>
store_div = 2123 # (store_div, <destination>, <value>, <value>),
# Assigns <destination> := <value> / <value>
store_mod = 2119 # (store_mod, <destination>, <value>, <value>),
# Assigns <destination> := <value> MOD <value>
val_add = 2105 # (val_add, <destination>, <value>),
# Assigns <destination> := <destination> + <value>
val_sub = 2106 # (val_sub, <destination>, <value>),
# Assigns <destination> := <destination> - <value>
val_mul = 2107 # (val_mul, <destination>, <value>),
# Assigns <destination> := <destination> * <value>
val_div = 2108 # (val_div, <destination>, <value>),
# Assigns <destination> := <destination> / <value>
val_mod = 2109 # (val_mod, <destination>, <value>),
# Assigns <destination> := <destination> MOD <value>
val_min = 2110 # (val_min, <destination>, <value>),
# Assigns <destination> := MIN (<destination>, <value>)
val_max = 2111 # (val_max, <destination>, <value>),
# Assigns <destination> := MAX (<destination>, <value>)
val_clamp = 2112 # (val_clamp, <destination>, <lower_bound>, <upper_bound>),
# Enforces <destination> value to be within <lower_bound>..<upper_bound>-1 range.
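# Example: (val_clamp, ":score", 0, 101) forces ":score" into the 0..100 range.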
val_abs = 2113 # (val_abs, <destination>),
# Assigns <destination> := ABS (<destination>)
store_or = 2116 # (store_or, <destination>, <value>, <value>),
# Binary OR
store_and = 2117 # (store_and, <destination>, <value>, <value>),
# Binary AND
val_or = 2114 # (val_or, <destination>, <value>),
# Binary OR, overwriting first operand.
val_and = 2115 # (val_and, <destination>, <value>),
# Binary AND, overwriting first operand.
val_lshift = 2100 # (val_lshift, <destination>, <value>),
# Bitwise shift left (dest = dest * 2 ^ value)
val_rshift = 2101 # (val_rshift, <destination>, <value>),
# Bitwise shift right (dest = dest / 2 ^ value)
store_sqrt = 2125 # (store_sqrt, <destination_fixed_point>, <value_fixed_point>),
# Assigns dest := SQRT (value)
store_pow = 2126 # (store_pow, <destination_fixed_point>, <value_fixed_point>, <power_fixed_point>),
# Assigns dest := value ^ power
store_sin = 2127 # (store_sin, <destination_fixed_point>, <value_fixed_point>),
# Assigns dest := SIN (value)
store_cos = 2128 # (store_cos, <destination_fixed_point>, <value_fixed_point>),
# Assigns dest := COS (value)
store_tan = 2129 # (store_tan, <destination_fixed_point>, <value_fixed_point>),
# Assigns dest := TAN (value)
store_asin = 2140 # (store_asin, <destination_fixed_point>, <value_fixed_point>),
# Assigns dest := ARCSIN (value)
store_acos = 2141 # (store_acos, <destination_fixed_point>, <value_fixed_point>),
# Assigns dest := ARCCOS (value)
store_atan = 2142 # (store_atan, <destination_fixed_point>, <value_fixed_point>),
# Assigns dest := ARCTAN (value)
store_atan2 = 2143 # (store_atan2, <destination_fixed_point>, <y_fixed_point>, <x_fixed_point>),
# Returns the angle between the x axis and a point with coordinates (X,Y) in degrees. Note the angle is calculated counter-clockwise, i.e. (1,1) will return 45, not -45.
# Random number generation
store_random = 2135 # (store_random, <destination>, <upper_range>),
# Stores a random value in the range of 0..<upper_range>-1. Deprecated, use (store_random_in_range) instead.
store_random_in_range = 2136 # (store_random_in_range, <destination>, <range_low>, <range_high>),
# Stores a random value in the range of <range_low>..<range_high>-1.
shuffle_range = 2134 # (shuffle_range, <reg_no>, <reg_no>),
# Randomly shuffles a range of registers, reordering the values contained in them. Commonly used for list randomization.
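# Sketch of the random number operations (register and variable names are
# arbitrary; whether (shuffle_range) treats its bounds inclusively is not
# documented here, so treat that as an assumption):
#   (store_random_in_range, ":roll", 0, 100),   # ":roll" is now 0..99
#   (shuffle_range, 0, 4),                      # reorders values in reg0..reg4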
# Fixed point values handling
set_fixed_point_multiplier = 2124 # (set_fixed_point_multiplier, <value>),
# Affects all operations dealing with fixed point numbers. Default value is 1.
convert_to_fixed_point = 2130 # (convert_to_fixed_point, <destination_fixed_point>),
# Converts integer value to fixed point (multiplies by the fixed point multiplier).
convert_from_fixed_point = 2131 # (convert_from_fixed_point, <destination>),
# Converts fixed point value to integer (divides by the fixed point multiplier).
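# A minimal sketch of the full fixed point workflow. With multiplier 1000,
# the integer 2 becomes 2000 after conversion, SQRT returns ~1414 (i.e. 1.414),
# and converting back yields the truncated integer 1:
#   (set_fixed_point_multiplier, 1000),
#   (assign, ":value", 2),
#   (convert_to_fixed_point, ":value"),    # ":value" = 2000
#   (store_sqrt, ":root", ":value"),       # ":root" = ~1414
#   (convert_from_fixed_point, ":root"),   # ":root" = 1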
################################################################################
# [ Z04 ] SCRIPT/TRIGGER PARAMETERS AND RESULTS
################################################################################
# Many scripts can accept additional parameters, and many triggers have some
# parameters of their own (as detailed in the header_triggers.py file). You can
# only pass numeric values as parameters. Since string constants are also
# Warband objects, you can pass them as well, and you can also pass string
# or position registers. However, you cannot pass quick strings (strings
# defined directly in the code).
# You can declare your scripts with as many parameters as you wish. Triggers,
# however, are always called with their predefined parameters. Also, the game
# engine does not support more than 3 parameters per trigger. As a result,
# some triggers receive extra information which could not fit into those
# three parameters through numeric, string or position registers.
# Some triggers and scripts called from the game engine (those have names
# starting with "game_") expect you to return some value to the game engine.
# That value may be either a number or a string and is set by special
# operations listed below. Scripts called from the Module System, however,
# typically use registers to store their return data.
# Note that if you call a script from a trigger, you can still use operations
# to retrieve the trigger's calling parameters, and they will retrieve values that
# have been passed to the trigger, not values that have been passed to the
# script.
store_script_param_1 = 21 # (store_script_param_1, <destination>),
# Retrieve the value of the first script parameter.
store_script_param_2 = 22 # (store_script_param_2, <destination>),
# Retrieve the value of the second script parameter.
store_script_param = 23 # (store_script_param, <destination>, <script_param_index>),
# Retrieve the value of arbitrary script parameter (generally used when script accepts more than two). Parameters are enumerated starting from 1.
set_result_string = 60 # (set_result_string, <string>),
# Sets the return value of a game_* script, when a string value is expected by game engine.
store_trigger_param_1 = 2071 # (store_trigger_param_1, <destination>),
# Retrieve the value of the first trigger parameter. Will retrieve trigger's parameters even when called from inside a script, for as long as that script is running within trigger context.
store_trigger_param_2 = 2072 # (store_trigger_param_2, <destination>),
# Retrieve the value of the second trigger parameter. Will retrieve trigger's parameters even when called from inside a script, for as long as that script is running within trigger context.
store_trigger_param_3 = 2073 # (store_trigger_param_3, <destination>),
# Retrieve the value of the third trigger parameter. Will retrieve trigger's parameters even when called from inside a script, for as long as that script is running within trigger context.
store_trigger_param = 2070 # (store_trigger_param, <destination>, <trigger_param_no>),
# Version 1.153+. Retrieve the value of arbitrary trigger parameter. Parameters are enumerated starting from 1. Note that despite the introduction of this operation, there's not a single trigger with more than 3 parameters.
get_trigger_object_position = 702 # (get_trigger_object_position, <position>),
# Retrieve the position of an object which caused the trigger to fire (when appropriate).
set_trigger_result = 2075 # (set_trigger_result, <value>),
# Sets the return value of a trigger or game_* script, when an integer value is expected by game engine.
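# Sketch: a script reading its parameters and returning its result through
# reg0 (the script name and variables are hypothetical; script definitions
# live in module_scripts.py):
#   ("sum_two_values", [
#     (store_script_param_1, ":a"),
#     (store_script_param_2, ":b"),
#     (store_add, reg0, ":a", ":b"),    # Module System callers read reg0
#   ]),
# A game_* script or trigger that must return a number to the engine would
# instead end with (set_trigger_result, ":result"),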
################################################################################
# [ Z05 ] KEYBOARD AND MOUSE INPUT
################################################################################
# The game provides modders with limited ability to control keyboard input and
# mouse movements. It is also possible to tamper with game keys (i.e. keys
# bound to specific game actions), including the ability to override game's
# reaction to those keys. Note that mouse buttons are keys too, and can be
# detected with the corresponding operations.
# Conditional operations
key_is_down = 70 # (key_is_down, <key_code>),
# Checks that the specified key is currently pressed. See header_triggers.py for key code reference.
key_clicked = 71 # (key_clicked, <key_code>),
# Checks that the specified key has just been pressed. See header_triggers.py for key code reference.
game_key_is_down = 72 # (game_key_is_down, <game_key_code>),
# Checks that the specified game key is currently pressed. See header_triggers.py for game key code reference.
game_key_clicked = 73 # (game_key_clicked, <game_key_code>),
# Checks that the specified key has just been pressed. See header_triggers.py for game key code reference.
# Generic operations
omit_key_once = 77 # (omit_key_once, <key_code>),
# Forces the game to ignore default bound action for the specified game key on current game frame.
clear_omitted_keys = 78 # (clear_omitted_keys),
# Commonly called when exiting from a presentation which made any calls to (omit_key_once). However, the effects of those calls expire by the next frame, so this operation is apparently not strictly necessary; it is still recommended, to stay on the safe side.
mouse_get_position = 75 # (mouse_get_position, <position>),
# Stores mouse x and y coordinates in the specified position.
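# Sketch: reacting to a key press in a trigger (key_b is a key code constant
# from header_triggers.py; pos1 is an arbitrary position register):
#   (try_begin),
#     (key_clicked, key_b),
#     (omit_key_once, key_b),        # suppress the default bound action this frame
#     (mouse_get_position, pos1),    # cursor coordinates stored in pos1
#     # handle the key press here
#   (try_end),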
################################################################################
# [ Z06 ] WORLD MAP
################################################################################
# Generally, all operations which only make sense on the worldmap and have no
# specific category have been assembled here. These mostly deal with weather,
# time and resting.
# Conditional operations
is_currently_night = 2273 # (is_currently_night),
# Checks that it's currently night in the game.
map_free = 37 # (map_free),
# Checks that the player is currently on the global map and no game screens are open.
# Weather-handling operations
get_global_cloud_amount = 90 # (get_global_cloud_amount, <destination>),
# Returns current cloudiness (a value between 0..100).
set_global_cloud_amount = 91 # (set_global_cloud_amount, <value>),
# Sets current cloudiness (value is clamped to 0..100).
get_global_haze_amount = 92 # (get_global_haze_amount, <destination>),
# Returns current fogginess (value between 0..100).
set_global_haze_amount = 93 # (set_global_haze_amount, <value>),
# Sets current fogginess (value is clamped to 0..100).
# Time-related operations
store_current_hours = 2270 # (store_current_hours, <destination>),
# Stores number of hours that have passed since beginning of the game. Commonly used to track time when accuracy up to hours is required.
store_time_of_day = 2271 # (store_time_of_day, <destination>),
# Stores current day hour (value in 0..24 range).
store_current_day = 2272 # (store_current_day, <destination>),
# Stores number of days that have passed since beginning of the game. Commonly used to track time when high accuracy is not required.
rest_for_hours = 1030 # (rest_for_hours, <rest_time_in_hours>, [time_speed_multiplier], [remain_attackable]),
# Forces the player party to rest for specified number of hours. Time can be accelerated and player can be made immune or subject to attacks.
rest_for_hours_interactive = 1031 # (rest_for_hours_interactive, <rest_time_in_hours>, [time_speed_multiplier], [remain_attackable]),
# Forces the player party to rest for specified number of hours. Player can break the rest at any moment. Time can be accelerated and player can be made immune or subject to attacks.
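# Sketch: making the world map cloudier at night (":clouds" is an arbitrary
# local variable; the engine clamps the final value to 0..100):
#   (try_begin),
#     (is_currently_night),
#     (get_global_cloud_amount, ":clouds"),
#     (val_add, ":clouds", 20),
#     (set_global_cloud_amount, ":clouds"),
#   (try_end),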
################################################################################
# [ Z07 ] GAME SETTINGS AND STATISTICS
################################################################################
# This group of operations allows you to retrieve some of the game settings
# as configured by the player on Options page, and change them as necessary
# (possibly forcing a certain level of difficulty on the player). Operations
# dealing with achievements (an interesting, but underdeveloped feature of
# Warband) are also placed in this category.
# Conditional operations
is_trial_version = 250 # (is_trial_version),
# Checks if the game is in trial mode (has not been purchased). Player cannot get higher than level 6 in this mode.
is_edit_mode_enabled = 255 # (is_edit_mode_enabled),
# Version 1.153+. Checks that Edit Mode is currently enabled in the game.
# Generic operations
get_operation_set_version = 55 # (get_operation_set_version, <destination>),
# Version 1.165+. 4research. Apparently returns the current version of Module System operations set, allowing transparent support for multiple Warband engine versions.
set_player_troop = 47 # (set_player_troop, <troop_id>),
# Changes the troop player controls. Generally used in quick-battle scenarios to give player a predefined character.
show_object_details_overlay = 960 # (show_object_details_overlay, <value>),
# Turns various popup tooltips on (value = 1) and off (value = 0). This includes agent names and dropped item names during missions, item stats in inventory on mouse over, etc.
auto_save = 985 # (auto_save),
# Version 1.161+. Saves the game to the current save slot.
# Access to game options
options_get_damage_to_player = 260 # (options_get_damage_to_player, <destination>),
# 0 = 1/4, 1 = 1/2, 2 = 1/1
options_set_damage_to_player = 261 # (options_set_damage_to_player, <value>),
# 0 = 1/4, 1 = 1/2, 2 = 1/1
options_get_damage_to_friends = 262 # (options_get_damage_to_friends, <destination>),
# 0 = 1/2, 1 = 3/4, 2 = 1/1
options_set_damage_to_friends = 263 # (options_set_damage_to_friends, <value>),
# 0 = 1/2, 1 = 3/4, 2 = 1/1
options_get_combat_ai = 264 # (options_get_combat_ai, <destination>),
# 0 = good, 1 = average, 2 = poor
options_set_combat_ai = 265 # (options_set_combat_ai, <value>),
# 0 = good, 1 = average, 2 = poor
game_get_reduce_campaign_ai = 424 # (game_get_reduce_campaign_ai, <destination>),
# Deprecated operation. Use options_get_campaign_ai instead
options_get_campaign_ai = 266 # (options_get_campaign_ai, <destination>),
# 0 = good, 1 = average, 2 = poor
options_set_campaign_ai = 267 # (options_set_campaign_ai, <value>),
# 0 = good, 1 = average, 2 = poor
options_get_combat_speed = 268 # (options_get_combat_speed, <destination>),
# 0 = slowest, 1 = slower, 2 = normal, 3 = faster, 4 = fastest
options_set_combat_speed = 269 # (options_set_combat_speed, <value>),
# 0 = slowest, 1 = slower, 2 = normal, 3 = faster, 4 = fastest
options_get_battle_size = 270 # (options_get_battle_size, <destination>),
# Version 1.161+. Retrieves current battle size slider value (in the range of 0..1000). Note that this is the slider value, not the battle size itself.
options_set_battle_size = 271 # (options_set_battle_size, <value>),
# Version 1.161+. Sets battle size slider to provided value (in the range of 0..1000). Note that this is the slider value, not the battle size itself.
get_average_game_difficulty = 990 # (get_average_game_difficulty, <destination>),
# Returns calculated game difficulty rating (as displayed on the Options page). Commonly used for score calculation when ending the game.
# Achievements and kill stats
get_achievement_stat = 370 # (get_achievement_stat, <destination>, <achievement_id>, <stat_index>),
# Retrieves the numeric value associated with an achievement. Used to keep track of player's results before finally unlocking it.
set_achievement_stat = 371 # (set_achievement_stat, <achievement_id>, <stat_index>, <value>),
# Sets the new value associated with an achievement. Used to keep track of player's results before finally unlocking it.
unlock_achievement = 372 # (unlock_achievement, <achievement_id>),
# Unlocks player's achievement. Apparently doesn't have any game effects.
get_player_agent_kill_count = 1701 # (get_player_agent_kill_count, <destination>, [get_wounded]),
# Retrieves the total number of enemies killed by the player. Call with non-zero <get_wounded> parameter to retrieve the total number of knocked down enemies.
get_player_agent_own_troop_kill_count = 1705 # (get_player_agent_own_troop_kill_count, <destination>, [get_wounded]),
# Retrieves the total number of allies killed by the player. Call with non-zero <get_wounded> parameter to retrieve the total number of knocked down allies.
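# Sketch: reading player battle statistics into registers (register choice is
# arbitrary):
#   (get_player_agent_kill_count, reg1),       # enemies killed
#   (get_player_agent_kill_count, reg2, 1),    # enemies knocked down
#   (get_average_game_difficulty, reg3),       # difficulty rating from Options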
################################################################################
# [ Z08 ] FACTIONS
################################################################################
# Despite the importance of factions to the game, there aren't that many
# operations to deal with them. Essentially, you can control the colors and names of
# existing game factions, set or retrieve relations between them, and work
# with faction slots. There's also a number of operations which assign or
# retrieve the factional allegiance of other game objects, like parties and
# troops, but these have been placed in the respective sections of the file.
# Slot operations for factions
faction_set_slot = 502 # (faction_set_slot, <faction_id>, <slot_no>, <value>),
faction_get_slot = 522 # (faction_get_slot, <destination>, <faction_id>, <slot_no>),
faction_slot_eq = 542 # (faction_slot_eq, <faction_id>, <slot_no>, <value>),
faction_slot_ge = 562 # (faction_slot_ge, <faction_id>, <slot_no>, <value>),
# Generic operations
set_relation = 1270 # (set_relation, <faction_id_1>, <faction_id_2>, <value>),
# Sets relation between two factions. Relation is in -100..100 range.
store_relation = 2190 # (store_relation, <destination>, <faction_id_1>, <faction_id_2>),
# Retrieves relation between two factions. Relation is in -100..100 range.
faction_set_name = 1275 # (faction_set_name, <faction_id>, <string>),
# Sets the name of the faction. See also (str_store_faction_name) in String Operations.
faction_set_color = 1276 # (faction_set_color, <faction_id>, <color_code>),
# Sets the faction color. All parties and centers belonging to this faction will be displayed with this color on global map.
faction_get_color = 1277 # (faction_get_color, <destination>, <faction_id>),
# Gets the faction color value.
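# Sketch: turning two factions hostile and recoloring one of them (faction
# identifiers are from Native; the color is assumed to be a hexadecimal
# 0xRRGGBB value, as in module_factions.py):
#   (set_relation, "fac_kingdom_1", "fac_kingdom_2", -30),
#   (store_relation, ":relation", "fac_kingdom_1", "fac_kingdom_2"),
#   (faction_set_color, "fac_kingdom_1", 0xCC2211),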
################################################################################
# [ Z09 ] PARTIES AND PARTY TEMPLATES
################################################################################
# Parties are an extremely important element of single-player modding, because
# they are the only objects which can be present on the world map. Each party
# is a semi-independent object with its own behavior. Note that you cannot
# control a party's behavior directly; instead you can change various factors
# which affect it (including party AI settings).
# There are two things of importance when dealing with parties. First, parties
# can be attached to each other; this allows you, for example, to stack a
# number of armies inside a single city. Second, parties may encounter each
# other. When two AI parties are in encounter, it usually means they are
# fighting. Player's encounter with an AI party is usually much more complex
# and may involve pretty much anything, which is why player's encounters are
# covered in a separate section of the file.
# Each party consists of troop stacks. Each troop stack is either a single
# hero (troop defined as tf_hero in module_troops.py file) or a number of
# regular troops (one or more). Each party has two
# sets of troop stacks: members (or companions) set of stacks, and prisoners
# set of stacks. Many operations will only affect members, others may only
# affect prisoners, and there are even operations to switch their roles.
# Another important concept is a party template. Its definition looks very
# similar to a party. Templates are used when there's a need to create a
# number of parties with similar set of members, parameters or flags. Also
# templates can be easily used to differentiate parties from each other,
# so they are akin to a "party_type" in the game.
# Note that parties are the only game objects which are persistent (i.e. they
# are saved to the savegame file and restored on load), have slots and
# can be created at runtime. This makes parties ideal candidates for
# dynamic information storage of unlimited volume, which the game otherwise
# lacks.
# Conditional operations
hero_can_join = 101 # (hero_can_join, [party_id]),
# Checks if party can accept one hero troop. Player's party is default value.
hero_can_join_as_prisoner = 102 # (hero_can_join_as_prisoner, [party_id]),
# Checks if party can accept one hero prisoner troop. Player's party is default value.
party_can_join = 103 # (party_can_join),
# During encounter dialog, checks if encountered party can join player's party.
party_can_join_as_prisoner = 104 # (party_can_join_as_prisoner),
# During encounter dialog, checks if encountered party can join player's party as prisoners.
troops_can_join = 105 # (troops_can_join, <value>),
# Checks if player party has enough space for provided number of troops.
troops_can_join_as_prisoner = 106 # (troops_can_join_as_prisoner, <value>),
# Checks if player party has enough space for provided number of prisoners.
party_can_join_party = 107 # (party_can_join_party, <joiner_party_id>, <host_party_id>, [flip_prisoners]),
# Checks if first party can join second party (enough space for both troops and prisoners). If the flip_prisoners flag is 1, then members and prisoners in the joining party are flipped.
main_party_has_troop = 110 # (main_party_has_troop, <troop_id>),
# Checks if player party has specified troop.
party_is_in_town = 130 # (party_is_in_town, <party_id>, <town_party_id>),
# Checks that the party has successfully reached its destination (after being set to ai_bhvr_travel_to_party) and that its destination is actually the referenced town_party_id.
party_is_in_any_town = 131 # (party_is_in_any_town, <party_id>),
# Checks that the party has successfully reached its destination (after being set to ai_bhvr_travel_to_party).
party_is_active = 132 # (party_is_active, <party_id>),
# Checks that <party_id> is valid and not disabled.
# Slot operations for parties and party templates
party_template_set_slot = 504 # (party_template_set_slot, <party_template_id>, <slot_no>, <value>),
party_template_get_slot = 524 # (party_template_get_slot, <destination>, <party_template_id>, <slot_no>),
party_template_slot_eq = 544 # (party_template_slot_eq, <party_template_id>, <slot_no>, <value>),
party_template_slot_ge = 564 # (party_template_slot_ge, <party_template_id>, <slot_no>, <value>),
party_set_slot = 501 # (party_set_slot, <party_id>, <slot_no>, <value>),
party_get_slot = 521 # (party_get_slot, <destination>, <party_id>, <slot_no>),
party_slot_eq = 541 # (party_slot_eq, <party_id>, <slot_no>, <value>),
party_slot_ge = 561 # (party_slot_ge, <party_id>, <slot_no>, <value>),
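# Sketch: using party slots as persistent storage, as described above (the
# slot constant is hypothetical and would be defined in module_constants.py;
# "p_town_1" is a Native party):
#   (party_set_slot, "p_town_1", slot_custom_counter, 3),
#   (party_get_slot, ":value", "p_town_1", slot_custom_counter),
#   (try_begin),
#     (party_slot_ge, "p_town_1", slot_custom_counter, 1),
#     # the stored value is at least 1 - react here
#   (try_end),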
# Generic operations
set_party_creation_random_limits = 1080 # (set_party_creation_random_limits, <min_value>, <max_value>),
# Affects party sizes spawned from templates. May be used to spawn larger parties when player is high level. Values should be in 0..100 range.
set_spawn_radius = 1103 # (set_spawn_radius, <value>),
# Sets radius for party spawning with subsequent (spawn_around_party) operations.
spawn_around_party = 1100 # (spawn_around_party, <party_id>, <party_template_id>),
# Creates a new party from a party template and puts its <party_id> into reg0.
disable_party = 1230 # (disable_party, <party_id>),
# Party disappears from the map. Note that (try_for_parties) will still iterate over disabled parties, so you need to make additional checks with (party_is_active).
enable_party = 1231 # (enable_party, <party_id>),
# Reactivates a previously disabled party.
remove_party = 1232 # (remove_party, <party_id>),
# Destroys a party completely. Should ONLY be used with dynamically spawned parties, as removing parties pre-defined in module_parties.py file will corrupt the savegame.
party_get_current_terrain = 1608 # (party_get_current_terrain, <destination>, <party_id>),
# Returns a value from header_terrain_types.py
party_relocate_near_party = 1623 # (party_relocate_near_party, <relocated_party_id>, <target_party_id>, <spawn_radius>),
# Teleports party into vicinity of another party.
party_get_position = 1625 # (party_get_position, <dest_position>, <party_id>),
# Stores current position of the party on world map.
party_set_position = 1626 # (party_set_position, <party_id>, <position>),
# Teleports party to a specified position on the world map.
set_camera_follow_party = 1021 # (set_camera_follow_party, <party_id>),
# Self-explanatory. Can be used on world map only. Commonly used to make camera follow a party which has captured player as prisoner.
party_attach_to_party = 1660 # (party_attach_to_party, <party_id>, <party_id_to_attach_to>),
# Attach a party to another one (like lord's army staying in a town/castle).
party_detach = 1661 # (party_detach, <party_id>),
# Remove a party from attachments and place it on the world map.
party_collect_attachments_to_party = 1662 # (party_collect_attachments_to_party, <source_party_id>, <collected_party_id>),
# Mostly used in various battle and AI calculations. Will create an aggregate party from all parties attached to the source party.
party_get_cur_town = 1665 # (party_get_cur_town, <destination>, <party_id>),
# When a party has reached its destination (using ai_bhvr_travel_to_party), this operation will retrieve the party_id of the destination party.
party_get_attached_to = 1694 # (party_get_attached_to, <destination>, <party_id>),
# Retrieves the party that the referenced party is attached to, if any.
party_get_num_attached_parties = 1695 # (party_get_num_attached_parties, <destination>, <party_id>),
# Retrieves total number of parties attached to referenced party.
party_get_attached_party_with_rank = 1696 # (party_get_attached_party_with_rank, <destination>, <party_id>, <attached_party_index>),
# Extract party_id of a specified party among attached.
party_set_name = 1669 # (party_set_name, <party_id>, <string>),
# Sets party name (will be displayed as label and/or in the party details popup).
party_set_extra_text = 1605 # (party_set_extra_text, <party_id>, <string>),
# Allows putting extra text in the party details popup. Used in Native to set status for villages or towns (being raided, razed, under siege...).
party_get_icon = 1681 # (party_get_icon, <destination>, <party_id>),
# Retrieve map icon used for the party.
party_set_icon = 1676 # (party_set_icon, <party_id>, <map_icon_id>),
# Sets what map icon will be used for the party.
party_set_banner_icon = 1677 # (party_set_banner_icon, <party_id>, <map_icon_id>),
# Sets what map icon will be used as the party banner. Use 0 to remove banner from a party.
party_set_extra_icon = 1682 # (party_set_extra_icon, <party_id>, <map_icon_id>, <vertical_offset_fixed_point>, <up_down_frequency_fixed_point>, <rotate_frequency_fixed_point>, <fade_in_out_frequency_fixed_point>),
# Adds or removes an extra map icon to a party, possibly with some animations. Use -1 as map_icon_id to remove extra icon.
party_add_particle_system = 1678 # (party_add_particle_system, <party_id>, <particle_system_id>),
# Appends some special visual effects to the party on the map. Used in Native to add fire and smoke over villages.
party_clear_particle_systems = 1679 # (party_clear_particle_systems, <party_id>),
# Removes all special visual effects from the party on the map.
context_menu_add_item = 980 # (context_menu_add_item, <string_id>, <value>),
# Must be called inside script_game_context_menu_get_buttons. Adds context menu option for a party and its respective identifier (will be passed to script_game_event_context_menu_button_clicked).
party_get_template_id = 1609 # (party_get_template_id, <destination>, <party_id>),
# Retrieves what party template was used to create the party (if any). Commonly used to identify encountered party type.
party_set_faction = 1620 # (party_set_faction, <party_id>, <faction_id>),
# Sets party faction allegiance. Party color is changed appropriately.
store_faction_of_party = 2204 # (store_faction_of_party, <destination>, <party_id>),
# Retrieves current faction allegiance of the party.
store_random_party_in_range = 2254 # (store_random_party_in_range, <destination>, <lower_bound>, <upper_bound>),
# Retrieves one random party from the range. Generally used only for predefined parties (towns, villages etc).
store01_random_parties_in_range = 2255 # (store01_random_parties_in_range, <lower_bound>, <upper_bound>),
# Stores two random, different parties in a range to reg0 and reg1. Generally used only for predefined parties (towns, villages etc).
store_distance_to_party_from_party = 2281 # (store_distance_to_party_from_party, <destination>, <party_id>, <party_id>),
# Retrieves distance between two parties on the global map.
store_num_parties_of_template = 2310 # (store_num_parties_of_template, <destination>, <party_template_id>),
# Stores number of active parties which were created using specified party template.
store_random_party_of_template = 2311 # (store_random_party_of_template, <destination>, <party_template_id>),
# Retrieves one random party which was created using specified party template. Fails if no party exists with provided template.
store_num_parties_created = 2300 # (store_num_parties_created, <destination>, <party_template_id>),
# Stores the total number of created parties of specified type. Not used in Native.
store_num_parties_destroyed = 2301 # (store_num_parties_destroyed, <destination>, <party_template_id>),
# Stores the total number of destroyed parties of specified type.
store_num_parties_destroyed_by_player = 2302 # (store_num_parties_destroyed_by_player, <destination>, <party_template_id>),
# Stores the total number of parties of specified type which have been destroyed by player.
party_get_morale = 1671 # (party_get_morale, <destination>, <party_id>),
# Returns a value in the range of 0..100. Party morale does not affect party behavior on the map, but will be taken into account if the party is engaged in battle (except auto-calc).
party_set_morale = 1672 # (party_set_morale, <party_id>, <value>),
# Value should be in the range of 0..100. Party morale does not affect party behavior on the map, but will be taken into account if the party is engaged in battle (except auto-calc).
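# Sketch: spawning a party from a template near a town, then cleaning it up
# later ("pt_looters" and "p_town_1" are Native identifiers; the ellipsis
# stands for unrelated code):
#   (set_spawn_radius, 3),
#   (spawn_around_party, "p_town_1", "pt_looters"),
#   (assign, ":new_party", reg0),     # (spawn_around_party) returns the id in reg0
#   ...
#   (remove_party, ":new_party"),     # safe: this party was spawned dynamically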
# Party members manipulation
party_join = 1201 # (party_join),
# During encounter, joins encountered party to player's party
party_join_as_prisoner = 1202 # (party_join_as_prisoner),
# During encounter, joins encountered party to player's party as prisoners
troop_join = 1203 # (troop_join, <troop_id>),
# Specified hero joins player's party
troop_join_as_prisoner = 1204 # (troop_join_as_prisoner, <troop_id>),
# Specified hero joins player's party as prisoner
add_companion_party = 1233 # (add_companion_party, <troop_id_hero>),
# Creates a new empty party with specified hero as party leader and the only member. Party is spawned at the position of player's party.
party_add_members = 1610 # (party_add_members, <party_id>, <troop_id>, <number>),
# Returns total number of added troops in reg0.
party_add_prisoners = 1611 # (party_add_prisoners, <party_id>, <troop_id>, <number>),
# Returns total number of added prisoners in reg0.
party_add_leader = 1612 # (party_add_leader, <party_id>, <troop_id>, [number]),
# Adds troop(s) to the party and makes it party leader.
party_force_add_members = 1613 # (party_force_add_members, <party_id>, <troop_id>, <number>),
# Adds troops to party ignoring party size limits. Mostly used to add hero troops.
party_force_add_prisoners = 1614 # (party_force_add_prisoners, <party_id>, <troop_id>, <number>),
# Adds prisoners to party ignoring party size limits. Mostly used to add hero prisoners.
party_add_template = 1675 # (party_add_template, <party_id>, <party_template_id>, [reverse_prisoner_status]),
# Reinforces the party using the specified party template. Optional flag switches troop/prisoner status for reinforcements.
distribute_party_among_party_group = 1698 # (distribute_party_among_party_group, <party_to_be_distributed>, <group_root_party>),
# Distributes troops from first party among all parties attached to the second party. Commonly used to divide prisoners and rescued troops among NPC parties.
remove_member_from_party = 1210 # (remove_member_from_party, <troop_id>, [party_id]),
# Removes a hero member from the party. Player's party is the default. Will display a message about the companion leaving the party. Should not be used with regular troops (it will successfully remove one of them, but will produce some meaningless spam).
remove_regular_prisoners = 1211 # (remove_regular_prisoners, <party_id>),
# Removes all non-hero prisoners from the party.
remove_troops_from_companions = 1215 # (remove_troops_from_companions, <troop_id>, <value>),
# Removes troops from player's party, duplicating functionality of (party_remove_members) but providing less flexibility.
remove_troops_from_prisoners = 1216 # (remove_troops_from_prisoners, <troop_id>, <value>),
# Removes prisoners from player's party.
party_remove_members = 1615 # (party_remove_members, <party_id>, <troop_id>, <number>),
# Removes specified number of troops from a party. Stores number of actually removed troops in reg0.
party_remove_prisoners = 1616 # (party_remove_prisoners, <party_id>, <troop_id>, <number>),
# Removes specified number of prisoners from a party. Stores number of actually removed prisoners in reg0.
party_clear = 1617 # (party_clear, <party_id>),
# Removes all members and prisoners from the party.
add_gold_to_party = 1070 # (add_gold_to_party, <value>, <party_id>),
# Marks the party as carrying the specified amount of gold, which can be pillaged by the player if he destroys it. This operation must not be used to give gold to the player's party.
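# Sketch: reinforcing a party and checking how many troops actually fit
# (troop and party identifiers are from Native):
#   (party_add_members, "p_main_party", "trp_watchman", 5),
#   (assign, ":added", reg0),    # number of troops actually added
#   (party_force_add_prisoners, "p_main_party", "trp_looter", 2),   # ignores size limits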
# Calculating party and stack sizes
party_get_num_companions = 1601 # (party_get_num_companions, <destination>, <party_id>),
# Returns total number of party members, including leader.
party_get_num_prisoners = 1602 # (party_get_num_prisoners, <destination>, <party_id>),
# Returns total number of party prisoners.
party_count_members_of_type = 1630 # (party_count_members_of_type, <destination>, <party_id>, <troop_id>),
# Returns total number of party members of specific type.
party_count_companions_of_type = 1631 # (party_count_companions_of_type, <destination>, <party_id>, <troop_id>),
# Duplicates (party_count_members_of_type).
party_count_prisoners_of_type = 1632 # (party_count_prisoners_of_type, <destination>, <party_id>, <troop_id>),
# Returns total number of prisoners of specific type.
party_get_free_companions_capacity = 1633 # (party_get_free_companions_capacity, <destination>, <party_id>),
# Calculates how many members can be added to the party.
party_get_free_prisoners_capacity = 1634 # (party_get_free_prisoners_capacity, <destination>, <party_id>),
# Calculates how many prisoners can be added to the party.
party_get_num_companion_stacks = 1650 # (party_get_num_companion_stacks, <destination>, <party_id>),
# Returns total number of troop stacks in the party (including player and heroes).
party_get_num_prisoner_stacks = 1651 # (party_get_num_prisoner_stacks, <destination>, <party_id>),
# Returns total number of prisoner stacks in the party (including any heroes).
party_stack_get_troop_id = 1652 # (party_stack_get_troop_id, <destination>, <party_id>, <stack_no>),
# Extracts troop type of the specified troop stack.
party_stack_get_size = 1653 # (party_stack_get_size, <destination>, <party_id>, <stack_no>),
# Extracts number of troops in the specified troop stack.
party_stack_get_num_wounded = 1654 # (party_stack_get_num_wounded, <destination>, <party_id>, <stack_no>),
# Extracts number of wounded troops in the specified troop stack.
party_stack_get_troop_dna = 1655 # (party_stack_get_troop_dna, <destination>, <party_id>, <stack_no>),
# Extracts DNA from the specified troop stack. Used to properly generate appearance in conversations.
party_prisoner_stack_get_troop_id = 1656 # (party_prisoner_stack_get_troop_id, <destination>, <party_id>, <stack_no>),
# Extracts troop type of the specified prisoner stack.
party_prisoner_stack_get_size = 1657 # (party_prisoner_stack_get_size, <destination>, <party_id>, <stack_no>),
# Extracts number of troops in the specified prisoner stack.
party_prisoner_stack_get_troop_dna = 1658 # (party_prisoner_stack_get_troop_dna, <destination>, <party_id>, <stack_no>),
# Extracts DNA from the specified prisoner stack. Used to properly generate appearance in conversations.
store_num_free_stacks = 2154 # (store_num_free_stacks, <destination>, <party_id>),
# Deprecated, as Warband no longer has limits on number of stacks in the party. Always returns 10.
store_num_free_prisoner_stacks = 2155 # (store_num_free_prisoner_stacks, <destination>, <party_id>),
# Deprecated, as Warband no longer has limits on number of stacks in the party. Always returns 10.
store_party_size = 2156 # (store_party_size, <destination>, [party_id]),
# Stores total party size (all members and prisoners).
store_party_size_wo_prisoners = 2157 # (store_party_size_wo_prisoners, <destination>, [party_id]),
# Stores total number of members in the party (without prisoners), duplicating (party_get_num_companions).
store_troop_kind_count = 2158 # (store_troop_kind_count, <destination>, <troop_type_id>),
# Counts number of troops of specified type in player's party. Deprecated, use party_count_members_of_type instead.
store_num_regular_prisoners = 2159 # (store_num_regular_prisoners, <destination>, <party_id>),
# Deprecated and does not work. Do not use.
store_troop_count_companions = 2160 # (store_troop_count_companions, <destination>, <troop_id>, [party_id]),
# Apparently deprecated, duplicates (party_get_num_companions). Not used in Native.
store_troop_count_prisoners = 2161 # (store_troop_count_prisoners, <destination>, <troop_id>, [party_id]),
# Apparently deprecated, duplicates (party_get_num_prisoners). Not used in Native.
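# Sketch: iterating over all member stacks of a party to count its wounded
# troops (try_for_range is defined elsewhere in this file; ":party" is
# assumed to hold a valid party id):
#   (party_get_num_companion_stacks, ":num_stacks", ":party"),
#   (assign, ":wounded", 0),
#   (try_for_range, ":stack", 0, ":num_stacks"),
#     (party_stack_get_num_wounded, ":stack_wounded", ":party", ":stack"),
#     (val_add, ":wounded", ":stack_wounded"),
#   (try_end),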
# Party experience and skills
party_add_xp_to_stack = 1670 # (party_add_xp_to_stack, <party_id>, <stack_no>, <xp_amount>),
# Awards specified number of xp points to a single troop stack in the party.
party_upgrade_with_xp = 1673 # (party_upgrade_with_xp, <party_id>, <xp_amount>, <upgrade_path>),
# Awards specified number of xp points to entire party (split between all stacks) and upgrades all eligible troops. Upgrade direction: (0 = random, 1 = first, 2 = second).
party_add_xp = 1674 # (party_add_xp, <party_id>, <xp_amount>),
# Awards specified number of xp points to entire party (split between all stacks).
party_get_skill_level = 1685 # (party_get_skill_level, <destination>, <party_id>, <skill_no>),
# Retrieves skill level for the specified party (usually max among the heroes). Makes a callback to (script_game_get_skill_modifier_for_troop).
# Combat related operations
heal_party = 1225 # (heal_party, <party_id>),
# Heals all wounded party members.
party_wound_members = 1618 # (party_wound_members, <party_id>, <troop_id>, <number>),
# Wounds a specified number of troops in the party.
party_remove_members_wounded_first = 1619 # (party_remove_members_wounded_first, <party_id>, <troop_id>, <number>),
# Removes a certain number of troops from the party, starting with wounded. Stores total number removed in reg0.
party_quick_attach_to_current_battle = 1663 # (party_quick_attach_to_current_battle, <party_id>, <side>),
# Adds any party into current encounter at specified side (0 = ally, 1 = enemy).
party_leave_cur_battle = 1666 # (party_leave_cur_battle, <party_id>),
# Forces the party to leave its current battle (if it is engaged).
party_set_next_battle_simulation_time = 1667 # (party_set_next_battle_simulation_time, <party_id>, <next_simulation_time_in_hours>),
# Defines the period of time (in hours) after which the battle must be simulated for the specified party for the next time. When a value <= 0 is passed, the combat simulation round is performed immediately.
party_get_battle_opponent = 1680 # (party_get_battle_opponent, <destination>, <party_id>),
# When a party is engaged in battle with another party, returns its opponent party. Otherwise returns -1.
inflict_casualties_to_party_group = 1697 # (inflict_casualties_to_party_group, <parent_party_id>, <damage_amount>, <party_id_to_add_casualties_to>),
# Delivers auto-calculated damage to the party (and all other parties attached to it). Killed troops are moved to the third parameter's party to keep track of casualties.
party_end_battle = 108 # (party_end_battle, <party_no>),
# Version 1.153+. UNTESTED. Supposedly ends the battle in which the party is currently participating.
# Party AI
party_set_marshall = 1604 # (party_set_marshall, <party_id>, <value>),
party_set_marshal = party_set_marshall # (party_set_marshal, <party_id>, <value>),
# Sets party as a marshall party or turns it back to normal party. Value is either 1 or 0. This affects party behavior, but exact effects are not known. Alternative operation name spelling added to enable compatibility with Viking Conquest DLC module system.
party_set_flags = 1603 # (party_set_flags, <party_id>, <flag>, <clear_or_set>),
# Sets (1) or clears (0) party flags in runtime. See header_parties.py for flags reference.
party_set_aggressiveness = 1606 # (party_set_aggressiveness, <party_id>, <number>),
# Sets aggressiveness value for the party (range 0..15).
party_set_courage = 1607 # (party_set_courage, <party_id>, <number>),
# Sets courage value for the party (range 4..15).
party_get_ai_initiative = 1638 # (party_get_ai_initiative, <destination>, <party_id>),
# Gets party current AI initiative value (range 0..100).
party_set_ai_initiative = 1639 # (party_set_ai_initiative, <party_id>, <value>),
# Sets AI initiative value for the party (range 0..100).
party_set_ai_behavior = 1640 # (party_set_ai_behavior, <party_id>, <ai_bhvr>),
# Sets AI behavior for the party. See header_parties.py for reference.
party_set_ai_object = 1641 # (party_set_ai_object, <party_id>, <object_party_id>),
# Sets another party as the object for current AI behavior (follow that party).
party_set_ai_target_position = 1642 # (party_set_ai_target_position, <party_id>, <position>),
# Sets a specific world map position as the object for current AI behavior (travel to that point).
party_set_ai_patrol_radius = 1643 # (party_set_ai_patrol_radius, <party_id>, <radius_in_km>),
# Sets a radius for AI patrolling behavior.
party_ignore_player = 1644 # (party_ignore_player, <party_id>, <duration_in_hours>),
# Makes AI party ignore player for the specified time.
party_set_bandit_attraction = 1645 # (party_set_bandit_attraction, <party_id>, <attraction>),
# Sets party attractiveness to parties with bandit behavior (range 0..100).
party_get_helpfulness = 1646 # (party_get_helpfulness, <destination>, <party_id>),
# Gets party current AI helpfulness value (range 0..100).
party_set_helpfulness = 1647 # (party_set_helpfulness, <party_id>, <number>),
# Sets AI helpfulness value for the party (range 0..10000, default 100).
get_party_ai_behavior = 2290 # (get_party_ai_behavior, <destination>, <party_id>),
# Retrieves current AI behavior pattern for the party.
get_party_ai_object = 2291 # (get_party_ai_object, <destination>, <party_id>),
# Retrieves what party is currently used as object for AI behavior.
party_get_ai_target_position = 2292 # (party_get_ai_target_position, <position>, <party_id>),
# Retrieves what position is currently used as object for AI behavior.
get_party_ai_current_behavior = 2293 # (get_party_ai_current_behavior, <destination>, <party_id>),
# Retrieves the current AI behavior pattern when it has been overridden by the current situation (e.g. fleeing from an enemy while en route to a destination).
get_party_ai_current_object = 2294 # (get_party_ai_current_object, <destination>, <party_id>),
# Retrieves what party has caused temporary behavior switch.
party_set_ignore_with_player_party = 1648 # (party_set_ignore_with_player_party, <party_id>, <value>),
# Version 1.161+. Effects uncertain. 4research
party_get_ignore_with_player_party = 1649 # (party_get_ignore_with_player_party, <party_id>),
# Version 1.161+. Effects uncertain. Documented official syntax is suspicious and probably incorrect. 4research
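# Sketch: sending a party to a town and detecting its arrival
# (ai_bhvr_travel_to_party is defined in header_parties.py; ":party" is
# assumed to hold a valid party id):
#   (party_set_ai_behavior, ":party", ai_bhvr_travel_to_party),
#   (party_set_ai_object, ":party", "p_town_1"),
#   # later, e.g. in a periodic trigger:
#   (try_begin),
#     (party_is_in_town, ":party", "p_town_1"),
#     # the party has arrived - react here
#   (try_end),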
################################################################################
# [ Z10 ] TROOPS
################################################################################
# What troops are.
# There are two major types of troops: heroes and regulars. They are treated
# very differently by the game, so it's important not to confuse them. At the
# same time, most Module System operations will not make any differentiation
# between hero and regular troops.
# First of all, hero troops do not stack. You cannot have a stack of heroes
# in a party, each hero will always occupy a separate troop slot. At the same
# time, you can put any number of regular troops into a single troop slot.
# Second, the way the game treats equipment of heroes and troops is also
# different. All heroes' items are treated in the same way as the player's (no
# big surprise, since the player is actually a hero troop himself). Meanwhile,
# items that a regular troop has are just suggestions for what this troop
# *might* take into battle. On the battlefield, each agent spawned from a
# regular troop will only take a limited number of items from the inventory
# provided by the troop definition in module_troops.py. The choice is random
# and the modder has only limited control over it through guarantee flags.
# There's one more caveat: while you can easily change the outfit of a hero
# troop and your changes will persist through the game, the same persistence
# applies to regular troops as a type. By changing the equipment of some
# regular troop, you are changing all instances of that troop throughout the
# entire game. In other words, you cannot re-equip a stack of regulars in a
# single party - your changes will affect all parties in the world.
# Third, while all heroes have a single predefined face code, which is used
# consistently through the game, troops have an entire range of face codes. This
# range is used to randomize each agent's face within those constraints, so a
# group of 12 pikemen will not look like a bunch of clones.
# Fourth, hero troops can't be killed in battle. Every time a hero's hit points
# are reduced to zero, the hero is knocked down instead. For regular troops, the
# chance to be knocked down depends on a number of factors, but their default fate
# when driven to zero health is death.
# Conditional operations
troop_has_item_equipped = 151 # (troop_has_item_equipped, <troop_id>, <item_id>),
# Checks that the troop has this item equipped (worn or wielded).
troop_is_mounted = 152 # (troop_is_mounted, <troop_id>),
# Checks the troop for tf_mounted flag (see header_troops.py). Does NOT check that the troop has a horse.
troop_is_guarantee_ranged = 153 # (troop_is_guarantee_ranged, <troop_id>),
# Checks the troop for tf_guarantee_ranged flag (see header_troops.py). Does not check that troop actually has some ranged weapon.
troop_is_guarantee_horse = 154 # (troop_is_guarantee_horse, <troop_id>),
# Checks the troop for tf_guarantee_horse flag (see header_troops.py). Does not check that troop actually has some horse.
troop_is_hero = 1507 # (troop_is_hero, <troop_id>),
# Checks the troop for tf_hero flag (see header_troops.py). Hero troops are actual characters and do not stack in party window.
troop_is_wounded = 1508 # (troop_is_wounded, <troop_id>),
# Checks that the troop is wounded. Only works for hero troops.
player_has_item = 150 # (player_has_item, <item_id>),
# Checks that player has the specified item.
# Slot operations for troops
troop_set_slot = 500 # (troop_set_slot, <troop_id>, <slot_no>, <value>),
troop_get_slot = 520 # (troop_get_slot, <destination>, <troop_id>, <slot_no>),
troop_slot_eq = 540 # (troop_slot_eq, <troop_id>, <slot_no>, <value>),
troop_slot_ge = 560 # (troop_slot_ge, <troop_id>, <slot_no>, <value>),
# Troop attributes and skills
troop_set_type = 1505 # (troop_set_type, <troop_id>, <gender>),
# Changes the troop skin. There are two skins in Native: male and female, so in effect this operation sets troop gender. However mods may declare other skins.
troop_get_type = 1506 # (troop_get_type, <destination>, <troop_id>),
# Returns troop current skin (i.e. gender).
troop_set_class = 1517 # (troop_set_class, <troop_id>, <value>),
# Sets troop class (infantry, archers, cavalry or any of custom classes). Accepts values in range 0..8. See grc_* constants in header_mission_templates.py.
troop_get_class = 1516 # (troop_get_class, <destination>, <troop_id>),
# Retrieves troop class. Returns values in range 0..8.
class_set_name = 1837 # (class_set_name, <sub_class>, <string_id>),
# Sets a new name for troop class (aka "Infantry", "Cavalry", "Custom Group 3"...).
add_xp_to_troop = 1062 # (add_xp_to_troop, <value>, [troop_id]),
# Adds some xp points to troop. Only makes sense for player and hero troops. Default troop_id is player. Amount of xp can be negative.
add_xp_as_reward = 1064 # (add_xp_as_reward, <value>),
# Adds the specified amount of xp points to player. Typically used as a quest reward operation.
troop_get_xp = 1515 # (troop_get_xp, <destination>, <troop_id>),
# Retrieves total amount of xp specified troop has.
store_attribute_level = 2172 # (store_attribute_level, <destination>, <troop_id>, <attribute_id>),
# Stores current value of troop attribute. See ca_* constants in header_troops.py for reference.
troop_raise_attribute = 1520 # (troop_raise_attribute, <troop_id>, <attribute_id>, <value>),
# Increases troop attribute by the specified amount. See ca_* constants in header_troops.py for reference. Use negative values to reduce attributes. When used on non-hero troop, will affect all instances of that troop.
store_skill_level = 2170 # (store_skill_level, <destination>, <skill_id>, [troop_id]),
# Stores current value of troop skill. See header_skills.py for reference.
troop_raise_skill = 1521 # (troop_raise_skill, <troop_id>, <skill_id>, <value>),
# Increases troop skill by the specified value. Value can be negative. See header_skills.py for reference. When used on non-hero troop, will affect all instances of that troop.
store_proficiency_level = 2176 # (store_proficiency_level, <destination>, <troop_id>, <attribute_id>),
# Stores current value of troop weapon proficiency. See wpt_* constants in header_troops.py for reference.
troop_raise_proficiency = 1522 # (troop_raise_proficiency, <troop_id>, <proficiency_no>, <value>),
# Increases troop weapon proficiency by the specified value. Value can be negative. Increase is subject to limits defined by Weapon Master skill. When used on non-hero troop, will affect all instances of that troop.
troop_raise_proficiency_linear = 1523 # (troop_raise_proficiency_linear, <troop_id>, <proficiency_no>, <value>),
# Same as (troop_raise_proficiency), but does not take Weapon Master skill into account (i.e. can increase proficiencies indefinitely).
troop_add_proficiency_points = 1525 # (troop_add_proficiency_points, <troop_id>, <value>),
# Adds some proficiency points to a hero troop which can later be distributed by player.
store_troop_health = 2175 # (store_troop_health, <destination>, <troop_id>, [absolute]), # set absolute to 1 to get actual health; otherwise this will return percentage health in range (0-100)
# Retrieves current troop health. Use absolute = 1 to retrieve actual number of hp points left, use absolute = 0 to retrieve a value in 0..100 range (percentage).
troop_set_health = 1560 # (troop_set_health, <troop_id>, <relative health (0-100)>),
# Sets troop health. Accepts value in range 0..100 (percentage).
troop_get_upgrade_troop = 1561 # (troop_get_upgrade_troop, <destination>, <troop_id>, <upgrade_path>),
# Retrieves possible directions for non-hero troop upgrade. Use 0 to retrieve first upgrade path, and 1 to return second. Result of -1 means there's no such upgrade path for this troop.
store_character_level = 2171 # (store_character_level, <destination>, [troop_id]),
# Retrieves character level of the troop. Default troop is the player.
get_level_boundary = 991 # (get_level_boundary, <destination>, <level_no>),
# Returns the amount of experience points required to reach the specified level (will return 0 for 1st level). Maximum possible level in the game is 63.
add_gold_as_xp = 1063 # (add_gold_as_xp, <value>, [troop_id]), # Default troop is player
# Adds a certain amount of experience points, depending on the amount of gold specified. Conversion rate is unclear and apparently somewhat randomized (three runs with 1000 gold produced values 1091, 804 and 799).
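# Sketch: developing a hero troop (attribute constants are in header_troops.py,
# skills in header_skills.py; "trp_npc1" is a Native companion):
#   (troop_raise_attribute, "trp_npc1", ca_strength, 1),
#   (troop_raise_skill, "trp_npc1", "skl_ironflesh", 1),
#   (add_xp_to_troop, 500, "trp_npc1"),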
# Troop equipment handling
troop_set_auto_equip = 1509 # (troop_set_auto_equip, <troop_id>, <value>),
# Sets (value = 1) or disables (value = 0) auto-equipping the troop with any items added to its inventory or purchased. Similar to tf_is_merchant flag.
troop_ensure_inventory_space = 1510 # (troop_ensure_inventory_space, <troop_id>, <value>),
# Removes items from troop inventory until troop has specified number of free inventory slots. Will free inventory slots starting from the end (items at the bottom of inventory will be removed first if there's not enough free space).
troop_sort_inventory = 1511 # (troop_sort_inventory, <troop_id>),
# Sorts items in troop inventory by their price (expensive first).
troop_add_item = 1530 # (troop_add_item, <troop_id>, <item_id>, [modifier]),
# Adds an item to the troop, optionally with a modifier (see imod_* constants in header_item_modifiers.py).
troop_remove_item = 1531 # (troop_remove_item, <troop_id>, <item_id>),
# Removes an item from the troop equipment or inventory. Operation will remove first matching item it finds.
troop_clear_inventory = 1532 # (troop_clear_inventory, <troop_id>),
# Clears entire troop inventory. Does not affect equipped items.
troop_equip_items = 1533 # (troop_equip_items, <troop_id>),
# Makes the troop reconsider its equipment. If the troop has better items in its inventory, it will equip them. Note this operation handles weapons poorly and may force the troop to equip itself with 4 two-handed swords.
troop_inventory_slot_set_item_amount = 1534 # (troop_inventory_slot_set_item_amount, <troop_id>, <inventory_slot_no>, <value>),
# Sets the stack size for a specified equipment or inventory slot. Only makes sense for items like ammo or food (which show stuff like "23/50" in inventory). Equipment slots are in range 0..9, see ek_* constants in header_items.py for reference.
troop_inventory_slot_get_item_amount = 1537 # (troop_inventory_slot_get_item_amount, <destination>, <troop_id>, <inventory_slot_no>),
# Retrieves the stack size for a specified equipment or inventory slot (if some Bread is 23/50, this operation will return 23).
troop_inventory_slot_get_item_max_amount = 1538 # (troop_inventory_slot_get_item_max_amount, <destination>, <troop_id>, <inventory_slot_no>),
# Retrieves the maximum possible stack size for a specified equipment or inventory slot (if some Bread is 23/50, this operation will return 50).
troop_add_items = 1535 # (troop_add_items, <troop_id>, <item_id>, <number>),
# Adds multiple items of specified type to the troop.
troop_remove_items = 1536 # (troop_remove_items, <troop_id>, <item_id>, <number>),
# Removes multiple items of specified type from the troop. Total price of actually removed items will be stored in reg0.
troop_loot_troop = 1539 # (troop_loot_troop, <target_troop>, <source_troop_id>, <probability>),
# Adds to target_troop's inventory some items from source_troop's equipment and inventory with some probability. Does not actually remove items from source_troop. Commonly used in Native to generate random loot after the battle.
troop_get_inventory_capacity = 1540 # (troop_get_inventory_capacity, <destination>, <troop_id>),
# Returns the total inventory capacity (number of inventory slots) for the specified troop. Note that this number will include equipment slots as well. Subtract num_equipment_kinds (see header_items.py) to get the number of actual *inventory* slots.
troop_get_inventory_slot = 1541 # (troop_get_inventory_slot, <destination>, <troop_id>, <inventory_slot_no>),
# Retrieves the item_id of a specified equipment or inventory slot. Returns -1 when there's nothing there.
troop_get_inventory_slot_modifier = 1542 # (troop_get_inventory_slot_modifier, <destination>, <troop_id>, <inventory_slot_no>),
# Retrieves the modifier value (see imod_* constants in header_items.py) for an item in the specified equipment or inventory slot. Returns 0 when there's nothing there, or if item does not have any modifiers.
troop_set_inventory_slot = 1543 # (troop_set_inventory_slot, <troop_id>, <inventory_slot_no>, <item_id>),
# Puts the specified item into troop's equipment or inventory slot. Be careful with setting equipment slots this way.
troop_set_inventory_slot_modifier = 1544 # (troop_set_inventory_slot_modifier, <troop_id>, <inventory_slot_no>, <imod_value>),
# Sets the modifier for the item in the troop's equipment or inventory slot. See imod_* constants in header_items.py for reference.
store_item_kind_count = 2165 # (store_item_kind_count, <destination>, <item_id>, [troop_id]),
# Calculates total number of items of specified type that the troop has. Default troop is player.
store_free_inventory_capacity = 2167 # (store_free_inventory_capacity, <destination>, [troop_id]),
# Calculates total number of free inventory slots that the troop has. Default troop is player.
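# Sketch: scanning a troop's equipment and inventory slots for a specific item
# ("itm_bread" is a Native item; ":troop" is assumed to hold a valid troop id;
# a failed (eq) check simply skips to the next loop iteration):
#   (troop_get_inventory_capacity, ":capacity", ":troop"),
#   (try_for_range, ":slot", 0, ":capacity"),
#     (troop_get_inventory_slot, ":item", ":troop", ":slot"),
#     (eq, ":item", "itm_bread"),
#     # found bread in slot ":slot" - react here
#   (try_end),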
# Merchandise handling
reset_price_rates = 1170 # (reset_price_rates),
# Resets customized price rates for merchants.
set_price_rate_for_item = 1171 # (set_price_rate_for_item, <item_id>, <value_percentage>),
# Sets individual price rate for a single item type. Normal price rate is 100. Deprecated, as Warband uses (game_get_item_[buy/sell]_price_factor) scripts instead.
set_price_rate_for_item_type = 1172 # (set_price_rate_for_item_type, <item_type_id>, <value_percentage>),
# Sets individual price rate for entire item class (see header_items.py for itp_type_* constants). Normal price rate is 100. Deprecated, as Warband uses (game_get_item_[buy/sell]_price_factor) scripts instead.
set_merchandise_modifier_quality = 1490 # (set_merchandise_modifier_quality, <value>),
# Affects the probability of items with quality modifiers appearing in merchandise. Value is percentage, standard value is 100.
set_merchandise_max_value = 1491 # (set_merchandise_max_value, <value>),
# Not used in Native. Apparently prevents items with price higher than listed from being generated as merchandise.
reset_item_probabilities = 1492 # (reset_item_probabilities, <value>),
# Sets every item's probability of being generated as merchandise to the provided value. Use zero with subsequent calls to (set_item_probability_in_merchandise) to only allow generation of certain items (see the sketch below).
set_item_probability_in_merchandise = 1493 # (set_item_probability_in_merchandise, <item_id>, <value>),
# Sets item probability of being generated as merchandise to the provided value.
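# A hedged sketch of the whitelisting pattern described above: suppress all
# merchandise, then re-enable a few item types (item ids are illustrative):
# (reset_item_probabilities, 0),
# (set_item_probability_in_merchandise, "itm_bread", 100),
# (set_item_probability_in_merchandise, "itm_dried_meat", 100),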
troop_add_merchandise = 1512 # (troop_add_merchandise, <troop_id>, <item_type_id>, <value>),
# Adds a specified number of random items of certain type (see itp_type_* constants in header_items.py) to troop inventory. Only adds items with itp_merchandise flags.
troop_add_merchandise_with_faction = 1513 # (troop_add_merchandise_with_faction, <troop_id>, <faction_id>, <item_type_id>, <value>), #faction_id is given to check if troop is eligible to produce that item
# Same as (troop_add_merchandise), but with additional filter: only adds items which belong to specified faction, or without any factions at all.
# Miscellaneous troop information
troop_set_name = 1501 # (troop_set_name, <troop_id>, <string_no>),
# Renames the troop, setting a new singular name for it.
troop_set_plural_name = 1502 # (troop_set_plural_name, <troop_id>, <string_no>),
# Renames the troop, setting a new plural name for it.
troop_set_face_key_from_current_profile = 1503 # (troop_set_face_key_from_current_profile, <troop_id>),
# Forces the troop to adopt the face from player's currently selected multiplayer profile.
troop_add_gold = 1528 # (troop_add_gold, <troop_id>, <value>),
# Adds gold to troop. Generally used with player or hero troops.
troop_remove_gold = 1529 # (troop_remove_gold, <troop_id>, <value>),
# Removes gold from troop. Generally used with player or hero troops.
store_troop_gold = 2149 # (store_troop_gold, <destination>, <troop_id>),
# Retrieves total number of gold that the troop has.
troop_set_faction = 1550 # (troop_set_faction, <troop_id>, <faction_id>),
# Sets a new faction for the troop (mostly used to switch lords allegiances in Native).
store_troop_faction = 2173 # (store_troop_faction, <destination>, <troop_id>),
# Retrieves current troop faction allegiance.
store_faction_of_troop = 2173 # (store_faction_of_troop, <destination>, <troop_id>),
# Alternative spelling of the above operation.
troop_set_age = 1555 # (troop_set_age, <troop_id>, <age_slider_pos>),
# Defines a new age for the troop (will be used by the game engine to generate appropriately aged face). Age is in range 0..100.
store_troop_value = 2231 # (store_troop_value, <destination>, <troop_id>),
# Stores some value which is apparently related to troop's overall fighting value. Swadian infantry line troops from Native produced values 24, 47, 80, 133, 188. Calling it on the player troop produced 0.
# Troop face code handling
str_store_player_face_keys = 2747 # (str_store_player_face_keys, <string_no>, <player_id>),
# Version 1.161+. Stores player's face keys into string register.
player_set_face_keys = 2748 # (player_set_face_keys, <player_id>, <string_no>),
# Version 1.161+. Sets player's face keys from string.
str_store_troop_face_keys = 2750 # (str_store_troop_face_keys, <string_no>, <troop_no>, [<alt>]),
# Version 1.161+. Stores specified troop's face keys into string register. Use optional <alt> parameter to determine which face key set to retrieve: 0 for first and 1 for second.
troop_set_face_keys = 2751 # (troop_set_face_keys, <troop_no>, <string_no>, [<alt>]),
# Version 1.161+. Sets troop face keys from string. Use optional <alt> parameter to determine which face key set to update: 0 for first and 1 for second.
face_keys_get_hair = 2752 # (face_keys_get_hair, <destination>, <string_no>),
# Version 1.161+. Unpacks selected hair mesh from string containing troop/player face keys to <destination>.
face_keys_set_hair = 2753 # (face_keys_set_hair, <string_no>, <value>),
# Version 1.161+. Updates face keys string with a new hair value. Hair meshes associated with skin (as defined in module_skins) are numbered from 1. Use 0 for no hair.
face_keys_get_beard = 2754 # (face_keys_get_beard, <destination>, <string_no>),
# Version 1.161+. Unpacks selected beard mesh from string containing troop/player face keys to <destination>.
face_keys_set_beard = 2755 # (face_keys_set_beard, <string_no>, <value>),
# Version 1.161+. Updates face keys string with a new beard value. Beard meshes associated with skin (as defined in module_skins) are numbered from 1. Use 0 for no beard.
face_keys_get_face_texture = 2756 # (face_keys_get_face_texture, <destination>, <string_no>),
# Version 1.161+. Unpacks selected face texture from string containing troop/player face keys to <destination>.
face_keys_set_face_texture = 2757 # (face_keys_set_face_texture, <string_no>, <value>),
# Version 1.161+. Updates face keys string with a new face texture value. Face textures associated with skin (as defined in module_skins) are numbered from 0.
face_keys_get_hair_texture = 2758 # (face_keys_get_hair_texture, <destination>, <string_no>),
# Version 1.161+. Unpacks selected hair texture from string containing troop/player face keys to <destination>. Apparently hair textures have no effect; needs further research.
face_keys_set_hair_texture = 2759 # (face_keys_set_hair_texture, <string_no>, <value>),
# Version 1.161+. Updates face keys string with a new hair texture value. Doesn't seem to have an effect; needs further research.
face_keys_get_hair_color = 2760 # (face_keys_get_hair_color, <destination>, <string_no>),
# Version 1.161+. Unpacks hair color slider value from face keys string. Values are in the range of 0..63. Mapping to specific colors depends on the hair color range defined for currently selected skin / face_texture combination.
face_keys_set_hair_color = 2761 # (face_keys_set_hair_color, <string_no>, <value>),
# Version 1.161+. Updates face keys string with a new hair color slider value. Value should be in the 0..63 range.
face_keys_get_age = 2762 # (face_keys_get_age, <destination>, <string_no>),
# Version 1.161+. Unpacks age slider value from face keys string. Values are in the range of 0..63.
face_keys_set_age = 2763 # (face_keys_set_age, <string_no>, <value>),
# Version 1.161+. Updates face keys string with a new age slider value. Value should be in the 0..63 range.
face_keys_get_skin_color = 2764 # (face_keys_get_skin_color, <destination>, <string_no>),
# Version 1.161+. Apparently doesn't work. Should retrieve skin color value from face keys string into <destination>.
face_keys_set_skin_color = 2765 # (face_keys_set_skin_color, <string_no>, <value>),
# Version 1.161+. Apparently doesn't work. Should update face keys string with a new skin color value.
face_keys_get_morph_key = 2766 # (face_keys_get_morph_key, <destination>, <string_no>, <key_no>),
# Version 1.161+. Unpacks morph key value from face keys string. See morph key indices in module_skins.py file. Note that only 8 out of 27 morph keys are actually accessible (from 'chin_size' to 'cheeks'). Morph key values are in the 0..7 range.
face_keys_set_morph_key = 2767 # (face_keys_set_morph_key, <string_no>, <key_no>, <value>),
# Version 1.161+. Updates face keys string with a new morph key value. See morph key indices in module_skins.py file. Note that only 8 out of 27 morph keys are actually accessible (from 'chin_size' to 'cheeks'). Morph key values should be in the 0..7 range.
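# A hedged sketch tying the face key operations together: age a troop's face
# by one step by unpacking its keys, bumping the age slider and writing the
# keys back (1.161+; the troop id is illustrative):
# (str_store_troop_face_keys, s1, "trp_knight_1_1"),
# (face_keys_get_age, ":age", s1),
# (val_add, ":age", 8),
# (val_min, ":age", 63), # clamp to the 0..63 slider range
# (face_keys_set_age, s1, ":age"),
# (troop_set_face_keys, "trp_knight_1_1", s1),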
################################################################################
# [ Z11 ] QUESTS
################################################################################
# Quests are just that: some tasks that characters in the game world want the
# player to do. It's interesting to note that in Warband quests can have three
# possible outcomes: success, failure and conclusion. Generally the last
# option is used to indicate some "intermediate" quest result, which is
# neither a full success, nor a total failure.
# Conditional operations
check_quest_active = 200 # (check_quest_active, <quest_id>),
# Checks that the quest has been started but not yet cancelled or completed. Will not fail for concluded, failed or succeeded quests for as long as they have not yet been completed.
check_quest_finished = 201 # (check_quest_finished, <quest_id>),
# Checks that the quest has been completed (result does not matter) and not taken again yet.
check_quest_succeeded = 202 # (check_quest_succeeded, <quest_id>),
# Checks that the quest has succeeded and not taken again yet (check will be successful even after the quest is completed).
check_quest_failed = 203 # (check_quest_failed, <quest_id>),
# Checks that the quest has failed and not taken again yet (check will be successful even after the quest is completed).
check_quest_concluded = 204 # (check_quest_concluded, <quest_id>),
# Checks that the quest was concluded with any result and not taken again yet.
# Slot operations for quests
quest_set_slot = 506 # (quest_set_slot, <quest_id>, <slot_no>, <value>),
quest_get_slot = 526 # (quest_get_slot, <destination>, <quest_id>, <slot_no>),
quest_slot_eq = 546 # (quest_slot_eq, <quest_id>, <slot_no>, <value>),
quest_slot_ge = 566 # (quest_slot_ge, <quest_id>, <slot_no>, <value>),
# Quest management
start_quest = 1280 # (start_quest, <quest_id>, <giver_troop_id>),
# Starts the quest and marks giver_troop as the troop who gave it.
conclude_quest = 1286 # (conclude_quest, <quest_id>),
# Sets quest status as concluded but keeps it in the list. Frequently used to indicate "uncertain" quest status, when it's neither fully successful nor a total failure.
succeed_quest = 1282 # (succeed_quest, <quest_id>), #also concludes the quest
# Sets quest status as successful but keeps it in the list (player must visit quest giver to complete it before he can get another quest of the same type).
fail_quest = 1283 # (fail_quest, <quest_id>), #also concludes the quest
# Sets quest status as failed but keeps it in the list (player must visit quest giver to complete it before he can get another quest of the same type).
complete_quest = 1281 # (complete_quest, <quest_id>),
# Successfully completes specified quest, removing it from the list of active quests.
cancel_quest = 1284 # (cancel_quest, <quest_id>),
# Cancels specified quest without completing it, removing it from the list of active quests.
setup_quest_text = 1290 # (setup_quest_text, <quest_id>),
# Operation will refresh default quest description (as defined in module_quests.py). This is important when quest description contains references to variables and registers which need to be initialized with their current values.
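# A hedged sketch of a typical quest lifecycle using the operations above
# (quest and troop ids are illustrative; in practice these calls are spread
# across dialogs and scripts):
# (setup_quest_text, "qst_deliver_message"),
# (start_quest, "qst_deliver_message", "trp_some_lord"),
# # ... later, when the player has done the job:
# (succeed_quest, "qst_deliver_message"),
# # ... and when the player reports back to the quest giver:
# (complete_quest, "qst_deliver_message"),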
store_partner_quest = 2240 # (store_partner_quest, <destination>),
# During conversation, if there's a quest given by the conversation partner, the operation will return its id.
setup_quest_giver = 1291 # (setup_quest_giver, <quest_id>, <string_id>),
# Apparently deprecated, as quest giver troop is now defined as a parameter of (start_quest).
store_random_quest_in_range = 2250 # (store_random_quest_in_range, <destination>, <lower_bound>, <upper_bound>),
# Apparently deprecated as the logic for picking a new quest has been moved to module_scripts.
set_quest_progression = 1285 # (set_quest_progression, <quest_id>, <value>),
# Deprecated and useless, operation has no game effects and it's impossible to retrieve quest progression status anyway.
store_random_troop_to_raise = 2251 # (store_random_troop_to_raise, <destination>, <lower_bound>, <upper_bound>),
# Apparently deprecated.
store_random_troop_to_capture = 2252 # (store_random_troop_to_capture, <destination>, <lower_bound>, <upper_bound>),
# Apparently deprecated.
store_quest_number = 2261 # (store_quest_number, <destination>, <quest_id>),
# Apparently deprecated.
store_quest_item = 2262 # (store_quest_item, <destination>, <item_id>),
# Apparently deprecated. Native now uses quest slots to keep track of this information.
store_quest_troop = 2263 # (store_quest_troop, <destination>, <troop_id>),
# Apparently deprecated. Native now uses quest slots to keep track of this information.
################################################################################
# [ Z12 ] ITEMS
################################################################################
# The title is a bit misleading here. Items, despite the name, are not actual
# game items. Rather, these are the *definitions* for real game items, and you
# can frequently see them referenced as "item types". However, you should not
# confuse this with the so-called itp_type_* constants, which define the major
# item classes existing in the game.
# Consider this: a Smoked Fish (50/50) in your character's inventory is an
# item in the game world. Its item type is "itm_smoked_fish" and its basic
# class is itp_type_food. So take care: operations in this section are dealing
# with "itm_smoked_fish", not with actual fish in your inventory. The latter
# is actually just an inventory slot from the Module System's point of view,
# and operations to work with it are in the troops section of the file.
# Conditional operations
item_has_property = 2723 # (item_has_property, <item_kind_no>, <property>),
# Version 1.161+. Check that the item has specified property flag set. See the list of itp_* flags in header_items.py.
item_has_capability = 2724 # (item_has_capability, <item_kind_no>, <capability>),
# Version 1.161+. Checks that the item has specified capability flag set. See the list of itcf_* flags in header_items.py
item_has_modifier = 2725 # (item_has_modifier, <item_kind_no>, <item_modifier_no>),
# Version 1.161+. Checks that the specified modifier is valid for the item. See the list of imod_* values in header_item_modifiers.py.
item_has_faction = 2726 # (item_has_faction, <item_kind_no>, <faction_no>),
# Version 1.161+. Checks that the item is available for specified faction. Note that an item with no factions set is available to all factions.
# Item slot operations
item_set_slot = 507 # (item_set_slot, <item_id>, <slot_no>, <value>),
item_get_slot = 527 # (item_get_slot, <destination>, <item_id>, <slot_no>),
item_slot_eq = 547 # (item_slot_eq, <item_id>, <slot_no>, <value>),
item_slot_ge = 567 # (item_slot_ge, <item_id>, <slot_no>, <value>),
# Generic item operations
item_get_type = 1570 # (item_get_type, <destination>, <item_id>),
# Returns item class (see header_items.py for itp_type_* constants).
store_item_value = 2230 # (store_item_value, <destination>, <item_id>),
# Stores item nominal price as listed in module_items.py. Does not take item modifier or quantity (for food items) into account.
store_random_horse = 2257 # (store_random_horse, <destination>),
# Deprecated since early M&B days.
store_random_equipment = 2258 # (store_random_equipment, <destination>),
# Deprecated since early M&B days.
store_random_armor = 2259 # (store_random_armor, <destination>),
# Deprecated since early M&B days.
cur_item_add_mesh = 1964 # (cur_item_add_mesh, <mesh_name_string>, [<lod_begin>], [<lod_end>]),
# Version 1.161+. Only call inside ti_on_init_item trigger. Adds another mesh to item, allowing the creation of combined items. Parameter <mesh_name_string> should contain mesh name itself, NOT a mesh reference. LOD values are optional. If <lod_end> is used, it will not be loaded.
cur_item_set_material = 1978 # (cur_item_set_material, <string_no>, <sub_mesh_no>, [<lod_begin>], [<lod_end>]),
# Version 1.161+. Only call inside ti_on_init_item trigger. Replaces material that will be used to render the item mesh. Use 0 for <sub_mesh_no> to replace material for base mesh. LOD values are optional. If <lod_end> is used, it will not be loaded.
item_get_weight = 2700 # (item_get_weight, <destination_fixed_point>, <item_kind_no>),
# Version 1.161+. Retrieves item weight as a fixed point value.
item_get_value = 2701 # (item_get_value, <destination>, <item_kind_no>),
# Version 1.161+. Retrieves item base price. Essentially a duplicate of (store_item_value).
item_get_difficulty = 2702 # (item_get_difficulty, <destination>, <item_kind_no>),
# Version 1.161+. Retrieves item difficulty value.
item_get_head_armor = 2703 # (item_get_head_armor, <destination>, <item_kind_no>),
# Version 1.161+. Retrieves item head armor value.
item_get_body_armor = 2704 # (item_get_body_armor, <destination>, <item_kind_no>),
# Version 1.161+. Retrieves item body armor value.
item_get_leg_armor = 2705 # (item_get_leg_armor, <destination>, <item_kind_no>),
# Version 1.161+. Retrieves item leg armor value.
item_get_hit_points = 2706 # (item_get_hit_points, <destination>, <item_kind_no>),
# Version 1.161+. Retrieves item hit points amount.
item_get_weapon_length = 2707 # (item_get_weapon_length, <destination>, <item_kind_no>),
# Version 1.161+. Retrieves item length (for weapons) or shield half-width (for shields). To get actual shield width, multiply this value by 2. Essentially, it is a distance from shield's "center" point to its left, right and top edges (and bottom edge as well if shield height is not defined).
item_get_speed_rating = 2708 # (item_get_speed_rating, <destination>, <item_kind_no>),
# Version 1.161+. Retrieves item speed rating.
item_get_missile_speed = 2709 # (item_get_missile_speed, <destination>, <item_kind_no>),
# Version 1.161+. Retrieves item missile speed rating.
item_get_max_ammo = 2710 # (item_get_max_ammo, <destination>, <item_kind_no>),
# Version 1.161+. Retrieves item max ammo amount.
item_get_accuracy = 2711 # (item_get_accuracy, <destination>, <item_kind_no>),
# Version 1.161+. Retrieves item accuracy value. Note that this operation will return 0 for an item with undefined accuracy, even though the item accuracy will actually default to 100.
item_get_shield_height = 2712 # (item_get_shield_height, <destination_fixed_point>, <item_kind_no>),
# Version 1.161+. Retrieves distance from shield "center" to its bottom edge as a fixed point number. Use (set_fixed_point_multiplier, 100), to retrieve the correct value with this operation. To get actual shield height, use shield_height + weapon_length if this operation returns a non-zero value, otherwise use 2 * weapon_length.
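# A hedged sketch of recovering a shield's full dimensions per the notes
# above (1.161+; ":item_no" is an illustrative shield item id):
# (set_fixed_point_multiplier, 100),
# (item_get_weapon_length, ":half_width", ":item_no"),
# (store_mul, ":width", ":half_width", 2),
# (item_get_shield_height, ":bottom_part", ":item_no"),
# (try_begin),
#   (gt, ":bottom_part", 0),
#   (store_add, ":height", ":half_width", ":bottom_part"),
# (else_try),
#   (assign, ":height", ":width"), # no height defined: shield is "round"
# (try_end),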
item_get_horse_scale = 2713 # (item_get_horse_scale, <destination_fixed_point>, <item_kind_no>),
# Version 1.161+. Retrieves horse scale value as fixed point number.
item_get_horse_speed = 2714 # (item_get_horse_speed, <destination>, <item_kind_no>),
# Version 1.161+. Retrieves horse speed value.
item_get_horse_maneuver = 2715 # (item_get_horse_maneuver, <destination>, <item_kind_no>),
# Version 1.161+. Retrieves horse maneuverability value.
item_get_food_quality = 2716 # (item_get_food_quality, <destination>, <item_kind_no>),
# Version 1.161+. Retrieves food quality coefficient (as of Warband 1.165, this coefficient is actually set for many food items, but never used in the code as there was no way to retrieve this coeff before 1.161 patch).
item_get_abundance = 2717 # (item_get_abundance, <destination>, <item_kind_no>),
# Version 1.161+. Retrieve item abundance value. Note that this operation will return 0 for an item with undefined abundance, even though the item abundance will actually default to 100.
item_get_thrust_damage = 2718 # (item_get_thrust_damage, <destination>, <item_kind_no>),
# Version 1.161+. Retrieves thrust base damage value for item.
item_get_thrust_damage_type = 2719 # (item_get_thrust_damage_type, <destination>, <item_kind_no>),
# Version 1.161+. Retrieves thrust damage type for item (see definitions for "cut", "pierce" and "blunt" in header_items.py).
item_get_swing_damage = 2720 # (item_get_swing_damage, <destination>, <item_kind_no>),
# Version 1.161+. Retrieves swing base damage value for item.
item_get_swing_damage_type = 2721 # (item_get_swing_damage_type, <destination>, <item_kind_no>),
# Version 1.161+. Retrieves swing damage type for item (see definitions for "cut", "pierce" and "blunt" in header_items.py).
item_get_horse_charge_damage = 2722 # (item_get_horse_charge_damage, <destination>, <item_kind_no>),
# Version 1.161+. Retrieves horse charge base damage.
################################################################################
# [ Z13 ] SOUNDS AND MUSIC TRACKS
################################################################################
# There are two types of sound in the game: sounds and tracks. Sounds are just
# short sound effects. They can be positional (i.e. emitted by some object on
# the scene or by player's opponent during the dialog). They can be generic
# sound effects, like playing some drums when player meets mountain bandits.
# Tracks are the background music. The game works as a kind of a music box,
# cycling the available melodies according to the situation. It is up to the
# Module System developer, however, to tell the game what the situation is.
# There are two factors which you can tell the game: situation and culture.
# So you can tell the game that the situation is "ambush" and the culture is
# "khergits", and the game will select the musuc tracks which fit this
# combination of situation and culture and will rotate them randomly. And of
# course, you can also tell the game to play one specific track if you want.
play_sound_at_position = 599 # (play_sound_at_position, <sound_id>, <position>, [options]),
# Plays a sound in specified scene position. See sf_* flags in header_sounds.py for reference on possible options.
play_sound = 600 # (play_sound, <sound_id>, [options]),
# Plays a sound. If the operation is called from agent, scene_prop or item trigger, then the sound will be positional and 3D. See sf_* flags in header_sounds.py for reference on possible options.
play_track = 601 # (play_track, <track_id>, [options]),
# Plays specified music track. Possible options: 0 = finish current then play this, 1 = fade out current and start this, 2 = stop current abruptly and start this
play_cue_track = 602 # (play_cue_track, <track_id>),
# Plays specified music track OVER any currently played music track (so you can get two music tracks playing simultaneously). Hardly useful.
music_set_situation = 603 # (music_set_situation, <situation_type>),
# Sets current situation(s) in the game (see mtf_* flags in header_music.py for reference) so the game engine can pick matching tracks from module_music.py. Use 0 to stop any currently playing music (it will resume when situation is later set to something).
music_set_culture = 604 # (music_set_culture, <culture_type>),
# Sets current culture(s) in the game (see mtf_* flags in header_music.py for reference) so the game engine can pick matching tracks from module_music.py. Use 0 to stop any currently playing music (it will resume when cultures are later set to something).
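# A hedged sketch: tell the engine the player has been ambushed by a Khergit
# party so it can rotate matching tracks. Flag names are as defined in
# Native's header_music.py; the mapping of a specific culture to a specific
# mtf_culture_* flag is illustrative.
# (music_set_situation, mtf_sit_ambushed),
# (music_set_culture, mtf_culture_3),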
stop_all_sounds = 609 # (stop_all_sounds, [options]),
# Stops all playing sounds. Version 1.153 options: 0 = stop only looping sounds, 1 = stop all sounds. Version 1.143 options: 0 = let current track finish, 1 = fade it out, 2 = stop it abruptly.
store_last_sound_channel = 615 # (store_last_sound_channel, <destination>),
# Version 1.153+. UNTESTED. Stores the sound channel used for the last sound operation.
stop_sound_channel = 616 # (stop_sound_channel, <sound_channel_no>),
# Version 1.153+. UNTESTED. Stops sound playing on specified sound channel.
################################################################################
# [ Z14 ] POSITIONS
################################################################################
# Positions are the 3D math of the game. If you want to handle objects in
# space, you will inevitably have to deal with positions. Note that while most
# position-handling operations work both on global map and on the scenes,
# there are operations which will only work in one or another mode.
# Each position consists of three parts: coordinates, rotation and scale.
# Coordinates are three numbers - (X,Y,Z) - which define a certain point in
# space relative to the base of coordinates. Most of the time, the base of
# coordinates is either the center of the global map, or the center of the
# scene, but there may be exceptions. Note that operations with coordinates
# nearly always use fixed point numbers.
# Position rotation determines just that - rotation around corresponding
# world axis. So rotation around Z axis means rotation around vertical axis,
# in other words - turning right and left. Rotation around X and Y axis will
# tilt the position forward/backwards and right/left respectively.
# It is common game convention that X world axis points to the East, Y world
# axis points to the North and Z world axis points straight up. However, this
# is the so-called global coordinate system, and more often than not you'll be
# dealing with local coordinates. Local coordinates are the coordinate system
# defined by the object's current position. For the object, its X axis is to
# the right, Y axis is forward, and Z axis is up. This is simple enough, but
# consider what happens if that object is turned upside down in world space.
# Its Z axis will still point upwards *from the object's point of view*; in
# other words, in global space it will be pointing *downwards*. And if the
# object is moving, then its local coordinate system is moving with it...
# you get the idea.
# Imagine the position as a small point with an arrow somewhere in space.
# Position's coordinates are the point's position. Arrow points horizontally
# to the North by default, and position's rotation determines how much it was
# turned in each of the three directions.
# Final element of position is scale. It is of no direct relevance to the
# position itself, and it does not participate in any calculations. However
# it is important when you retrieve or set positions of objects. In this
# case, position's scale is object's scale - so you can shrink that wall
# or quite the opposite, make it grow to the sky, depending on your whim.
# Generic position operations
init_position = 701 # (init_position, <position>),
# Sets position coordinates to [0,0,0], without any rotation and default scale.
copy_position = 700 # (copy_position, <position_target>, <position_source>),
# Makes a duplicate of position_source.
position_copy_origin = 719 # (position_copy_origin, <position_target>, <position_source>),
# Copies coordinates from source position to target position, without changing rotation or scale.
position_copy_rotation = 718 # (position_copy_rotation, <position_target>, <position_source>),
# Copies rotation from source position to target position, without changing coordinates or scale.
position_transform_position_to_parent = 716 # (position_transform_position_to_parent, <position_dest>, <position_anchor>, <position_relative_to_anchor>),
# Converts position from local coordinate space to parent coordinate space. In other words, if you have some position on the scene (anchor) and a position describing some place *relative* to anchor (for example [10,20,0] means "20 meters forward and 10 meters to the right"), after calling this operation you will get that position coordinates on the scene in <position_dest>. Rotation and scale is also taken care of, so you can use relative angles.
position_transform_position_to_local = 717 # (position_transform_position_to_local, <position_dest>, <position_anchor>, <position_source>),
# The opposite to (position_transform_position_to_parent), this operation allows you to get source's *relative* position to your anchor. Suppose you want to run some decision making for your bot agent depending on player's position. In order to know where player is located relative to your bot you call (position_transform_position_to_local, <position_dest>, <bot_position>, <player_position>). Then we check position_dest's Y coordinate - if it's negative, then the player is behind our bot's back.
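# The bot example above, as a hedged sketch (agent ids and position registers
# are illustrative):
# (agent_get_position, pos1, ":bot_agent"),
# (agent_get_position, pos2, ":player_agent"),
# (position_transform_position_to_local, pos3, pos1, pos2),
# (position_get_y, ":relative_y", pos3),
# (lt, ":relative_y", 0), # player is behind the bot's back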
# Position (X,Y,Z) coordinates
position_get_x = 726 # (position_get_x, <destination_fixed_point>, <position>),
# Return position X coordinate (to the east, or to the right). Base unit is meters. Use (set_fixed_point_multiplier) to set another measurement unit (100 will get you centimeters, 1000 will get you millimeters, etc).
position_get_y = 727 # (position_get_y, <destination_fixed_point>, <position>),
# Return position Y coordinate (to the north, or forward). Base unit is meters. Use (set_fixed_point_multiplier) to set another measurement unit (100 will get you centimeters, 1000 will get you millimeters, etc).
position_get_z = 728 # (position_get_z, <destination_fixed_point>, <position>),
# Return position Z coordinate (to the top). Base unit is meters. Use (set_fixed_point_multiplier) to set another measurement unit (100 will get you centimeters, 1000 will get you millimeters, etc).
position_set_x = 729 # (position_set_x, <position>, <value_fixed_point>),
# Set position X coordinate.
position_set_y = 730 # (position_set_y, <position>, <value_fixed_point>),
# Set position Y coordinate.
position_set_z = 731 # (position_set_z, <position>, <value_fixed_point>),
# Set position Z coordinate.
position_move_x = 720 # (position_move_x, <position>, <movement>, [value]),
# Moves position along X axis. Movement distance is in centimeters. Optional parameter determines whether the position is moved along the local (value=0) or global (value=1) X axis (i.e. whether the position will be moved to its right/left, or to the global east/west).
position_move_y = 721 # (position_move_y, <position>, <movement>, [value]),
# Moves position along Y axis. Movement distance is in centimeters. Optional parameter determines whether the position is moved along the local (value=0) or global (value=1) Y axis (i.e. whether the position will be moved forward/backwards, or to the global north/south).
position_move_z = 722 # (position_move_z, <position>, <movement>, [value]),
# Moves position along Z axis. Movement distance is in centimeters. Optional parameter determines whether the position is moved along the local (value=0) or global (value=1) Z axis (i.e. whether the position will be moved along its own up/down direction, or the global up/down - these directions will differ if the position is tilted).
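# A hedged sketch: find the spot two meters in front of an agent, regardless
# of which way it is facing (local Y is "forward"; the agent local is
# illustrative):
# (agent_get_position, pos1, ":agent_no"),
# (position_move_y, pos1, 200), # 200 cm forward along the local Y axis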
position_set_z_to_ground_level = 791 # (position_set_z_to_ground_level, <position>),
# This will adjust the position's Z coordinate so that it rests on the ground level (i.e. an agent could stand on that position). This takes scene props with their collision meshes into account. Only works during a mission, so you can't measure global map height using this.
position_get_distance_to_terrain = 792 # (position_get_distance_to_terrain, <destination>, <position>),
# This will measure the distance between position and terrain below, ignoring all scene props and their collision meshes. Operation only works on the scenes and cannot be used on the global map.
position_get_distance_to_ground_level = 793 # (position_get_distance_to_ground_level, <destination>, <position>),
# This will measure the distance between position and the ground level, taking scene props and their collision meshes into account. Operation only works on the scenes and cannot be used on the global map.
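# A hedged sketch: drop a position onto the ground (scene props included)
# before using it as a spawn point:
# (set_fixed_point_multiplier, 100),
# (position_set_z, pos1, 10000), # start 100 meters up, well above the scene
# (position_set_z_to_ground_level, pos1),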
# Position rotation
position_get_rotation_around_x = 742 # (position_get_rotation_around_x, <destination>, <position>),
# Returns angle (in degrees) that the position is rotated around X axis (tilt forward/backwards).
position_get_rotation_around_y = 743 # (position_get_rotation_around_y, <destination>, <position>),
# Returns angle (in degrees) that the position is rotated around Y axis (tilt right/left).
position_get_rotation_around_z = 740 # (position_get_rotation_around_z, <destination>, <position>),
# Returns angle (in degrees) that the position is rotated around Z axis (turning right/left).
position_rotate_x = 723 # (position_rotate_x, <position>, <angle>),
# Rotates position around its X axis (tilt forward/backwards).
position_rotate_y = 724 # (position_rotate_y, <position>, <angle>),
# Rotates position around Y axis (tilt right/left).
position_rotate_z = 725 # (position_rotate_z, <position>, <angle>, [use_global_z_axis]),
# Rotates position around Z axis (rotate right/left). Pass 1 for use_global_z_axis to rotate the position around global axis instead.
position_rotate_x_floating = 738 # (position_rotate_x_floating, <position>, <angle_fixed_point>),
# Same as (position_rotate_x), but takes fixed point value as parameter, allowing for more precise rotation.
position_rotate_y_floating = 739 # (position_rotate_y_floating, <position>, <angle_fixed_point>),
# Same as (position_rotate_y), but takes fixed point value as parameter, allowing for more precise rotation.
position_rotate_z_floating = 734 # (position_rotate_z_floating, <position_no>, <angle_fixed_point>),
# Version 1.161+. Same as (position_rotate_z), but takes fixed point value as parameter, allowing for more precise rotation.
# Position scale
position_get_scale_x = 735 # (position_get_scale_x, <destination_fixed_point>, <position>),
# Retrieves position scaling along X axis.
position_get_scale_y = 736 # (position_get_scale_y, <destination_fixed_point>, <position>),
# Retrieves position scaling along Y axis.
position_get_scale_z = 737 # (position_get_scale_z, <destination_fixed_point>, <position>),
# Retrieves position scaling along Z axis.
position_set_scale_x = 744 # (position_set_scale_x, <position>, <value_fixed_point>),
# Sets position scaling along X axis.
position_set_scale_y = 745 # (position_set_scale_y, <position>, <value_fixed_point>),
# Sets position scaling along Y axis.
position_set_scale_z = 746 # (position_set_scale_z, <position>, <value_fixed_point>),
# Sets position scaling along Z axis.
# Measurement of distances and angles
get_angle_between_positions = 705 # (get_angle_between_positions, <destination_fixed_point>, <position_no_1>, <position_no_2>),
# Calculates angle between positions, using positions as vectors. Only rotation around Z axis is used. In other words, the function returns the difference between Z rotations of both positions.
position_has_line_of_sight_to_position = 707 # (position_has_line_of_sight_to_position, <position_no_1>, <position_no_2>),
# Checks that you can see one position from another. This obviously implies that both positions must be in global space. Note that this check is computationally expensive, so try to keep the number of such calls to a minimum.
get_distance_between_positions = 710 # (get_distance_between_positions, <destination>, <position_no_1>, <position_no_2>),
# Returns distance between positions in centimeters.
get_distance_between_positions_in_meters = 711 # (get_distance_between_positions_in_meters, <destination>, <position_no_1>, <position_no_2>),
# Returns distance between positions in meters.
get_sq_distance_between_positions = 712 # (get_sq_distance_between_positions, <destination>, <position_no_1>, <position_no_2>),
# Returns squared distance between two positions in centimeters.
get_sq_distance_between_positions_in_meters = 713 # (get_sq_distance_between_positions_in_meters, <destination>, <position_no_1>, <position_no_2>),
# Returns squared distance between two positions in meters.
position_is_behind_position = 714 # (position_is_behind_position, <position_base>, <position_to_check>),
# Checks if the second position is behind the first.
get_sq_distance_between_position_heights = 715 # (get_sq_distance_between_position_heights, <destination>, <position_no_1>, <position_no_2>),
# Returns squared distance between position *heights* in centimeters.
position_normalize_origin = 741 # (position_normalize_origin, <destination_fixed_point>, <position>),
# What this operation seems to do is calculate the distance between the zero point [0,0,0] and the point with position's coordinates. Can be used to quickly calculate distance to relative positions.
position_get_screen_projection = 750 # (position_get_screen_projection, <position_screen>, <position_world>),
# Calculates the screen coordinates of the position and stores it as position_screen's X and Y coordinates.
# Global map positions
map_get_random_position_around_position = 1627 # (map_get_random_position_around_position, <dest_position_no>, <source_position_no>, <radius>),
# Returns a random position on the global map in the vicinity of the source_position.
map_get_land_position_around_position = 1628 # (map_get_land_position_around_position, <dest_position_no>, <source_position_no>, <radius>),
# Returns a random position on the global map in the vicinity of the source_position. Will always return a land position (i.e. some place you can walk to).
map_get_water_position_around_position = 1629 # (map_get_water_position_around_position, <dest_position_no>, <source_position_no>, <radius>),
# Returns a random position on the global map in the vicinity of the source_position. Will always return a water position (i.e. sea, lake or river).
################################################################################
# [ Z15 ] GAME NOTES
################################################################################
# The game provides the player with the Notes screen, where there are several
# sections: Troops, Factions, Parties, Quests and Information. This is the
# player's "diary", where all information player knows is supposed to be
# stored. With the operations from this section, modder can control what
# objects the player will be able to see in their corresponding sections of
# the Notes screen, and what information will be displayed on each object.
# Note that there's a number of engine-called scripts which take priority over
# text notes created by these operations. Any information in these notes will
# only be visible to the player if those scripts "refuse" to generate the note
# page dynamically. The following scripts can override these notes:
# script_game_get_troop_note
# script_game_get_center_note
# script_game_get_faction_note
# script_game_get_quest_note
# script_game_get_info_page_note
troop_set_note_available = 1095 # (troop_set_note_available, <troop_id>, <value>),
# Enables (value = 1) or disables (value = 0) troop's page in the Notes / Characters section.
add_troop_note_tableau_mesh = 1108 # (add_troop_note_tableau_mesh, <troop_id>, <tableau_material_id>),
# Adds graphical elements to the troop's information page (usually banner and portrait).
add_troop_note_from_dialog = 1114 # (add_troop_note_from_dialog, <troop_id>, <note_slot_no>, <expires_with_time>),
# Adds current dialog text to troop notes. Each troop has 16 note slots. Last parameter is used to mark the note as time-dependent: if its value is 1, the note will be marked ("Report is current") and will be updated appropriately as the game progresses ("Report is X days old").
add_troop_note_from_sreg = 1117 # (add_troop_note_from_sreg, <troop_id>, <note_slot_no>, <string_id>, <expires_with_time>),
# Adds any text stored in string register to troop notes. Each troop has 16 note slots. Last parameter is used to mark the note as time-dependent: if its value is 1, the note will be marked ("Report is current") and will be updated appropriately as the game progresses ("Report is X days old").
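# A hedged sketch: write a time-stamped note onto a troop's page (note slot,
# string and troop id are illustrative):
# (str_store_string, s1, "@This lord owes you 300 denars."),
# (add_troop_note_from_sreg, "trp_some_lord", 0, s1, 1),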
faction_set_note_available = 1096 # (faction_set_note_available, <faction_id>, <value>), #1 = available, 0 = not available
# Enables (value = 1) or disables (value = 0) faction's page in the Notes / Factions section.
add_faction_note_tableau_mesh = 1109 # (add_faction_note_tableau_mesh, <faction_id>, <tableau_material_id>),
# Adds graphical elements to the faction's information page (usually graphical collage).
add_faction_note_from_dialog = 1115 # (add_faction_note_from_dialog, <faction_id>, <note_slot_no>, <expires_with_time>),
# Adds current dialog text to faction notes. Each faction has 16 note slots. Last parameter is used to mark the note as time-dependent: if its value is 1, the note will be marked ("Report is current") and will be updated appropriately as the game progresses ("Report is X days old").
add_faction_note_from_sreg = 1118 # (add_faction_note_from_sreg, <faction_id>, <note_slot_no>, <string_id>, <expires_with_time>),
# Adds any text stored in string register to faction notes. Each faction has 16 note slots. Last parameter is used to mark the note as time-dependent: if its value is 1, the note will be marked ("Report is current") and will be updated appropriately as the game progresses ("Report is X days old").
party_set_note_available = 1097 # (party_set_note_available, <party_id>, <value>), #1 = available, 0 = not available
# Enables (value = 1) or disables (value = 0) party's page in the Notes / Parties section.
add_party_note_tableau_mesh = 1110 # (add_party_note_tableau_mesh, <party_id>, <tableau_material_id>),
# Adds graphical elements to the party's information page (usually map icon).
add_party_note_from_dialog = 1116 # (add_party_note_from_dialog, <party_id>, <note_slot_no>, <expires_with_time>),
# Adds current dialog text to party notes. Each party has 16 note slots. Last parameter is used to mark the note as time-dependent: if its value is 1, the note will be marked ("Report is current") and will be updated appropriately as the game progresses ("Report is X days old").
add_party_note_from_sreg = 1119 # (add_party_note_from_sreg, <party_id>, <note_slot_no>, <string_id>, <expires_with_time>),
# Adds any text stored in string register to party notes. Each party has 16 note slots. Last parameter is used to mark the note as time-dependent: if its value is 1, the note will be marked ("Report is current") and will be updated appropriately as the game progresses ("Report is X days old").
quest_set_note_available = 1098 # (quest_set_note_available, <quest_id>, <value>), #1 = available, 0 = not available
# Enables (value = 1) or disables (value = 0) quest's page in the Notes / Quests section.
add_quest_note_tableau_mesh = 1111 # (add_quest_note_tableau_mesh, <quest_id>, <tableau_material_id>),
# Adds graphical elements to the quest's information page (not used in Native).
add_quest_note_from_dialog = 1112 # (add_quest_note_from_dialog, <quest_id>, <note_slot_no>, <expires_with_time>),
# Adds current dialog text to quest notes. Each quest has 16 note slots. Last parameter is used to mark the note as time-dependent: if its value is 1, the note will be marked ("Report is current") and will be updated appropriately as the game progresses ("Report is X days old").
add_quest_note_from_sreg = 1113 # (add_quest_note_from_sreg, <quest_id>, <note_slot_no>, <string_id>, <expires_with_time>),
# Adds any text stored in string register to quest notes. Each quest has 16 note slots. Last parameter is used to mark the note as time-dependent: if its value is 1, the note will be marked ("Report is current") and will be updated appropriately as the game progresses ("Report is X days old").
add_info_page_note_tableau_mesh = 1090 # (add_info_page_note_tableau_mesh, <info_page_id>, <tableau_material_id>),
# Adds graphical elements to the info page (not used in Native).
add_info_page_note_from_dialog = 1091 # (add_info_page_note_from_dialog, <info_page_id>, <note_slot_no>, <expires_with_time>),
# Adds current dialog text to info page notes. Each info page has 16 note slots. Last parameter is used to mark the note as time-dependent: if its value is 1, the note will be marked ("Report is current") and will be updated appropriately as the game progresses ("Report is X days old").
add_info_page_note_from_sreg = 1092 # (add_info_page_note_from_sreg, <info_page_id>, <note_slot_no>, <string_id>, <expires_with_time>),
# Adds any text stored in string register to info page notes. Each info page has 16 note slots. Last parameter is used to mark the note as time-dependent: if its value is 1, the note will be marked ("Report is current") and will be updated appropriately as the game progresses ("Report is X days old").
################################################################################
# [ Z16 ] TABLEAUS AND HERALDICS
################################################################################
# Tableaus are the tool that gives you limited access to the game graphical
# renderer. If you know 3D graphics, you know that all 3D objects consist of
# a mesh (which defines its form) and the material (which defines how this
# mesh is "painted"). With tableau functions you can do two things. First, you
# can replace or alter the materials used to render the game objects (with
# many restrictions). If this sounds esoteric to you, have a look at the game
# heraldry - it is implemented using tableaus. Second, you can render images
# of various game objects and place them on the game menus, presentations and
# so on. For example, if you open the game Inventory window, you can see your
# character in his current equipment. This character is rendered using tableau
# operations. Similarly, if you open the Notes screen and select some kingdom
# lord on the Troops section, you will see that lord's face and banner. Both
# face and banner are drawn using tableaus.
cur_item_set_tableau_material = 1981 # (cur_item_set_tableau_material, <tableau_material_id>, <instance_code>),
# Can only be used inside ti_on_init_item trigger in module_items.py. Assigns tableau to the item instance. Value of <instance_code> will be passed to tableau code. Commonly used for heraldic armors and shields.
cur_scene_prop_set_tableau_material = 1982 # (cur_scene_prop_set_tableau_material, <tableau_material_id>, <instance_code>),
# Can only be used inside ti_on_init_scene_prop trigger in module_scene_props.py. Assigns tableau to the scene prop instance. Value of <instance_code> will be passed to tableau code. Commonly used for static banners.
cur_map_icon_set_tableau_material = 1983 # (cur_map_icon_set_tableau_material, <tableau_material_id>, <instance_code>),
# Can only be used inside ti_on_init_map_icon trigger in module_map_icons.py. Assigns tableau to the icon prop instance. Value of <instance_code> will be passed to tableau code. Commonly used for player/lord party banners.
cur_agent_set_banner_tableau_material = 1986 # (cur_agent_set_banner_tableau_material, <tableau_material_id>),
# Can only be used inside ti_on_agent_spawn trigger in module_mission_templates.py. Assigns the tableau to be used for the agent's heraldic materials (typically rendering the agent's banner on heraldic equipment).
# Operations used in module_tableau_materials.py module
cur_tableau_add_tableau_mesh = 1980 # (cur_tableau_add_tableau_mesh, <tableau_material_id>, <value>, <position_register_no>),
# Used in module_tableau_materials.py to add one tableau to another. Value parameter is passed to tableau_material as is.
cur_tableau_render_as_alpha_mask = 1984 # (cur_tableau_render_as_alpha_mask),
# Tells the engine to treat the tableau as an alpha (transparency) mask.
cur_tableau_set_background_color = 1985 # (cur_tableau_set_background_color, <value>),
# Defines solid background color for the current tableau.
cur_tableau_set_ambient_light = 1987 # (cur_tableau_set_ambient_light, <red_fixed_point>, <green_fixed_point>, <blue_fixed_point>),
# Not documented. Used for tableaus rendered from 3D objects to provide uniform tinted lighting.
cur_tableau_set_camera_position = 1988 # (cur_tableau_set_camera_position, <position>),
# Not documented. Used for tableaus rendered from 3D objects to position camera as necessary (usually with a perspective camera).
cur_tableau_set_camera_parameters = 1989 # (cur_tableau_set_camera_parameters, <is_perspective>, <camera_width_times_1000>, <camera_height_times_1000>, <camera_near_times_1000>, <camera_far_times_1000>),
# Not documented. Used to define camera parameters for tableau rendering. Perspective camera is generally used to render 3D objects for tableaus, while non-perspective camera is used to modify tableau texture meshes.
cur_tableau_add_point_light = 1990 # (cur_tableau_add_point_light, <position>, <red_fixed_point>, <green_fixed_point>, <blue_fixed_point>),
# Not documented. Typically used for tableaus rendered from 3D objects to add a point light source.
cur_tableau_add_sun_light = 1991 # (cur_tableau_add_sun_light, <position>, <red_fixed_point>, <green_fixed_point>, <blue_fixed_point>),
# Not documented. Typically used for tableaus rendered from 3D objects to add a directional light source. Note that position coordinates do not matter, only rotation (i.e. light rays direction) does.
cur_tableau_add_mesh = 1992 # (cur_tableau_add_mesh, <mesh_id>, <position>, <value_fixed_point>, <value_fixed_point>),
# Adds a static mesh to the tableau with specified offset, scale and alpha. First value fixed point is the scale factor, second value fixed point is alpha. Use 0 for default values.
cur_tableau_add_mesh_with_vertex_color = 1993 # (cur_tableau_add_mesh_with_vertex_color, <mesh_id>, <position>, <value_fixed_point>, <value_fixed_point>, <value>),
# Adds a static mesh to the tableau with specified offset, scale, alpha and vertex color. First value fixed point is the scale factor, second value fixed point is alpha. Value is vertex color.
cur_tableau_add_mesh_with_scale_and_vertex_color = 2000 # (cur_tableau_add_mesh_with_scale_and_vertex_color, <mesh_id>, <position>, <scale_position>, <value_fixed_point>, <value>),
# Similar to (cur_tableau_add_mesh_with_vertex_color), but allows non-uniform scaling. Scale factors are stored as (x,y,z) position properties with fixed point values.
cur_tableau_add_map_icon = 1994 # (cur_tableau_add_map_icon, <map_icon_id>, <position>, <value_fixed_point>),
# Adds a rendered image of a map icon to current tableau. Last parameter is the scale factor for the model.
cur_tableau_add_troop = 1995 # (cur_tableau_add_troop, <troop_id>, <position>, <animation_id>, <instance_no>),
# Adds a rendered image of the troop in a specified animation to current tableau. If instance_no is 0 or less, then the face is not generated randomly (important for heroes).
cur_tableau_add_horse = 1996 # (cur_tableau_add_horse, <item_id>, <position>, <animation_id>),
# Adds a rendered image of a horse in a specified animation to current tableau.
cur_tableau_set_override_flags = 1997 # (cur_tableau_set_override_flags, <value>),
# When creating a troop image for current tableau, this operation allows you to override the troop's inventory partially or completely. See af_* flags in header_mission_templates.py for reference.
cur_tableau_clear_override_items = 1998 # (cur_tableau_clear_override_items),
# Removes any previously defined equipment overrides for the troop, allowing you to start from scratch.
cur_tableau_add_override_item = 1999 # (cur_tableau_add_override_item, <item_kind_id>),
# When creating a troop image for current tableau, the operation will add a new item to troop's equipment.
################################################################################
# [ Z17 ] STRING OPERATIONS
################################################################################
# The game provides you only limited control over string information. Most
# operations will either retrieve some string (usually the name) from the game
# object, or set that object's name to a string.
# Two important functions are str_store_string and str_store_string_reg. They
# are different from all others because they not only assign the string to a
# string register, they *process* it. For example, if source string contains
# "{reg3}", then the resulting string will have the register name and it's
# surrounding brackets replaced with the value currently stored in that
# register. Other strings can be substituted as well, and even some limited
# logic can be implemented using this mechanism. You can read through
# the module_strings.py file and try to deduce what each particular
# substitution does.
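# A hedged sketch of the substitution mechanism described above (the quick
# string is illustrative):
# (assign, reg3, 5),
# (str_store_string, s1, "@You have {reg3} apples."),
# (display_message, s1),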
# Conditional operations
str_is_empty = 2318 # (str_is_empty, <string_register>),
# Checks that referenced string register is empty.
# Other string operations
str_clear = 2319 # (str_clear, <string_register>),
# Clears the contents of the referenced string register.
str_store_string = 2320 # (str_store_string, <string_register>, <string_id>),
# Stores a string value in the referenced string register. Only string constants and quick strings can be stored this way.
str_store_string_reg = 2321 # (str_store_string_reg, <string_register>, <string_no>),
# Copies the contents of one string register into another.
str_store_troop_name = 2322 # (str_store_troop_name, <string_register>, <troop_id>),
# Stores singular troop name in referenced string register.
str_store_troop_name_plural = 2323 # (str_store_troop_name_plural, <string_register>, <troop_id>),
# Stores plural troop name in referenced string register.
str_store_troop_name_by_count = 2324 # (str_store_troop_name_by_count, <string_register>, <troop_id>, <number>),
# Stores singular or plural troop name with number of troops ("29 Archers", "1 Bandit").
str_store_item_name = 2325 # (str_store_item_name, <string_register>, <item_id>),
# Stores singular item name in referenced string register.
str_store_item_name_plural = 2326 # (str_store_item_name_plural, <string_register>, <item_id>),
# Stores plural item name in referenced string register.
str_store_item_name_by_count = 2327 # (str_store_item_name_by_count, <string_register>, <item_id>),
# Stores singular or plural item name with number of items ("11 Swords", "1 Bottle of Wine").
str_store_party_name = 2330 # (str_store_party_name, <string_register>, <party_id>),
# Stores party name in referenced string register.
str_store_agent_name = 2332 # (str_store_agent_name, <string_register>, <agent_id>),
# Stores agent name in referenced string register.
str_store_faction_name = 2335 # (str_store_faction_name, <string_register>, <faction_id>),
# Stores faction name in referenced string register.
str_store_quest_name = 2336 # (str_store_quest_name, <string_register>, <quest_id>),
# Stores quest name (as defined in module_quests.py) in referenced string register.
str_store_info_page_name = 2337 # (str_store_info_page_name, <string_register>, <info_page_id>),
# Stores info page title (as defined in module_info_pages.py) in referenced string register.
str_store_date = 2340 # (str_store_date, <string_register>, <number_of_hours_to_add_to_the_current_date>),
# Stores formatted date string, using the number of hours since start of the game (can be retrieved by a call to store_current_hours).
str_store_troop_name_link = 2341 # (str_store_troop_name_link, <string_register>, <troop_id>),
# Stores troop name as an internal game link. Resulting string can be used in game notes, will be highlighted, and clicking on it will redirect the player to the details page of the referenced troop.
str_store_party_name_link = 2342 # (str_store_party_name_link, <string_register>, <party_id>),
# Stores party name as an internal game link. Resulting string can be used in game notes, will be highlighted, and clicking on it will redirect the player to the details page of the referenced party.
str_store_faction_name_link = 2343 # (str_store_faction_name_link, <string_register>, <faction_id>),
# Stores faction name as an internal game link. Resulting string can be used in game notes, will be highlighted, and clicking on it will redirect the player to the details page of the referenced faction.
str_store_quest_name_link = 2344 # (str_store_quest_name_link, <string_register>, <quest_id>),
# Stores quest name as an internal game link. Resulting string can be used in game notes, will be highlighted, and clicking on it will redirect the player to the details page of the referenced quest.
str_store_info_page_name_link = 2345 # (str_store_info_page_name_link, <string_register>, <info_page_id>),
# Stores info page title as an internal game link. Resulting string can be used in game notes, will be highlighted, and clicking on it will redirect the player to the details page of the referenced info page.
str_store_class_name = 2346 # (str_store_class_name, <string_register>, <class_id>),
# Stores name of the selected troop class (Infantry, Archers, Cavalry or any of the custom class names) in referenced string register.
game_key_get_mapped_key_name = 65 # (game_key_get_mapped_key_name, <string_register>, <game_key>),
# Version 1.161+. Stores human-readable key name that's currently assigned to the provided game key. May store "unknown" and "No key assigned" strings (the latter is defined in languages/en/ui.csv, the former seems to be hardcoded).
# Network/multiplayer-related string operations
str_store_player_username = 2350 # (str_store_player_username, <string_register>, <player_id>),
# Stores player's multiplayer username in referenced string register. Can be used in multiplayer mode only.
str_store_server_password = 2351 # (str_store_server_password, <string_register>),
# Stores server's password in referenced string register.
str_store_server_name = 2352 # (str_store_server_name, <string_register>),
# Stores server's name (as displayed to clients in server's list window) in referenced string register.
str_store_welcome_message = 2353 # (str_store_welcome_message, <string_register>),
# Stores server's welcome message in referenced string register.
str_encode_url = 2355 # (str_encode_url, <string_register>),
# This operation will "sanitize" a string to be used as part of network URL, replacing any non-standard characters with their '%'-codes.
################################################################################
# [ Z18 ] OUTPUT AND MESSAGES
################################################################################
# These operations will provide some textual information to the player during
# the game. There are three operations which will generate a game message
# (displayed as a chat-like series of text strings in the bottom-left part of
# the screen), while most others will be displaying various types of dialog
# boxes. You can also ask the player a question using these operations.
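# A minimal sketch of the three game message levels described below (quick
# strings and colors are arbitrary; the first line is only visible in debug
# mode):
#     (display_debug_message, "@Spawn check passed.", 0x808080),
#     (display_log_message, "@The village has been raided!", 0xFF0000),
#     (display_message, "@You gain renown.", 0xFFFF00),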
display_debug_message = 1104 # (display_debug_message, <string_id>, [hex_colour_code]),
# Displays a string message, but only in debug mode, using provided color (hex-coded 0xRRGGBB). The message is additionally written to rgl_log.txt file in both release and debug modes when edit mode is enabled.
display_log_message = 1105 # (display_log_message, <string_id>, [hex_colour_code]),
# Displays a string message using the provided color (hex-coded 0xRRGGBB). The message will also be written to the game log (accessible through Notes / Game Log), and will persist between sessions (i.e. it will be stored as part of the savegame).
display_message = 1106 # (display_message, <string_id>, [hex_colour_code]),
# Displays a string message using the provided color (hex-coded 0xRRGGBB).
set_show_messages = 1107 # (set_show_messages, <value>),
# Suppresses (value = 0) or enables (value = 1) game messages, including those generated by the game engine.
tutorial_box = 1120 # (tutorial_box, <string_id>, <string_id>),
# This operation is deprecated in favor of (dialog_box) below (note that both share opcode 1120), but is still used in Native.
dialog_box = 1120 # (dialog_box, <text_string_id>, [title_string_id]),
# Displays a popup window with the text message and an optional caption.
question_box = 1121 # (question_box, <string_id>, [<yes_string_id>], [<no_string_id>]),
# Displays a popup window with the text of the question and two buttons (Yes and No by default, but can be overridden). When the player selects one of possible responses, a ti_on_question_answered trigger will be executed.
tutorial_message = 1122 # (tutorial_message, <string_id>, [color], [auto_close_time]),
# Displays a popup window with tutorial text stored in referenced string or string register. Use -1 to close any currently open tutorial box. Optional parameters allow you to define text color and time period after which the tutorial box will close automatically.
tutorial_message_set_position = 1123 # (tutorial_message_set_position, <position_x>, <position_y>),
# Defines screen position for the tutorial box. Assumes screen size is 1000*750.
tutorial_message_set_size = 1124 # (tutorial_message_set_size, <size_x>, <size_y>),
# Defines size of the tutorial box. Assumes screen size is 1000*750.
tutorial_message_set_center_justify = 1125 # (tutorial_message_set_center_justify, <val>),
# Sets tutorial box to be center justified (value = 1), or use positioning dictated by tutorial_message_set_position (value = 0).
tutorial_message_set_background = 1126 # (tutorial_message_set_background, <value>),
# Defines whether the tutorial box will have a background or not (1 or 0). Default is off.
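# Sketch of a fully configured tutorial box (coordinates use the virtual
# 1000x750 screen mentioned above; the string ID is a placeholder):
#     (tutorial_message_set_position, 500, 600),
#     (tutorial_message_set_size, 400, 120),
#     (tutorial_message_set_center_justify, 1),
#     (tutorial_message_set_background, 1),
#     (tutorial_message, "str_tutorial_hint"),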
################################################################################
# [ Z19 ] GAME CONTROL: SCREENS, MENUS, DIALOGS AND ENCOUNTERS
################################################################################
# An encounter is what happens when player's party meets another party on the
# world map. While most operations in the game can be performed outside of
# encounter, there's one thing you can only do when in encounter context -
# standard game battle. When you are initiating the battle from an encounter,
# the game engine will do most of the grunt work for you. You can order the
# engine to add some parties to the battle on this or that side, and the
# soldiers from those parties will spawn on the battlefield, in the numbers
# proportional to the party sizes, and the agents will maintain links with
# their parties. If agents earn experience, this will be reflected on the
# world map, and if some agents die, party sizes will be decreased. All this
# stuff can potentially be emulated by the Module System code, but it's tons
# of work and is still much less efficient than the tool the game engine
# already provides to you.
# An important notice: when player encounters an AI party on the map, the game
# calls "game_event_party_encounter" script in the module_scripts.py. So if
# you want to implement some non-standard processing of game encounters, this
# is the place you should start from. Also note that the game implements the
# Camp menu as an encounter with a hardcoded party "p_camp_bandits".
# Also you can find many operations in this section dealing with game screens,
# game menus and game dialogs. Keep in mind that some screens only make sense
# in certain contexts, and game menus are only available on the world map, you
# cannot use game menus during the mission.
# Conditional operations
entering_town = 36 # (entering_town, <town_id>),
# Apparently deprecated.
encountered_party_is_attacker = 39 # (encountered_party_is_attacker),
# Checks that the party encountered on the world map was following the player (i.e. either the player was trying to run away, or at the very least this is a head-on clash).
conversation_screen_is_active = 42 # (conversation_screen_is_active),
# Checks that the player is currently in dialogue with some agent. Can only be used in triggers of module_mission_templates.py file.
in_meta_mission = 44 # (in_meta_mission),
# Deprecated, do not use.
# Game hardcoded windows and related operations
change_screen_return = 2040 # (change_screen_return),
# Closes any current screen and returns the player to worldmap (to scene?). 4research how it behaves in missions.
change_screen_loot = 2041 # (change_screen_loot, <troop_id>),
# Opens the Looting interface, using the provided troop as loot storage. Player has full access to troop inventory.
change_screen_trade = 2042 # (change_screen_trade, [troop_id]),
# Opens the Trade screen, using the provided troop as the trading partner. When called from module_dialogs, troop_id is optional and defaults to current dialogue partner.
change_screen_exchange_members = 2043 # (change_screen_exchange_members, [exchange_leader], [party_id]),
# Opens the Exchange Members With Party interface, using the specified party_id. If called during an encounter, party_id is optional and defaults to the encountered party. The exchange_leader parameter determines whether the party leader is exchangeable (useful when managing the castle garrison).
change_screen_trade_prisoners = 2044 # (change_screen_trade_prisoners),
# Opens the Sell Prisoners interface. Script "script_game_get_prisoner_price" will be used to determine prisoner price.
change_screen_buy_mercenaries = 2045 # (change_screen_buy_mercenaries),
# Opens the Buy Mercenaries interface, where player can hire troops from the party specified with (set_mercenary_source_party) operation. Only works from the dialog.
change_screen_view_character = 2046 # (change_screen_view_character),
# Opens the character screen of another troop. Can only be used in dialogs.
change_screen_training = 2047 # (change_screen_training),
# Opens the character screen for the troop that player is currently talking to. Only works in dialogs. Deprecated, use (change_screen_view_character) instead.
change_screen_mission = 2048 # (change_screen_mission),
# Starts the mission, using previously defined scene and mission template.
change_screen_map_conversation = 2049 # (change_screen_map_conversation, <troop_id>),
# Starts the mission, same as (change_screen_mission). However once the mission starts, player will get into dialog with the specified troop, and once the dialog ends, the mission will automatically end.
change_screen_exchange_with_party = 2050 # (change_screen_exchange_with_party, <party_id>),
# Effectively duplicates (change_screen_exchange_members), but party_id parameter is obligatory and the operation doesn't have an option to prevent party leader from being exchanged.
change_screen_equip_other = 2051 # (change_screen_equip_other, [troop_id]),
# Opens the Equip Companion interface. When calling from a dialog, it is not necessary to specify troop_id.
change_screen_map = 2052 # (change_screen_map),
# Changes the screen to global map, closing any currently running game menu, dialog or mission.
change_screen_notes = 2053 # (change_screen_notes, <note_type>, <object_id>),
# Opens the Notes screen, in the selected category (note_type: 1=troops, 2=factions, 3=parties, 4=quests, 5=info_pages) and for the specified object in that category.
change_screen_quit = 2055 # (change_screen_quit),
# Quits the game to the main menu.
change_screen_give_members = 2056 # (change_screen_give_members, [party_id]),
# Opens the Give Troops to Another Party interface. Party_id parameter is optional during an encounter and will use encountered party as default value.
change_screen_controls = 2057 # (change_screen_controls),
# Opens the standard Configure Controls screen, pausing the game.
change_screen_options = 2058 # (change_screen_options),
# Opens the standard Game Options screen, pausing the game.
set_mercenary_source_party = 1320 # (set_mercenary_source_party, <party_id>),
# Defines the party from which the player will buy mercenaries with (change_screen_buy_mercenaries).
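# Typical pairing sketch, e.g. as a dialog consequence block (the party ID is
# a placeholder):
#     (set_mercenary_source_party, "p_mercenary_camp"),
#     (change_screen_buy_mercenaries),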
start_map_conversation = 1025 # (start_map_conversation, <troop_id>, [troop_dna]),
# Starts a conversation with the selected troop. Can be called directly from global map or game menus. Troop DNA parameter allows you to randomize non-hero troop appearances.
# Game menus
set_background_mesh = 2031 # (set_background_mesh, <mesh_id>),
# Sets the specified mesh as the background for the current menu. Possibly can be used for dialogs or presentations, but was not tested.
set_game_menu_tableau_mesh = 2032 # (set_game_menu_tableau_mesh, <tableau_material_id>, <value>, <position_register_no>),
# Adds a tableau to the current game menu screen. Position (X,Y) coordinates define mesh position, Z coordinate defines scaling. Parameter <value> will be passed as tableau_material script parameter.
jump_to_menu = 2060 # (jump_to_menu, <menu_id>),
# Opens the specified game menu. Note this only happens after the current block of code completes execution.
disable_menu_option = 2061 # (disable_menu_option),
# Never used in Native. Apparently deprecated, as menu options have prerequisite code blocks now.
# Game encounter handling operations
set_party_battle_mode = 1020 # (set_party_battle_mode),
# Used before or during the mission to start battle mode (and apparently make agents use appropriate AI).
finish_party_battle_mode = 1019 # (finish_party_battle_mode),
# Used during the mission to stop battle mode.
start_encounter = 1300 # (start_encounter, <party_id>),
# Forces the player party to initiate encounter with the specified party. Distance does not matter in this situation.
leave_encounter = 1301 # (leave_encounter),
# Leaves encounter mode.
encounter_attack = 1302 # (encounter_attack),
# Apparently starts the standard battle with the encountered party. 4research.
select_enemy = 1303 # (select_enemy, <value>),
# When joining a battle, this determines what side the player will be helping. The defending party is always 0, and the attacking party is always 1. The player can support either the attackers (value = 0, i.e. defenders are the enemy) or the defenders (value = 1).
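# Sketch of joining a battle in progress on the defenders' side (assumes the
# player is already in an encounter with two fighting parties):
#     (select_enemy, 1),  # attackers (side 1) become the enemy
#     (encounter_attack),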
set_passage_menu = 1304 # (set_passage_menu, <value>),
# When setting up a mission, this allows you to determine what game menu will be used for that mission's passages instead of "mnu_town". The passage menu item number determines what menu option (in sequential order, starting from 0) will be executed when the player activates that passage on the scene. Note that the menu option's condition code block will be ignored.
start_mission_conversation = 1920 # (start_mission_conversation, <troop_id>),
# During the mission, initiates the dialog with specified troop.
set_conversation_speaker_troop = 2197 # (set_conversation_speaker_troop, <troop_id>),
# Allows dynamically switching speaking troops during the dialog when the developer doesn't know in advance who will be doing the speaking. Should be placed in the post-talk code section of a dialog entry.
set_conversation_speaker_agent = 2198 # (set_conversation_speaker_agent, <agent_id>),
# Allows dynamically switching speaking agents during the dialog when the developer doesn't know in advance who will be doing the speaking. Should be placed in the post-talk code section of a dialog entry.
store_conversation_agent = 2199 # (store_conversation_agent, <destination>),
# Stores identifier of agent who is currently speaking.
store_conversation_troop = 2200 # (store_conversation_troop, <destination>),
# Stores identifier of troop who is currently speaking.
store_partner_faction = 2201 # (store_partner_faction, <destination>),
# Stores faction of the troop player is speaking to.
store_encountered_party = 2202 # (store_encountered_party, <destination>),
# Stores identifier of the encountered party.
store_encountered_party2 = 2203 # (store_encountered_party2, <destination>),
# Stores the identifier of the second encountered party (when the first party is in battle, this one will return its battle opponent).
set_encountered_party = 2205 # (set_encountered_party, <party_no>),
# Sets the specified party as encountered by the player, but does not run the entire encounter routine. Used in Native during chargen to set up the starting town and then immediately throw the player into a street fight without showing him the town menu.
end_current_battle = 1307 # (end_current_battle),
# Apparently ends the battle between the player's party and its opponent. Exact effects not clear. 4research.
# Operations specific to dialogs
store_repeat_object = 50 # (store_repeat_object, <destination>),
# Used in the dialogs code in combination with repeat_for_* dialog parameters when creating dynamic player responses. Stores the value for the current iteration (i.e. a faction ID when repeat_for_factions is used, etc).
talk_info_show = 2020 # (talk_info_show, <hide_or_show>),
# Used in the dialogs code to display the relations bar on the opponent's portrait when the mouse is hovering over it (value = 1), or to disable this functionality (value = 0).
talk_info_set_relation_bar = 2021 # (talk_info_set_relation_bar, <value>),
# Sets the relations value for relationship bar in the dialog. Value should be in range -100..100.
talk_info_set_line = 2022 # (talk_info_set_line, <line_no>, <string_no>)
# Sets the additional text information (usually troop name) to be displayed together with the relations bar.
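# Sketch for a dialog's pre-talk code: show the relation bar with a caption
# line. The relation value and troop ID are placeholders, and passing a string
# register to (talk_info_set_line) is an assumption here:
#     (talk_info_show, 1),
#     (talk_info_set_relation_bar, 25),
#     (str_store_troop_name, s1, "trp_example_lord"),
#     (talk_info_set_line, 0, s1),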
################################################################################
# [ Z20 ] SCENES AND MISSIONS
################################################################################
# To put the player into a 3D scene, you need two things. First is the scene
# itself. All scenes are defined in module_scenes.py file. The second element
# is no less important, and it's called mission template. Mission template
# will determine the context of the events on the scene - who will spawn
# where, who will be hostile or friendly to player or to each other, etc.
# Because of all this, when player is put on the 3D scene in the game, it is
# commonly said that player is "in a mission".
# Conditional operations
all_enemies_defeated = 1003 # (all_enemies_defeated, [team_id]),
# Checks if all agents from the specified team are defeated. When team_id is omitted, the default enemy team is checked.
race_completed_by_player = 1004 # (race_completed_by_player),
# Not documented. Not used in Native. Apparently deprecated.
num_active_teams_le = 1005 # (num_active_teams_le, <value>),
# Checks that the number of active teams (i.e. teams with at least one active agent) is less than or equal to given value.
main_hero_fallen = 1006 # (main_hero_fallen),
# Checks that the player has been knocked out.
scene_allows_mounted_units = 1834 # (scene_allows_mounted_units),
# Not documented. Used in multiplayer, but it's not clear where horses could be disallowed in the first place. 4research.
is_zoom_disabled = 2222 # (is_zoom_disabled),
# Version 1.153+. Checks that the zoom is currently disabled in the game.
# Scene slot operations
scene_set_slot = 503 # (scene_set_slot, <scene_id>, <slot_no>, <value>),
scene_get_slot = 523 # (scene_get_slot, <destination>, <scene_id>, <slot_no>),
scene_slot_eq = 543 # (scene_slot_eq, <scene_id>, <slot_no>, <value>),
scene_slot_ge = 563 # (scene_slot_ge, <scene_id>, <slot_no>, <value>),
# Scene visitors handling operations
add_troop_to_site = 1250 # (add_troop_to_site, <troop_id>, <scene_id>, <entry_no>),
# Sets the troop's position in the world to the specified scene and entry point. Entry point must have mtef_scene_source type. Agent will always appear at that entry when entering that scene. No longer used in Native.
remove_troop_from_site = 1251 # (remove_troop_from_site, <troop_id>, <scene_id>),
# Removes the troop from the specified scene. No longer used in Native.
modify_visitors_at_site = 1261 # (modify_visitors_at_site, <scene_id>),
# Declares the scene whose visitors will be modified from that moment on.
reset_visitors = 1262 # (reset_visitors),
# Resets all visitors to the scene.
set_visitor = 1263 # (set_visitor, <entry_no>, <troop_id>, [<dna>]),
# Adds the specified troop as the visitor to the entry point of the scene defined with (modify_visitors_at_site). Entry point must have mtef_visitor_source type. Optional DNA parameter allows for randomization of agent looks (only applies to non-hero troops).
set_visitors = 1264 # (set_visitors, <entry_no>, <troop_id>, <number_of_troops>),
# Same as (set_visitor), but spawns an entire group of the specified troop type.
add_visitors_to_current_scene = 1265 # (add_visitors_to_current_scene, <entry_no>, <troop_id>, <number_of_troops>, <team_no>, <group_no>),
# Adds a number of troops to the specified entry point when the scene is already loaded. Team and group parameters are used in multiplayer mode only, singleplayer mode uses team settings for selected entry point as defined in module_mission_templates.py.
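# Typical visitor setup sketch for a town scene (scene, entry and troop IDs
# are placeholders):
#     (modify_visitors_at_site, "scn_town_1_tavern"),
#     (reset_visitors),
#     (set_visitor, 9, "trp_tavernkeeper"),
#     (set_visitors, 16, "trp_townsman", 3),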
mission_tpl_entry_set_override_flags = 1940 # (mission_tpl_entry_set_override_flags, <mission_template_id>, <entry_no>, <value>),
# Allows modder to use a different set of equipment override flags (see af_* constants in header_mission_templates.py) for the selected entry point.
mission_tpl_entry_clear_override_items = 1941 # (mission_tpl_entry_clear_override_items, <mission_template_id>, <entry_no>),
# Clears the list of override equipment provided by the entry point definition in module_mission_templates.py.
mission_tpl_entry_add_override_item = 1942 # (mission_tpl_entry_add_override_item, <mission_template_id>, <entry_no>, <item_kind_id>),
# Specified item will be added to any agent spawning on specified entry point.
# Mission/scene general operations
set_mission_result = 1906 # (set_mission_result, <value>),
# Sets the result of the current mission (1 for victory, -1 for defeat).
finish_mission = 1907 # (finish_mission, <delay_in_seconds>),
# Exits the scene after the specified delay.
set_jump_mission = 1911 # (set_jump_mission, <mission_template_id>),
# Tells the game to use the specified mission template for the next mission. Apparently should precede the call to (jump_to_scene).
jump_to_scene = 1910 # (jump_to_scene, <scene_id>, [entry_no]),
# Tells the game to use the specified scene for the next mission. Usually followed by (change_screen_mission) call. Parameter entry_no does not seem to have any effect.
set_jump_entry = 1912 # (set_jump_entry, <entry_no>),
# Defines what entry point the player will appear at when the mission starts.
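# The standard mission launch sequence, sketched with placeholder IDs:
#     (set_jump_mission, "mt_town_default"),
#     (jump_to_scene, "scn_town_1_center"),
#     (set_jump_entry, 1),
#     (change_screen_mission),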
store_current_scene = 2211 # (store_current_scene, <destination>),
# Retrieves the identifier of the current scene. Note that the operation will return the scene id even after the mission is completed and the player is already on global map.
close_order_menu = 1789 # (close_order_menu),
# Version 1.161+. If orders menu is currently open, it will be closed.
entry_point_get_position = 1780 # (entry_point_get_position, <position>, <entry_no>),
# Retrieves the position of the entry point on the scene.
entry_point_set_position = 1781 # (entry_point_set_position, <entry_no>, <position>),
# Moves the entry point to the specified position on the scene.
entry_point_is_auto_generated = 1782 # (entry_point_is_auto_generated, <entry_no>),
# Checks that the entry point is auto-generated (in other words, there was no such entry point placed in the scene file).
# Scene parameters handling
scene_set_day_time = 1266 # (scene_set_day_time, <value>),
# Defines the time for the scene to force the engine to select a different skybox than the one dictated by current game time. Must be called within ti_before_mission_start trigger in module_mission_templates.py. Value should be in range 0..23.
set_rain = 1797 # (set_rain, <rain-type>, <strength>),
# Sets a new weather for the mission. Rain_type values: 0 = clear, 1 = rain, 2 = snow. Strength is in range 0..100.
set_fog_distance = 1798 # (set_fog_distance, <distance_in_meters>, [fog_color]),
# Sets the density (and optionally color) of the fog for the mission.
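# Sketch of forcing a rainy evening for a mission; per the notes above,
# (scene_set_day_time) must run inside a ti_before_mission_start trigger:
#     (scene_set_day_time, 20),
#     (set_rain, 1, 60),
#     (set_fog_distance, 120),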
set_skybox = 2389 # (set_skybox, <non_hdr_skybox_index>, <hdr_skybox_index>),
# Version 1.153+. Forces the scene to be rendered with specified skybox. Index of -1 will disable.
set_startup_sun_light = 2390 # (set_startup_sun_light, <r>, <g>, <b>),
# Version 1.153+. Defines the sunlight color for the scene.
set_startup_ambient_light = 2391 # (set_startup_ambient_light, <r>, <g>, <b>),
# Version 1.153+. Defines the ambient light color for the scene.
set_startup_ground_ambient_light = 2392 # (set_startup_ground_ambient_light, <r>, <g>, <b>),
# Version 1.153+. Defines the ambient light color for the ground.
get_startup_sun_light = 2394 # (get_startup_sun_light, <position_no>),
# Version 1.165+. Returns startup sunlight color in (x, y, z) coordinates of position register.
get_startup_ambient_light = 2395 # (get_startup_ambient_light, <position_no>),
# Version 1.165+. Returns startup ambient light color in (x, y, z) coordinates of position register.
get_startup_ground_ambient_light = 2396 # (get_startup_ground_ambient_light, <position_no>),
# Version 1.165+. Returns startup ambient ground lighting color in (x, y, z) coordinates of position register.
get_battle_advantage = 1690 # (get_battle_advantage, <destination>),
# Retrieves the calculated battle advantage.
set_battle_advantage = 1691 # (set_battle_advantage, <value>),
# Sets a new value for battle advantage.
get_scene_boundaries = 1799 # (get_scene_boundaries, <position_min>, <position_max>),
# Retrieves the coordinates of the top-left and bottom-right corner of the scene to the provided position registers.
mission_enable_talk = 1935 # (mission_enable_talk),
# Allows dialogue with agents on the scene.
mission_disable_talk = 1936 # (mission_disable_talk),
# Disables dialogue with agents on the scene.
mission_get_time_speed = 2002 # (mission_get_time_speed, <destination_fixed_point>),
# Retrieves current time speed factor for the mission.
mission_set_time_speed = 2003 # (mission_set_time_speed, <value_fixed_point>),
# Instantly changes the speed of time during the mission. Speed of time cannot be set to zero or below. Operation only works when cheat mode is enabled.
mission_time_speed_move_to_value = 2004 # (mission_time_speed_move_to_value, <value_fixed_point>, <duration-in-1/1000-seconds>),
# Changes the speed of time during the mission gradually, within the specified duration period. Speed of time cannot be set to zero or below. Operation only works when cheat mode is enabled.
mission_set_duel_mode = 2006 # (mission_set_duel_mode, <value>),
# Sets duel mode for the multiplayer mission. Values: 0 = off, 1 = on.
store_zoom_amount = 2220 # (store_zoom_amount, <destination_fixed_point>),
# Version 1.153+. Stores current zoom rate.
set_zoom_amount = 2221 # (set_zoom_amount, <value_fixed_point>),
# Version 1.153+. Sets new zoom rate.
# Mission timers
reset_mission_timer_a = 2375 # (reset_mission_timer_a),
# Resets the value of first mission timer and starts it from zero.
reset_mission_timer_b = 2376 # (reset_mission_timer_b),
# Resets the value of second mission timer and starts it from zero.
reset_mission_timer_c = 2377 # (reset_mission_timer_c),
# Resets the value of third mission timer and starts it from zero.
store_mission_timer_a = 2370 # (store_mission_timer_a, <destination>),
# Retrieves current value of first mission timer, in seconds.
store_mission_timer_b = 2371 # (store_mission_timer_b, <destination>),
# Retrieves current value of second mission timer, in seconds.
store_mission_timer_c = 2372 # (store_mission_timer_c, <destination>),
# Retrieves current value of third mission timer, in seconds.
store_mission_timer_a_msec = 2365 # (store_mission_timer_a_msec, <destination>),
# Retrieves current value of first mission timer, in milliseconds.
store_mission_timer_b_msec = 2366 # (store_mission_timer_b_msec, <destination>),
# Retrieves current value of second mission timer, in milliseconds.
store_mission_timer_c_msec = 2367 # (store_mission_timer_c_msec, <destination>),
# Retrieves current value of third mission timer, in milliseconds.
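# Countdown sketch using the first mission timer: reset it when the mission
# begins, then poll it from a mission template trigger (":elapsed" is a local
# variable; the 120-second limit is arbitrary):
#     (reset_mission_timer_a),
#     ...
#     (store_mission_timer_a, ":elapsed"),
#     (ge, ":elapsed", 120),
#     (finish_mission, 3),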
# Camera and rendering operations
mission_cam_set_mode = 2001 # (mission_cam_set_mode, <mission_cam_mode>, <duration-in-1/1000-seconds>, <value>),
# Not documented. Changes main camera mode. Camera mode is 0 for automatic and 1 for manual (controlled by code). The duration parameter is used when switching from manual to auto, to determine how long the camera will take to move to its new position. The third parameter is not documented.
mission_cam_set_screen_color = 2008 # (mission_cam_set_screen_color, <value>),
# Not documented. Paints the screen with solid color. Parameter <value> contains color code with alpha component. Can be used to block screen entirely, add tint etc.
mission_cam_animate_to_screen_color = 2009 # (mission_cam_animate_to_screen_color, <value>, <duration-in-1/1000-seconds>),
# Not documented. Same as above, but color change is gradual. Used in Native to fill the screen with white before the end of marriage scene.
mission_cam_get_position = 2010 # (mission_cam_get_position, <position_register_no>)
# Retrieves the current position of camera during the mission (i.e. the point from which the player is observing the game).
mission_cam_set_position = 2011 # (mission_cam_set_position, <position_register_no>)
# Moves the camera to the specified position during the mission.
mission_cam_animate_to_position = 2012 # (mission_cam_animate_to_position, <position_register_no>, <duration-in-1/1000-seconds>, <value>)
# Moves the camera to the specified position smoothly. Second parameter determines how long it will take camera to move to destination, third parameter determines whether camera velocity will be linear (value = 0) or non-linear (value = 1).
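# Scripted camera pan sketch: switch the camera to manual mode, then glide to
# a point above an agent (":agent" is a placeholder; 300 = 3 meters in
# centimeters, 3000 = 3 seconds):
#     (mission_cam_set_mode, 1, 0, 0),
#     (agent_get_position, pos1, ":agent"),
#     (position_move_z, pos1, 300),
#     (mission_cam_animate_to_position, pos1, 3000, 1),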
mission_cam_get_aperture = 2013 # (mission_cam_get_aperture, <destination>)
# Not documented. View angle?
mission_cam_set_aperture = 2014 # (mission_cam_set_aperture, <value>)
# Not documented.
mission_cam_animate_to_aperture = 2015 # (mission_cam_animate_to_aperture, <value>, <duration-in-1/1000-seconds>, <value>)
# Not documented. If value = 0, camera velocity will be linear; otherwise it will be non-linear.
mission_cam_animate_to_position_and_aperture = 2016 # (mission_cam_animate_to_position_and_aperture, <position_register_no>, <value>, <duration-in-1/1000-seconds>, <value>)
# Not documented. If value = 0, camera velocity will be linear; otherwise it will be non-linear.
mission_cam_set_target_agent = 2017 # (mission_cam_set_target_agent, <agent_id>, <value>)
# Not documented. If value = 0, the agent's rotation is not used; otherwise it is.
mission_cam_clear_target_agent = 2018 # (mission_cam_clear_target_agent)
# Not documented.
mission_cam_set_animation = 2019 # (mission_cam_set_animation, <anim_id>),
# Not documented.
mouse_get_world_projection = 751 # (mouse_get_world_projection, <position_no_1>, <position_no_2>),
# Version 1.161+. Returns current camera coordinates (first position) and mouse projection to the back of the world (second position). Rotation data of resulting positions seems unreliable.
cast_ray = 1900 # (cast_ray, <destination>, <hit_position_register>, <ray_position_register>, [<ray_length_fixed_point>]),
# Version 1.161+. Casts a ray starting from <ray_position_register> and stores the closest hit position into <hit_position_register> (fails if no hits). If the body hit is a scene prop, its instance id will be stored into <destination>, otherwise it will be -1. Optional <ray_length> parameter seems to have no effect.
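# Sketch (version 1.161+): probing for a scene prop under the mouse cursor.
# Which of the two resulting positions should serve as the ray source is an
# assumption here; 4research.
#     (mouse_get_world_projection, pos1, pos2),
#     (cast_ray, ":hit_instance", pos3, pos2),
#     (ge, ":hit_instance", 0),  # -1 means the body hit was not a scene prop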
set_postfx = 2386 # (set_postfx, ???)
# This operation is not documented, nor could any examples of its use be found. Parameters are unknown.
set_river_shader_to_mud = 2387 # (set_river_shader_to_mud, ???)
# Changes the river material to a muddy environment version. This operation is not documented, nor could any examples of its use be found. Parameters are unknown.
rebuild_shadow_map = 2393 # (rebuild_shadow_map),
# Version 1.153+. UNTESTED. Effects unknown. Rebuilds shadow map for the current scene. Apparently useful after heavy manipulation with scene props.
set_shader_param_int = 2400 # (set_shader_param_int, <parameter_name>, <value>),
# Version 1.153+. UNTESTED. Allows direct manipulation of shader parameters. Operation scope is unknown, possibly global. Parameter is an int value.
set_shader_param_float = 2401 # (set_shader_param_float, <parameter_name>, <value_fixed_point>),
# Version 1.153+. Allows direct manipulation of shader parameters. Operation scope is unknown, possibly global. Parameter is a float value.
set_shader_param_float4 = 2402 # (set_shader_param_float4, <parameter_name>, <valuex>, <valuey>, <valuez>, <valuew>),
# Version 1.153+. Allows direct manipulation of shader parameters. Operation scope is unknown, possibly global. Parameter is a set of 4 float values.
set_shader_param_float4x4 = 2403 # (set_shader_param_float4x4, <parameter_name>, [0][0], [0][1], [0][2], [1][0], [1][1], [1][2], [2][0], [2][1], [2][2], [3][0], [3][1], [3][2]),
# Version 1.153+. Allows direct manipulation of shader parameters. Operation scope is unknown, possibly global. Parameter is a set of 4x4 float values.
################################################################################
# [ Z21 ] SCENE PROPS, SCENE ITEMS, LIGHT SOURCES AND PARTICLE SYSTEMS
################################################################################
# On each scene in the game you can find scene props and scene items.
# Scene props are the building bricks of the scene. Nearly every 3D object you
# will see on any scene in the game is a scene prop, with the exception of
# terrain and flora (on some scenes flora elements are actually scene props
# as well though).
# Just like with troops and agents, it is important to differentiate between
# scene props and scene prop instances. You can have a dozen archer agents on
# the scene, and each of them will be an instance of the archer troop. Scene
# props are the same - there can be many castle wall sections on the scene,
# and these are instances of the same castle wall scene prop.
# It is also possible to use game items as elements of the scene. These are
# the scene items, and they behave just like normal scene props. However all
# operations will affect either scene prop instances, or scene items, but
# not both.
# Finally, there are spawned items. These are the "dropped" items which the
# player can pick up during the mission.
# Conditional operations
prop_instance_is_valid = 1838 # (prop_instance_is_valid, <scene_prop_instance_id>),
# Checks that the reference to a scene prop instance is valid (i.e. it was not removed).
prop_instance_is_animating = 1862 # (prop_instance_is_animating, <destination>, <scene_prop_id>),
# Checks that the scene prop instance is currently animating.
prop_instance_intersects_with_prop_instance = 1880 # (prop_instance_intersects_with_prop_instance, <checked_scene_prop_id>, <scene_prop_id>),
# Checks if two scene props are intersecting (i.e. collided). Useful when animating scene props movement. Pass -1 for second parameter to check the prop against all other props on the scene.
scene_prop_has_agent_on_it = 1801 # (scene_prop_has_agent_on_it, <scene_prop_instance_id>, <agent_id>)
# Checks that the specified agent is standing on the scene prop instance.
# Scene prop instance slot operations
scene_prop_set_slot = 510 # (scene_prop_set_slot, <scene_prop_instance_id>, <slot_no>, <value>),
scene_prop_get_slot = 530 # (scene_prop_get_slot, <destination>, <scene_prop_instance_id>, <slot_no>),
scene_prop_slot_eq = 550 # (scene_prop_slot_eq, <scene_prop_instance_id>, <slot_no>, <value>),
scene_prop_slot_ge = 570 # (scene_prop_slot_ge, <scene_prop_instance_id>, <slot_no>, <value>),
# Scene prop general operations
prop_instance_get_scene_prop_kind = 1853 # (prop_instance_get_scene_prop_kind, <destination>, <scene_prop_id>),
# Retrieves the scene prop kind (i.e. the scene prop ID as listed in module_scene_props.py) for the specified prop instance.
scene_prop_get_num_instances = 1810 # (scene_prop_get_num_instances, <destination>, <scene_prop_id>),
# Retrieves the total number of instances of a specified scene prop on the current scene.
scene_prop_get_instance = 1811 # (scene_prop_get_instance, <destination>, <scene_prop_id>, <instance_no>),
# Retrieves the reference to a scene prop instance by its number.
scene_prop_enable_after_time = 1800 # (scene_prop_enable_after_time, <scene_prop_id>, <time_period>),
# Prevents usable scene prop from being used for the specified time period in 1/100th of second. Commonly used to implement "cooldown" periods.
set_spawn_position = 1970 # (set_spawn_position, <position>),
# Defines the position which will later be used by (spawn_scene_prop), (spawn_scene_item), (spawn_agent) and (spawn_horse) operations.
spawn_scene_prop = 1974 # (spawn_scene_prop, <scene_prop_id>),
# Spawns a new scene prop instance of the specified type at the position defined by the last call to (set_spawn_position). Operation was supposed to store the prop_instance_id of the spawned prop in reg0, but does not do this at the moment.
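# Spawn sketch: place a prop at a position previously stored in pos1 (the
# prop ID is a placeholder):
#     (set_spawn_position, pos1),
#     (spawn_scene_prop, "spr_barrel"),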
prop_instance_get_variation_id = 1840 # (prop_instance_get_variation_id, <destination>, <scene_prop_id>),
# Retrieves the first variation ID number for the specified scene prop instance.
prop_instance_get_variation_id_2 = 1841 # (prop_instance_get_variation_id_2, <destination>, <scene_prop_id>),
# Retrieves the second variation ID number for the specified scene prop instance.
replace_prop_instance = 1889 # (replace_prop_instance, <scene_prop_id>, <new_scene_prop_id>),
# Replaces a single scene prop instance with an instance of another scene prop (usually with the same dimensions, but not necessarily so). Can only be called in ti_before_mission_start trigger in module_mission_templates.py.
replace_scene_props = 1890 # (replace_scene_props, <old_scene_prop_id>, <new_scene_prop_id>),
# Replaces all instances of specified scene prop type with another scene prop type. Commonly used to replace damaged walls with their intact versions during normal visits to castle scenes. Can only be called in ti_before_mission_start trigger in module_mission_templates.py.
scene_prop_fade_out = 1822 # (scene_prop_fade_out, <scene_prop_id>, <fade_out_time>)
# Version 1.153+. Makes the scene prop instance disappear within specified time.
scene_prop_fade_in = 1823 # (scene_prop_fade_in, <scene_prop_id>, <fade_in_time>)
# Version 1.153+. Makes the scene prop instance reappear within specified time.
prop_instance_set_material = 2617 # (prop_instance_set_material, <prop_instance_no>, <sub_mesh_no>, <string_register>),
# Version 1.161+. Pass -1 as <sub_mesh_no> to change the materials of all meshes. 4research.
# Scene prop manipulation
scene_prop_get_visibility = 1812 # (scene_prop_get_visibility, <destination>, <scene_prop_id>),
# Retrieves the current visibility state of the scene prop instance (1 = visible, 0 = invisible).
scene_prop_set_visibility = 1813 # (scene_prop_set_visibility, <scene_prop_id>, <value>),
# Shows (value = 1) or hides (value = 0) the scene prop instance. What does it do with collision? 4research.
scene_prop_get_hit_points = 1815 # (scene_prop_get_hit_points, <destination>, <scene_prop_id>),
# Retrieves current number of hit points that the scene prop instance has.
scene_prop_get_max_hit_points = 1816 # (scene_prop_get_max_hit_points, <destination>, <scene_prop_id>),
# Retrieves the maximum number of hit points that the scene prop instance has (useful to calculate the percent of damage).
scene_prop_set_hit_points = 1814 # (scene_prop_set_hit_points, <scene_prop_id>, <value>),
# Sets the number of hit points that the scene prop has. Both current and max hit points are affected. Only makes sense for sokf_destructible scene props.
scene_prop_set_cur_hit_points = 1820 # (scene_prop_set_cur_hit_points, <scene_prop_id>, <value>),
# Version 1.153+. Sets current HP amount for scene prop.
prop_instance_receive_damage = 1877 # (prop_instance_receive_damage, <scene_prop_id>, <agent_id>, <damage_value>),
# Makes scene prop instance receive specified amount of damage from any arbitrary agent. Agent reference is apparently necessary to properly initialize ti_on_scene_prop_hit trigger parameters.
prop_instance_refill_hit_points = 1870 # (prop_instance_refill_hit_points, <scene_prop_id>),
# Restores hit points of a scene prop instance to their maximum value.
scene_prop_get_team = 1817 # (scene_prop_get_team, <value>, <scene_prop_id>),
# Retrieves the team controlling the scene prop instance.
scene_prop_set_team = 1818 # (scene_prop_set_team, <scene_prop_id>, <value>),
# Assigns the scene prop instance to a certain team.
scene_prop_set_prune_time = 1819 # (scene_prop_set_prune_time, <scene_prop_id>, <value>),
# Not documented. Not used in Native. Taleworlds comment: Prune time can only be set to objects that are already on the prune queue. Static objects are not affected by this operation.
prop_instance_get_position = 1850 # (prop_instance_get_position, <position>, <scene_prop_id>),
# Retrieves the prop instance current position on the scene.
prop_instance_get_starting_position = 1851 # (prop_instance_get_starting_position, <position>, <scene_prop_id>),
# Retrieves the prop instance starting position on the scene (i.e. the place where it was positioned when initialized).
prop_instance_set_position = 1855 # (prop_instance_set_position, <scene_prop_id>, <position>, [dont_send_to_clients]),
# Teleports prop instance to another position. Optional flag dont_send_to_clients can be used on the server to prevent position change from being replicated to client machines (useful when doing some calculations which require to move the prop temporarily to another place).
prop_instance_animate_to_position = 1860 # (prop_instance_animate_to_position, <scene_prop_id>, position, <duration-in-1/100-seconds>),
# Moves prop instance to another position during the specified time frame (i.e. animates). Time is specified in 1/100th of second.
prop_instance_get_animation_target_position = 1863 # (prop_instance_get_animation_target_position, <pos>, <scene_prop_id>)
# Retrieves the position that the prop instance is currently animating to.
prop_instance_stop_animating = 1861 # (prop_instance_stop_animating, <scene_prop_id>),
# Stops the animation of the prop instance at its current position.
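# Sketch of sliding a prop instance 2 meters straight up over 3 seconds
# (":door_instance" is a placeholder; offsets are in cm, duration in 1/100 sec):
#     (prop_instance_get_position, pos1, ":door_instance"),
#     (position_move_z, pos1, 200),
#     (prop_instance_animate_to_position, ":door_instance", pos1, 300),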
prop_instance_get_scale = 1852 # (prop_instance_get_scale, <position>, <scene_prop_id>),
# Retrieves the current scaling factors of the prop instance.
prop_instance_set_scale = 1854 # (prop_instance_set_scale, <scene_prop_id>, <value_x_fixed_point>, <value_y_fixed_point>, <value_z_fixed_point>),
# Sets new scaling factors for the scene prop.
prop_instance_enable_physics = 1864 # (prop_instance_enable_physics, <scene_prop_id>, <value>),
# Enables (value = 1) or disables (value = 0) physics calculation (gravity, collision checks) for the scene prop instance.
prop_instance_initialize_rotation_angles = 1866 # (prop_instance_initialize_rotation_angles, <scene_prop_id>),
# Should be called to initialize the scene prop instance prior to any calls to (prop_instance_rotate_to_position).
prop_instance_rotate_to_position = 1865 # (prop_instance_rotate_to_position, <scene_prop_id>, <position>, <duration-in-1/100-seconds>, <total_rotate_angle_fixed_point>),
# Specified prop instance will move to the target position within the specified duration of time, and within the same time it will rotate for the specified angle. Used in Native code to simulate behavior of belfry wheels and rotating winches.
prop_instance_clear_attached_missiles = 1885 # (prop_instance_clear_attached_missiles, <scene_prop_id>),
# Version 1.153+. Removes all missiles currently attached to the scene prop. Only works with dynamic scene props.
prop_instance_dynamics_set_properties = 1871 # (prop_instance_dynamics_set_properties, <scene_prop_id>, <position>),
# Initializes physical parameters of a scene prop. Position (X,Y) coordinates are used to store object's mass and friction coefficient. Coordinate Z is reserved (set it to zero just in case). Scene prop must be defined as sokf_moveable|sokf_dynamic_physics, and a call to (prop_instance_enable_physics) must be previously made.
prop_instance_dynamics_set_velocity = 1872 # (prop_instance_dynamics_set_velocity, <scene_prop_id>, <position>),
# Sets current movement speed for a scene prop. Position's coordinates define velocity along corresponding axis. Same comments as for (prop_instance_dynamics_set_properties).
prop_instance_dynamics_set_omega = 1873 # (prop_instance_dynamics_set_omega, <scene_prop_id>, <position>),
# Sets current rotation speed for a scene prop. Position's coordinates define rotational speed around corresponding axis. Same comments as for (prop_instance_dynamics_set_properties).
prop_instance_dynamics_apply_impulse = 1874 # (prop_instance_dynamics_apply_impulse, <scene_prop_id>, <position>),
# Applies an impulse of specified scale to the scene prop. Position's coordinates define instant change in movement speed along corresponding axis. Same comments as for (prop_instance_dynamics_set_properties).
prop_instance_deform_to_time = 2610 # (prop_instance_deform_to_time, <prop_instance_no>, <value>),
# Version 1.161+. Deforms a vertex-animated scene prop to the specified vertex time. If you open the mesh in OpenBrf, the right one of the two "Time of frame" boxes contains the relevant value.
prop_instance_deform_in_range = 2611 # (prop_instance_deform_in_range, <prop_instance_no>, <start_frame>, <end_frame>, <duration-in-1/1000-seconds>),
# Version 1.161+. Animates a vertex-animated scene prop from start frame to end frame within the specified time period (in milliseconds). If you open the mesh in OpenBrf, the right one of the two "Time of frame" boxes contains the relevant values for the frame parameters.
prop_instance_deform_in_cycle_loop = 2612 # (prop_instance_deform_in_cycle_loop, <prop_instance_no>, <start_frame>, <end_frame>, <duration-in-1/1000-seconds>),
# Version 1.161+. Performs a looping animation of a vertex-animated scene prop within the specified vertex frame range and within the specified time (in milliseconds). If you open the mesh in OpenBrf, the right one of the two "Time of frame" boxes contains the relevant values for the frame parameters.
prop_instance_get_current_deform_progress = 2615 # (prop_instance_get_current_deform_progress, <destination>, <prop_instance_no>),
# Version 1.161+. Returns a percentage value between 0 and 100 if animation is still in progress. Returns 100 otherwise.
prop_instance_get_current_deform_frame = 2616 # (prop_instance_get_current_deform_frame, <destination>, <prop_instance_no>),
# Version 1.161+. Returns current frame of a vertex-animated scene prop, rounded to nearest integer value.
prop_instance_play_sound = 1881 # (prop_instance_play_sound, <scene_prop_id>, <sound_id>, [flags]),
# Version 1.153+. Makes the scene prop play a specified sound. See sf_* flags in header_sounds.py for reference on possible options.
prop_instance_stop_sound = 1882 # (prop_instance_stop_sound, <scene_prop_id>),
# Version 1.153+. Stops any sound currently played by the scene prop instance.
# Scene items operations
scene_item_get_num_instances = 1830 # (scene_item_get_num_instances, <destination>, <item_id>),
# Gets the number of specified scene items present on the scene. Scene items behave exactly like scene props (i.e. they cannot be picked up).
scene_item_get_instance = 1831 # (scene_item_get_instance, <destination>, <item_id>, <instance_no>),
# Retrieves the reference to a single instance of a scene item by its sequential number.
scene_spawned_item_get_num_instances = 1832 # (scene_spawned_item_get_num_instances, <destination>, <item_id>),
# Retrieves the number of specified spawned items present on the scene. Spawned items are actual items, i.e. they can be picked by player.
scene_spawned_item_get_instance = 1833 # (scene_spawned_item_get_instance, <destination>, <item_id>, <instance_no>),
# Retrieves the reference to a single instance of a spawned item by its sequential number.
replace_scene_items_with_scene_props = 1891 # (replace_scene_items_with_scene_props, <old_item_id>, <new_scene_prop_id>),
# Replaces all instances of specified scene item with scene props. Can only be called in ti_before_mission_start trigger in module_mission_templates.py.
set_spawn_position = 1970 # (set_spawn_position, <position>), ## DUPLICATE ENTRY
# Defines the position which will later be used by (spawn_scene_prop), (spawn_scene_item), (spawn_agent) and (spawn_horse) operations.
spawn_item = 1971 # (spawn_item, <item_kind_id>, <item_modifier>, [seconds_before_pruning]),
# Spawns a new item, possibly with a modifier, on the scene in the position specified by the previous call to (set_spawn_position). Optional parameter determines the time period (in seconds) after which the item will disappear. Using 0 will prevent the item from disappearing.
spawn_item_without_refill = 1976 # (spawn_item_without_refill, <item_kind_id>, <item_modifier>, [seconds_before_pruning]),
# Version 1.153+. UNTESTED. It is unclear how this is different from standard (spawn_item).
# Light sources and particle systems
set_current_color = 1950 # (set_current_color, <red_value>, <green_value>, <blue_value>),
# Sets color for subsequent calls to (add_point_light) etc. Color component ranges are 0..255.
set_position_delta = 1955 # (set_position_delta, <value>, <value>, <value>),
# Can only be called inside item or scene prop triggers. Sets (X,Y,Z) offsets from the item/prop current position for subsequent calls to (add_point_light) etc. Offsets are apparently in centimeters.
add_point_light = 1960 # (add_point_light, [flicker_magnitude], [flicker_interval]),
# Adds a point light source to an object with optional flickering magnitude (range 0..100) and flickering interval (in 1/100th of second). Uses position offset and color provided to previous calls to (set_position_delta) and (set_current_color). Can only be used in item triggers.
add_point_light_to_entity = 1961 # (add_point_light_to_entity, [flicker_magnitude], [flicker_interval]),
# Adds a point light source to an object with optional flickering magnitude (range 0..100) and flickering interval (in 1/100th of second). Uses position offset and color provided to previous calls to (set_position_delta) and (set_current_color). Can only be used in scene prop triggers.
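# Torch-like flame sketch, as it might appear inside an item's ti_on_init_item
# trigger (color and offset values are arbitrary):
#     (set_current_color, 250, 180, 120),
#     (set_position_delta, 0, 0, 60),
#     (add_point_light, 10, 30),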
particle_system_add_new = 1965 # (particle_system_add_new, <par_sys_id>, [position]),
# Adds a new particle system to an object. Uses position offset and color provided to previous calls to (set_position_delta) and (set_current_color). Can only be used in item/prop triggers.
particle_system_emit = 1968 # (particle_system_emit, <par_sys_id>, <value_num_particles>, <value_period>),
# Adds a particle system in some fancy way. Uses position offset and color provided to previous calls to (set_position_delta) and (set_current_color). Can only be used in item/prop triggers.
particle_system_burst = 1969 # (particle_system_burst, <par_sys_id>, <position>, [percentage_burst_strength]),
# Bursts a particle system in specified position.
particle_system_burst_no_sync = 1975 # (particle_system_burst_no_sync, <par_sys_id>, <position_no>, [percentage_burst_strength]),
# Version 1.153+. Same as above, but apparently does not synchronize this between server and client.
prop_instance_add_particle_system = 1886 # (prop_instance_add_particle_system, <scene_prop_id>, <par_sys_id>, <position_no>),
# Version 1.153+. Adds a new particle system to the scene prop. Note that <position_no> is local, i.e. in relation to scene prop's coordinates and rotation.
prop_instance_stop_all_particle_systems = 1887 # (prop_instance_stop_all_particle_systems, <scene_prop_id>),
# Version 1.153+. Removes all particle systems currently associated with scene prop instance.
################################################################################
# [ Z22 ] AGENTS AND TEAMS
################################################################################
# An agent represents a single soldier on the 3D scene. Always keep this in
# mind when dealing with regular troops. A party may have 30 Swadian Knights.
# They will form a single troop stack in the party, and they will all be
# copies of the one and only Swadian Knight troop. However when the battle
# starts, this stack will spawn 30 distinct Agents.
# Agents do not persist - they only exist in the game for the duration of the
# mission. As soon as the player returns to the world map, all agents who were
# present on the scene immediately disappear. If this was a battle during a
# normal game encounter, then the game will keep track of the battle results,
# and depending on the number of agents killed from all sides the engine will
# kill or wound some troops in the troop stacks of the parties who were
# participating in the battle.
# During the mission, all agents are split into teams. By default player and
# his companions are placed into Team 0, but this may be changed in the
# mission template or by code. Player's enemies are usually team 1 (though
# again, this is not set in stone). The Module System provides the modder
# with a great degree of control over team composition and the teams'
# relations to each other (you can make hostile, allied or neutral teams,
# and you can have more than one team on the scene).
# Conditional operations
agent_is_in_special_mode = 1693 # (agent_is_in_special_mode, <agent_id>),
# Checks that the agent is currently in scripted mode.
agent_is_routed = 1699 # (agent_is_routed, <agent_id>),
# Checks that the agent has fled from the map (i.e. reached the edge of the map in fleeing mode and then faded).
agent_is_alive = 1702 # (agent_is_alive, <agent_id>),
# Checks that the agent is alive.
agent_is_wounded = 1703 # (agent_is_wounded, <agent_id>),
# Checks that the agent has been knocked unconscious.
agent_is_human = 1704 # (agent_is_human, <agent_id>),
# Checks that the agent is human (i.e. not horse).
agent_is_ally = 1706 # (agent_is_ally, <agent_id>),
# Checks that the agent is allied to the player (belongs to player's party or allied party in current encounter).
agent_is_non_player = 1707 # (agent_is_non_player, <agent_id>),
# Checks that the agent is not a player.
agent_is_defender = 1708 # (agent_is_defender, <agent_id>),
# Checks that the agent belongs to the defending side (see encounter operations for details).
agent_is_active = 1712 # (agent_is_active, <agent_id>),
# Checks that the agent reference is active. This will succeed for dead or routed agents, for as long as the agent reference itself is valid.
agent_has_item_equipped = 1729 # (agent_has_item_equipped, <agent_id>, <item_id>),
# Checks that the agent has a specific item equipped.
agent_is_in_parried_animation = 1769 # (agent_is_in_parried_animation, <agent_id>),
# Checks that the agent is currently in parrying animation (defending from some attack).
agent_is_alarmed = 1806 # (agent_is_alarmed, <agent_id>),
# Checks that the agent is alarmed (in combat mode with weapon drawn).
class_is_listening_order = 1775 # (class_is_listening_order, <team_no>, <sub_class>),
# Checks that the specified group of specified team is listening to player's orders.
teams_are_enemies = 1788 # (teams_are_enemies, <team_no>, <team_no_2>),
# Checks that the two teams are hostile to each other.
agent_is_in_line_of_sight = 1826 # (agent_is_in_line_of_sight, <agent_id>, <position_no>),
# Version 1.153+. Checks that the agent can be seen from specified position. Rotation of position register is not used (i.e. agent will be seen even if position is "looking" the other way).
# Team and agent slot operations
team_set_slot = 509 # (team_set_slot, <team_id>, <slot_no>, <value>),
team_get_slot = 529 # (team_get_slot, <destination>, <team_id>, <slot_no>),
team_slot_eq = 549 # (team_slot_eq, <team_id>, <slot_no>, <value>),
team_slot_ge = 569 # (team_slot_ge, <team_id>, <slot_no>, <value>),
agent_set_slot = 505 # (agent_set_slot, <agent_id>, <slot_no>, <value>),
agent_get_slot = 525 # (agent_get_slot, <destination>, <agent_id>, <slot_no>),
agent_slot_eq = 545 # (agent_slot_eq, <agent_id>, <slot_no>, <value>),
agent_slot_ge = 565 # (agent_slot_ge, <agent_id>, <slot_no>, <value>),
# Agent spawning, removal and general operations
add_reinforcements_to_entry = 1930 # (add_reinforcements_to_entry, <mission_template_entry_no>, <wave_size>),
# For battle missions, adds a reinforcement wave to the specified entry point. The additional parameter determines relative wave size. Agents in a reinforcement wave are taken from all parties of the side that the entry point belongs to, according to its mtef_team_* flags.
set_spawn_position = 1970 # (set_spawn_position, <position>), ## DUPLICATE ENTRY
# Defines the position which will later be used by (spawn_scene_prop), (spawn_scene_item), (spawn_agent) and (spawn_horse) operations.
spawn_agent = 1972 # (spawn_agent, <troop_id>),
# Spawns a new troop in the specified position and saves the reference to the new agent in reg0.
spawn_horse = 1973 # (spawn_horse, <item_kind_id>, <item_modifier>),
# Spawns a new horse (with any modifier) in the specified position and saves the reference to the new agent in reg0.
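# Spawn sketch: drop a new agent at pos1 and keep its reference (the troop ID
# is a placeholder; reg0 is filled by the engine as noted above):
#     (set_spawn_position, pos1),
#     (spawn_agent, "trp_looter"),
#     (assign, ":new_agent", reg0),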
remove_agent = 1755 # (remove_agent, <agent_id>),
# Immediately removes the agent from the scene.
agent_fade_out = 1749 # (agent_fade_out, <agent_id>),
# Fades out the agent from the scene (same effect as fleeing enemies when they get to the edge of map).
agent_play_sound = 1750 # (agent_play_sound, <agent_id>, <sound_id>),
# Makes the agent emit the specified sound.
agent_stop_sound = 1808 # (agent_stop_sound, <agent_id>),
# Stops whatever sound agent is currently performing.
agent_set_visibility = 2096 # (agent_set_visibility, <agent_id>, <value>),
# Version 1.153+. Sets agent visibility. 0 for invisible, 1 for visible.
get_player_agent_no = 1700 # (get_player_agent_no, <destination>),
# Retrieves the reference to the player-controlled agent. Singleplayer mode only.
agent_get_kill_count = 1723 # (agent_get_kill_count, <destination>, <agent_id>, [get_wounded]),
# Retrieves the total number of kills by the specified agent during this battle. Call with non-zero <get_wounded> parameter to retrieve the total number of enemies the agent has knocked down.
agent_get_position = 1710 # (agent_get_position, <position>, <agent_id>),
# Retrieves the position of the specified agent on the scene.
agent_set_position = 1711 # (agent_set_position, <agent_id>, <position>),
# Teleports the agent to specified position on the scene. Be careful with riders - you must teleport the horse, not the rider for the operation to work correctly!
agent_get_horse = 1714 # (agent_get_horse, <destination>, <agent_id>),
# Retrieves the reference to the horse agent that the specified agent is riding, or -1 if he's not riding a horse (or is a horse himself).
agent_get_rider = 1715 # (agent_get_rider, <destination>, <horse_agent_id>),
# Retrieves the reference to the rider agent who is riding the specified horse, or -1 if there's no rider or the specified agent is not a horse.
agent_get_party_id = 1716 # (agent_get_party_id, <destination>, <agent_id>),
# Retrieves the party that the specified agent belongs to (supposedly should only work in battle missions for agents spawned as starting/reinforcement waves).
agent_get_entry_no = 1717 # (agent_get_entry_no, <destination>, <agent_id>),
# Retrieves the entry point number where this agent has spawned. What does this return for agents spawned with (spawn_agent)? 4research.
agent_get_troop_id = 1718 # (agent_get_troop_id, <destination>, <agent_id>),
# Retrieves the troop type of the specified agent. Returns -1 for horses (because horses are items, not troops).
agent_get_item_id = 1719 # (agent_get_item_id, <destination>, <horse_agent_id>),
# Retrieves the item type of the specified horse agent. Returns -1 for humans.
# Agent combat parameters and stats
store_agent_hit_points = 1720 # (store_agent_hit_points, <destination>, <agent_id>, [absolute]),
# Retrieves current agent health. Optional last parameter determines whether actual health (absolute = 1) or relative percentile health (absolute = 0) is returned. Default is relative.
agent_set_hit_points = 1721 # (agent_set_hit_points, <agent_id>, <value>,[absolute]),
# Sets new value for agent health. Optional last parameter determines whether the value is interpreted as actual health (absolute = 1) or relative percentile health (absolute = 0). Default is relative.
agent_set_max_hit_points = 2090 # (agent_set_max_hit_points, <agent_id>, <value>, [absolute]),
# Version 1.153+. Changes agent's max hit points. Optional flag [absolute] determines if <value> is an absolute number of his points, or relative percentage (0..1000) of default value. Treated as percentage by default.
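# Example (a hedged sketch): topping an agent up to half health, using the
# default relative (percentile) mode of both operations.
#   (store_agent_hit_points, ":cur_hp", ":agent_no"),  # relative, 0..100
#   (lt, ":cur_hp", 50),
#   (agent_set_hit_points, ":agent_no", 50, 0),        # set to 50% of maximum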
agent_deliver_damage_to_agent = 1722 # (agent_deliver_damage_to_agent, <agent_id_deliverer>, <agent_id>, [damage_amount], [weapon_item_id]),
# Makes one agent deal damage to another. Parameter damage_amount is optional, if it is skipped or <= 0, then damage will be calculated using attacker's weapon item and stats (like a normal weapon attack). Optional parameter weapon_item_id was added in 1.153 and will force the game to calculate the damage using this weapon.
agent_deliver_damage_to_agent_advanced = 1827 # (agent_deliver_damage_to_agent_advanced, <destination>, <attacker_agent_id>, <agent_id>, <value>, [weapon_item_id]),
# Version 1.153+. Same as (agent_deliver_damage_to_agent), but resulting damage is returned. Also operation takes relations between agents into account, which may result in no damage, or even damage to attacker due to friendly fire rules.
add_missile = 1829 # (add_missile, <agent_id>, <starting_position>, <starting_speed_fixed_point>, <weapon_item_id>, <weapon_item_modifier>, <missile_item_id>, <missile_item_modifier>),
# Version 1.153+. Creates a missile with specified parameters. Note that <starting_position> parameter also determines the direction in which missile flies.
agent_get_speed = 1689 # (agent_get_speed, <position>, <agent_id>),
# Retrieves agent speed into the (X,Y) coordinates of the position register. What do these mean - speed along the world axes? 4research.
agent_set_no_death_knock_down_only = 1733 # (agent_set_no_death_knock_down_only, <agent_id>, <value>),
# Sets the agent as unkillable (value = 1) or normal (value = 0). Unkillable agents will drop on the ground instead of dying and will stand up afterwards.
agent_set_horse_speed_factor = 1734 # (agent_set_horse_speed_factor, <agent_id>, <speed_multiplier-in-1/100>),
# Multiplies agent's horse speed (and maneuverability?) by the specified percentile value (using 100 will restore normal horse speed). Note that this is called on the rider, not on the horse! Supposedly will persist even if the agent changes horses. 4research.
agent_set_speed_limit = 1736 # (agent_set_speed_limit, <agent_id>, <speed_limit(kilometers/hour)>),
# Limits agent speed by the specified value in kph. Use 5 for average walking speed. Affects only AI agents.
agent_set_damage_modifier = 2091 # (agent_set_damage_modifier, <agent_id>, <value>),
# Version 1.153+. Changes the damage delivered by this agent. Value is in percentage, 100 is default, 1000 is max possible value.
agent_set_accuracy_modifier = 2092 # (agent_set_accuracy_modifier, <agent_id>, <value>),
# Version 1.153+. Changes agent's accuracy (with ranged weapons?). Value is in percentage, 100 is default, value can be between [0..1000]
agent_set_speed_modifier = 2093 # (agent_set_speed_modifier, <agent_id>, <value>),
# Version 1.153+. Changes agent's speed. Value is in percentage, 100 is default, value can be between [0..1000]
agent_set_reload_speed_modifier = 2094 # (agent_set_reload_speed_modifier, <agent_id>, <value>),
# Version 1.153+. Changes agent's reload speed. Value is in percentage, 100 is default, value can be between [0..1000]
agent_set_use_speed_modifier = 2095 # (agent_set_use_speed_modifier, <agent_id>, <value>),
# Version 1.153+. Changes agent's speed with using various scene props. Value is in percentage, 100 is default, value can be between [0..1000]
agent_set_ranged_damage_modifier = 2099 # (agent_set_ranged_damage_modifier, <agent_id>, <value>),
# Version 1.157+. Changes agent's damage with ranged weapons. Value is in percentage, 100 is default, value can be between [0..1000]
agent_get_time_elapsed_since_removed = 1760 # (agent_get_time_elapsed_since_removed, <destination>, <agent_id>),
# Retrieves the number of seconds that have passed since agent's death. Native uses this only for multiplayer to track player's respawns. Can it be used in singleplayer too? 4research.
# Agent equipment
agent_refill_wielded_shield_hit_points = 1692 # (agent_refill_wielded_shield_hit_points, <agent_id>),
# Restores all hit points for the shield the agent is currently wielding.
agent_set_invulnerable_shield = 1725 # (agent_set_invulnerable_shield, <agent_id>, <value>),
# Makes the agent invulnerable to any damage (value = 1) or makes him vulnerable again (value = 0).
agent_get_wielded_item = 1726 # (agent_get_wielded_item, <destination>, <agent_id>, <hand_no>),
# Retrieves the item reference that the agent is currently wielding in his right hand (hand_no = 0) or left hand (hand_no = 1). Note that weapons are always wielded in right hand, and shield in left hand. When wielding a two-handed weapon (including bows and crossbows), this operation will return -1 for left hand.
agent_get_ammo = 1727 # (agent_get_ammo, <destination>, <agent_id>, <value>),
# Retrieves the current ammo amount agent has for his wielded item (value = 1) or all his items (value = 0).
agent_get_item_cur_ammo = 1977 # (agent_get_item_cur_ammo, <destination>, <agent_id>, <slot_no>),
# Version 1.153+. Returns remaining ammo for specified agent's item.
agent_refill_ammo = 1728 # (agent_refill_ammo, <agent_id>),
# Refills all ammo and throwing weapon stacks that the agent has in his equipment.
agent_set_wielded_item = 1747 # (agent_set_wielded_item, <agent_id>, <item_id>),
# Forces the agent to wield the specified item. Agent must have that item in his equipment for this to work. Use item_id = -1 to unwield any currently wielded item.
agent_equip_item = 1779 # (agent_equip_item, <agent_id>, <item_id>, [weapon_slot_no]),
# Adds the specified item to agent and forces him to equip it. Optional weapon_slot_no parameter is only used with weapons and will put the newly added item to that slot (range 1..4). If it is omitted with a weapon item, then the agent must have an empty weapon slot for the operation to succeed.
agent_unequip_item = 1774 # (agent_unequip_item, <agent_id>, <item_id>, [weapon_slot_no]),
# Removes the specified item from the agent. Optional parameter weapon_slot_no is in range 1..4 and determines what weapon slot to remove (item_id must still be set correctly).
agent_set_ammo = 1776 # (agent_set_ammo, <agent_id>, <item_id>, <value>),
# Sets current agent ammo amount to the specified value between 0 and maximum ammo. Not clear what item_id means - weapon item or ammo item? 4research.
agent_get_item_slot = 1804 # (agent_get_item_slot, <destination>, <agent_id>, <value>),
# Retrieves item_id for the specified agent's slot. Possible slot values range in 0..7; order is weapon1, weapon2, weapon3, weapon4, head_armor, body_armor, leg_armor, hand_armor.
agent_get_ammo_for_slot = 1825 # (agent_get_ammo_for_slot, <destination>, <agent_id>, <slot_no>),
# Retrieves the amount of ammo agent has in the referenced slot (range 0..3).
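# Example (a minimal sketch): giving an agent a Native item and forcing him to
# use it. Per the notes above, (agent_set_wielded_item) requires the item to
# already be in the agent's equipment, which (agent_equip_item) ensures.
#   (agent_equip_item, ":agent_no", "itm_scythe"),       # needs a free weapon slot
#   (agent_set_wielded_item, ":agent_no", "itm_scythe"),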
# Agent animations
agent_set_no_dynamics = 1762 # (agent_set_no_dynamics, <agent_id>, <value>),
# Makes the agent stand on the spot (value = 1) or move normally (value = 0). When frozen on the spot the agent can still turn around and fight if necessary. Used in Native for the wedding scene.
agent_get_animation = 1768 # (agent_get_animation, <destination>, <agent_id>, <body_part>),
# Retrieves current agent animation for specified body part (0 = lower, 1 = upper).
agent_set_animation = 1740 # (agent_set_animation, <agent_id>, <anim_id>, [channel_no]),
# Forces the agent to perform the specified animation. Optional channel_no parameter determines whether upper body (value = 1) or lower body (value = 0, default) is affected by animation.
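# Example (a hedged sketch): playing Native's cheer animation on the upper
# body only (channel 1), so the agent's lower body keeps moving normally.
#   (agent_set_animation, ":agent_no", "anim_cheer", 1),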
agent_set_stand_animation = 1741 # (agent_set_stand_animation, <agent_id>, <anim_id>),
# Defines the animation that this agent will use when standing still. Does not force the agent into actually doing this animation.
agent_set_walk_forward_animation = 1742 # (agent_set_walk_forward_animation, <agent_id>, <anim_id>),
# Defines the animation that this agent will use when walking forward. Only works for NPC agents.
agent_set_animation_progress = 1743 # (agent_set_animation_progress, <agent_id>, <value_fixed_point>),
# Allows to skip the agent to a certain point in the animation cycle, as specified by the fixed point value (0..fixed_point_multiplier).
agent_ai_set_can_crouch = 2083 # (agent_ai_set_can_crouch, <agent_id>, <value>),
# Version 1.153+. Allows or forbids the agent to crouch. 0 to forbid, 1 to allow.
agent_get_crouch_mode = 2097 # (agent_get_crouch_mode, <destination>, <agent_id>),
# Version 1.153+. Retrieves agent's crouch status (1 = crouching, 0 = standing).
agent_set_crouch_mode = 2098 # (agent_set_crouch_mode, <agent_id>, <value>),
# Version 1.153+. Sets agent's crouch status (1 = crouch, 0 = stand up).
agent_get_attached_scene_prop = 1756 # (agent_get_attached_scene_prop, <destination>, <agent_id>)
# Retrieves the reference to scene prop instance which is attached to the agent, or -1 if there isn't any.
agent_set_attached_scene_prop = 1757 # (agent_set_attached_scene_prop, <agent_id>, <scene_prop_id>)
# Attaches the specified prop instance to the agent. Used in multiplayer CTF missions to attach flags to players.
agent_set_attached_scene_prop_x = 1758 # (agent_set_attached_scene_prop_x, <agent_id>, <value>)
# Offsets the position of the attached scene prop in relation to agent, in centimeters, along the X axis (left/right).
agent_set_attached_scene_prop_y = 1809 # (agent_set_attached_scene_prop_y, <agent_id>, <value>)
# Offsets the position of the attached scene prop in relation to agent, in centimeters, along the Y axis (backwards/forward).
agent_set_attached_scene_prop_z = 1759 # (agent_set_attached_scene_prop_z, <agent_id>, <value>)
# Offsets the position of the attached scene prop in relation to agent, in centimeters, along the Z axis (down/up).
agent_get_bone_position = 2076 # (agent_get_bone_position, <position_no>, <agent_no>, <bone_no>, [<local_or_global>]),
# Version 1.161+. Returns current position for agent's bone (examine skeleton in openBrf to learn bone numbers). Pass 1 as optional <local_or_global> parameter to retrieve global bone coordinates.
# Agent AI and scripted behavior
agent_ai_set_interact_with_player = 2077 # (agent_ai_set_interact_with_player, <agent_no>, <value>),
# Version 1.165+. Enables or disables agent AI interaction with player. Dialog? Combat? 4research.
agent_set_is_alarmed = 1807 # (agent_set_is_alarmed, <agent_id>, <value>),
# Sets agent's status as alarmed (value = 1) or peaceful (value = 0).
agent_clear_relations_with_agents = 1802 # (agent_clear_relations_with_agents, <agent_id>),
# Clears any agent-to-agent relations for specified agent.
agent_add_relation_with_agent = 1803 # (agent_add_relation_with_agent, <agent_id>, <agent_id>, <value>),
# Changes relations between two agents on the scene to enemy (value = -1), neutral (value = 0), ally (value = 1). Note that neutral agents are immune to friendly fire.
agent_get_number_of_enemies_following = 1761 # (agent_get_number_of_enemies_following, <destination>, <agent_id>),
# Retrieves the total number of enemies who are currently attacking the specified agent. May be used for AI decision-making.
agent_ai_get_num_cached_enemies = 2670 # (agent_ai_get_num_cached_enemies, <destination>, <agent_no>),
# Version 1.165+. Returns total number of nearby enemies as has been cached by agent AI. Enemies are numbered from nearest to farthest.
agent_ai_get_cached_enemy = 2671 # (agent_ai_get_cached_enemy, <destination>, <agent_no>, <cache_index>),
# Version 1.165+. Returns the agent reference from AI's list of cached enemies, from nearest to farthest. Returns -1 if the cached enemy is not active anymore.
agent_get_attack_action = 1763 # (agent_get_attack_action, <destination>, <agent_id>),
# Retrieves agent's current attack action. Possible values: free = 0, readying_attack = 1, releasing_attack = 2, completing_attack_after_hit = 3, attack_parried = 4, reloading = 5, after_release = 6, cancelling_attack = 7.
agent_get_defend_action = 1764 # (agent_get_defend_action, <destination>, <agent_id>),
# Retrieves agent's current defend action. Possible values: free = 0, parrying = 1, blocking = 2.
agent_get_action_dir = 1767 # (agent_get_action_dir, <destination>, <agent_id>),
# Retrieves the direction of current agent's action. Possible values: invalid = -1, down = 0, right = 1, left = 2, up = 3.
agent_set_attack_action = 1745 # (agent_set_attack_action, <agent_id>, <direction_value>, <action_value>),
# Forces the agent to perform an attack action. Direction value: -2 = cancel any action (1.153+), 0 = thrust, 1 = slashright, 2 = slashleft, 3 = overswing. Action value: 0 = ready and release, 1 = ready and hold.
agent_set_defend_action = 1746 # (agent_set_defend_action, <agent_id>, <value>, <duration-in-1/1000-seconds>),
# Forces the agent to perform a defend action. Possible values: -2 = cancel any action (1.153+), 0 = defend_down, 1 = defend_right, 2 = defend_left, 3 = defend_up. Does time value determine delay, speed or duration? 4research.
agent_set_scripted_destination = 1730 # (agent_set_scripted_destination, <agent_id>, <position>, [auto_set_z_to_ground_level], [no_rethink]),
# Forces the agent to travel to specified position and stay there until new behavior is set or scripted mode cleared. First optional parameter determines whether the position Z coordinate will be automatically set to ground level (value = 1) or not (value = 0). Second optional parameter added in 1.165 patch, set it to 1 to save resources.
agent_set_scripted_destination_no_attack = 1748 # (agent_set_scripted_destination_no_attack, <agent_id>, <position>, <auto_set_z_to_ground_level>),
# Same as above, but the agent will not attack his enemies.
agent_get_scripted_destination = 1731 # (agent_get_scripted_destination, <position>, <agent_id>),
# Retrieves the position which is defined as agent's scripted destination, if any.
agent_force_rethink = 1732 # (agent_force_rethink, <agent_id>),
# Forces the agent to recalculate his current actions after setting him a new scripted destination or changing other factors affecting his behavior.
agent_clear_scripted_mode = 1735 # (agent_clear_scripted_mode, <agent_id>),
# Clears scripting mode from the agent, making him behave as usual again.
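# Example (a hedged sketch): walking an agent to a waypoint in pos5 and
# releasing him once he is within a meter of it (distances are in cm).
#   (agent_set_scripted_destination, ":agent_no", pos5, 1),
#   (agent_force_rethink, ":agent_no"),
#   # ...later, e.g. in a recurring mission trigger:
#   (agent_get_position, pos6, ":agent_no"),
#   (get_distance_between_positions, ":dist", pos6, pos5),
#   (lt, ":dist", 100),
#   (agent_clear_scripted_mode, ":agent_no"),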
agent_ai_set_always_attack_in_melee = 1737 # (agent_ai_set_always_attack_in_melee, <agent_id>, <value>),
# Forces the agent to continuously attack in melee combat, instead of defending. Used in Native to prevent stalling at the top of the siege ladder. Use value = 0 to clear this mode.
agent_get_simple_behavior = 1738 # (agent_get_simple_behavior, <destination>, <agent_id>),
# Retrieves agent's current simple behavior (see aisb_* constants in header_mission_templates.py for details).
agent_ai_get_behavior_target = 2082 # (agent_ai_get_behavior_target, <destination>, <agent_id>),
# Version 1.153+. UNTESTED. Supposedly returns agent_id which is the target of current agent's behavior.
agent_get_combat_state = 1739 # (agent_get_combat_state, <destination>, <agent_id>),
# Retrieves agent's current combat state:
# 0 = nothing special, this value is also always returned for player and for dead agents.
# 1 = target in sight (for ranged units)
# 2 = guarding (without a shield)
# 3 = preparing a melee attack or firing a ranged weapon
# 4 = releasing a melee attack or reloading a crossbow
# 7 = recovering after being hit in melee OR blocking with a shield. Contradictory information, 4research.
# 8 = target to the right (horse archers) OR no target in sight (ranged units). Contradictory information, 4research.
agent_ai_get_move_target = 2081 # (agent_ai_get_move_target, <destination>, <agent_id>),
# Version 1.153+. UNTESTED. Supposedly returns the enemy agent to whom the agent is currently moving to.
agent_get_look_position = 1709 # (agent_get_look_position, <position>, <agent_id>),
# Retrieves the position that the agent is currently looking at.
agent_set_look_target_position = 1744 # (agent_set_look_target_position, <agent_id>, <position>),
# Forces the agent to look at specified position (turn his head as necessary). Alarmed agents will ignore this.
agent_ai_get_look_target = 2080 # (agent_ai_get_look_target, <destination>, <agent_id>),
# Version 1.153+. UNTESTED. Supposedly returns agent_id that the agent is currently looking at.
agent_set_look_target_agent = 1713 # (agent_set_look_target_agent, <watcher_agent_id>, <observed_agent_id>),
# Forces the agent to look at specified agent (track his movements). Alarmed agents will ignore this.
agent_start_running_away = 1751 # (agent_start_running_away, <agent_id>, [<position_no>]),
# Makes the agent flee the battlefield, ignoring everything else and not attacking. If the agent reaches the edge of map in this mode, he will fade out. Optional position_no parameter added in 1.153 and will make the agent flee to specified position instead (pos0 is not allowed and will be ignored).
agent_stop_running_away = 1752 # (agent_stop_running_away, <agent_id>),
# Cancels fleeing behavior for the agent, turning him back to combat state.
agent_ai_set_aggressiveness = 1753 # (agent_ai_set_aggressiveness, <agent_id>, <value>),
# Sets the aggressiveness parameter for agent AI to use. Default value is 100. Higher values make agent more aggressive. Actual game effects are not obvious, apparently used to speed up mob aggravation when previously neutral.
agent_set_kick_allowed = 1754 # (agent_set_kick_allowed, <agent_id>, <value>),
# Enables (value = 1) or disables (value = 0) kicking for the specified agent. Only makes sense for player-controlled agents as bots don't know how to kick anyway.
set_cheer_at_no_enemy = 2379 # (set_cheer_at_no_enemy, <value>),
# Version 1.153+. Determines whether the agents will cheer when no enemy remain on the map. 0 = do not cheer, 1 = cheer.
agent_add_offer_with_timeout = 1777 # (agent_add_offer_with_timeout, <agent_id>, <offerer_agent_id>, <duration-in-1/1000-seconds>),
# Esoteric stuff. Used in multiplayer duels. Second agent_id is offerer, 0 value for duration is an infinite offer.
agent_check_offer_from_agent = 1778 # (agent_check_offer_from_agent, <agent_id>, <offerer_agent_id>), #second agent_id is offerer
# Esoteric stuff. Used in multiplayer duels. Second agent_id is offerer.
# Team operations
agent_get_group = 1765 # (agent_get_group, <destination>, <agent_id>),
# Retrieves reference to player who is currently the leader of specified bot agent. Only works in multiplayer.
agent_set_group = 1766 # (agent_set_group, <agent_id>, <player_leader_id>),
# Puts the bot agent under command of specified player. Only works in multiplayer.
agent_get_team = 1770 # (agent_get_team, <destination>, <agent_id>),
# Retrieves the team that the agent belongs to.
agent_set_team = 1771 # (agent_set_team, <agent_id>, <value>),
# Puts the agent to specified team number.
agent_get_class = 1772 # (agent_get_class, <destination>, <agent_id>),
# Retrieves the agent class (see grc_* constants in header_mission_templates.py for reference). Note this operation returns the troop class that the game divines from troop equipment and flags, ignoring any custom troop class settings.
agent_get_division = 1773 # (agent_get_division, <destination>, <agent_id>),
# Retrieves the agent division (custom troop class number in 0..8 range).
agent_set_division = 1783 # (agent_set_division, <agent_id>, <value>),
# Puts the agent into the specified division. This does not affect agent's troop class. Note that there's a bug in Warband: if an order is issued to the agent's original division, the agent will immediately switch back to its original division number. Therefore, if you want to manipulate agent divisions dynamically during the battle, you need to implement some workaround for this bug.
team_get_hold_fire_order = 1784 # (team_get_hold_fire_order, <destination>, <team_no>, <division>),
# Retrieves current status of hold fire order for specified team/division (see aordr_* constants in header_mission_templates.py for reference).
team_get_movement_order = 1785 # (team_get_movement_order, <destination>, <team_no>, <division>),
# Retrieves current movement orders for specified team/division (see mordr_* constants in header_mission_templates.py for reference).
team_get_riding_order = 1786 # (team_get_riding_order, <destination>, <team_no>, <division>),
# Retrieves current status of riding order for specified team/division (see rordr_* constants in header_mission_templates.py for reference).
team_get_weapon_usage_order = 1787 # (team_get_weapon_usage_order, <destination>, <team_no>, <division>),
# Retrieves current status of weapon usage order for specified team/division (see wordr_* constants in header_mission_templates.py for reference).
team_give_order = 1790 # (team_give_order, <team_no>, <division>, <order_id>),
# Issues an order to specified team/division.
team_set_order_position = 1791 # (team_set_order_position, <team_no>, <division>, <position>),
# Defines the position for specified team/division when currently issued order requires one.
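# Example (a minimal sketch): ordering team 0's infantry to hold at pos2,
# using the standard constants from header_mission_templates.py.
#   (team_set_order_position, 0, grc_infantry, pos2),
#   (team_give_order, 0, grc_infantry, mordr_hold),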
team_get_leader = 1792 # (team_get_leader, <destination>, <team_no>),
# Retrieves the reference to the agent who is the leader of specified team.
team_set_leader = 1793 # (team_set_leader, <team_no>, <new_leader_agent_id>),
# Sets the agent as the new leader of specified team.
team_get_order_position = 1794 # (team_get_order_position, <position>, <team_no>, <division>),
# Retrieves position which is used for specified team/division current orders.
team_set_order_listener = 1795 # (team_set_order_listener, <team_no>, <division>, [add_to_listeners]),
# Sets the specified division as the one which will follow orders issued by the player (assuming the player is on the same team). If the optional parameter add_to_listeners is greater than 0, the operation will instead *add* the specified division to order listeners. If division number is -1, the list of order listeners is cleared. If division number is 9, all divisions will listen to player's orders.
team_set_relation = 1796 # (team_set_relation, <team_no>, <team_no_2>, <value>),
# Sets relations between two teams. Possible values: enemy (-1), neutral (0) and friendly (1).
store_remaining_team_no = 2360 # (store_remaining_team_no, <destination>),
# Retrieves the number of the last remaining team. Currently not used in Native, possibly deprecated.
team_get_gap_distance = 1828 # (team_get_gap_distance, <destination>, <team_no>, <sub_class>),
# Version 1.153+. UNTESTED. Supposedly returns average gap between troops of a specified team/class (depends on how many Stand Closer/Spread Out orders were given).
# Combat statistics
store_enemy_count = 2380 # (store_enemy_count, <destination>),
# No longer used in Native. Apparently stores total number of active enemy agents. Possibly deprecated. 4research.
store_friend_count = 2381 # (store_friend_count, <destination>),
# No longer used in Native. Apparently stores total number of active friendly agents. Possibly deprecated. 4research.
store_ally_count = 2382 # (store_ally_count, <destination>),
# No longer used in Native. Apparently stores total number of active allied agents (how is it different from friends?). Possibly deprecated. 4research.
store_defender_count = 2383 # (store_defender_count, <destination>),
# No longer used in Native. Apparently stores total number of active agents on defender's side. Possibly deprecated. 4research.
store_attacker_count = 2384 # (store_attacker_count, <destination>),
# No longer used in Native. Apparently stores total number of active agents on attacker's side. Possibly deprecated. 4research.
store_normalized_team_count = 2385 # (store_normalized_team_count, <destination>, <team_no>),
# Stores the number of agents belonging to specified team, normalized according to battle_size and advantage. Commonly used to calculate advantage and possibly reinforcement wave sizes.
################################################################################
# [ Z23 ] PRESENTATIONS
################################################################################
# Presentations are a complex subject, because of their flexibility. Each
# presentation is nothing more but a number of screen control elements, called
# overlays. There are many types of overlays, each coming with its own
# behavior and looks. For as long as the presentation is running, you can
# monitor the status of those overlays and change their looks, contents and
# position on the screen.
# A presentation is, at its core, just a set of triggers. There are only five
# triggers that a presentation can have, but skillful control of them allows you to
# do nearly everything you can think of.
# ti_on_presentation_load fires only once when the presentation is started.
# This is the place where you will usually create all overlays that your
# presentation needs, initialize their looks and contents and put them to
# their positions on the screen.
# ti_on_presentation_event_state_change is probably the most important and
# easy one. It fires every time some overlay in your presentation changes its
# state. What counts as a state change depends on the overlay type. For a button
# overlay, it means that the user has clicked the button. In this case, you will
# want to run the code responsible for that button's effects. So you can put a "Win"
# button on your presentation, and when it's clicked, you can run the code
# which will give all castles and towns in the game to you. :-)
# ti_on_presentation_mouse_press trigger fires every time the user clicks a
# mouse button on one of the presentation's overlays, even if the overlay did
# not change its state as a result.
# ti_on_presentation_mouse_enter_leave trigger fires when the mouse pointer
# moves over one of presentation's overlays, or moves out of it. This might
# be useful if you want your presentation to react to the user's mouse movements,
# not only clicks.
# ti_on_presentation_run trigger will fire every frame (in other words, with
# the frequency of your game FPS). You can put some code in this trigger if
# you want your presentation to constantly do something even if the user is
# passive.
# Note that while a running presentation will usually pause your game until
# you stop it, it is also possible to write presentations which will not stop
# the game, but will run as the time goes. To see an example, go into any
# battle in Warband and press Backspace. You will see the interface which
# displays the mini-map of the battle, positions of all troops, and elements
# that you can use to issue orders to your companions (if you have any). All
# this is a presentation as well, called "prsnt_battle". And if you have
# played multiplayer, then you might be interested to know that all menus,
# including equipment selection for your character, are presentations as well.
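# To illustrate how the triggers fit together, here is a minimal sketch of a
# module_presentations.py entry (overlay coordinates and the quick strings are
# purely illustrative):
#   ("example_popup", prsntf_manual_end_only, 0, [
#     (ti_on_presentation_load,
#      [(set_fixed_point_multiplier, 1000),
#       (create_game_button_overlay, "$g_example_button", "@Done"),
#       (position_set_x, pos1, 500),
#       (position_set_y, pos1, 100),
#       (overlay_set_position, "$g_example_button", pos1),
#       (presentation_set_duration, 999999)]),
#     (ti_on_presentation_event_state_change,
#      [(store_trigger_param_1, ":object"),
#       (eq, ":object", "$g_example_button"),
#       (presentation_set_duration, 0)]),  # closing the presentation
#   ]),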
# Conditional operations
is_presentation_active = 903 # (is_presentation_active, <presentation_id>),
# Checks that the specified presentation is currently running.
# General presentation operations
start_presentation = 900 # (start_presentation, <presentation_id>),
# Starts the specified presentation.
start_background_presentation = 901 # (start_background_presentation, <presentation_id>),
# Apparently allows you to start a presentation in the background while staying in the current menu. 4research.
presentation_set_duration = 902 # (presentation_set_duration, <duration-in-1/100-seconds>),
# Sets presentation duration time, in 1/100th of second. Must be called when a presentation is active. If several presentations are active, duration will be set for all of them.
# Creating overlays
create_text_overlay = 910 # (create_text_overlay, <destination>, <string_id>),
# Creates a text label overlay and returns its overlay_id.
create_mesh_overlay = 911 # (create_mesh_overlay, <destination>, <mesh_id>),
# Creates a mesh overlay and returns its overlay_id.
create_mesh_overlay_with_item_id = 944 # (create_mesh_overlay_with_item_id, <destination>, <item_id>),
# Creates a mesh overlay, using the specified item mesh. Returns overlay_id.
create_mesh_overlay_with_tableau_material = 939 # (create_mesh_overlay_with_tableau_material, <destination>, <mesh_id>, <tableau_material_id>, <value>),
# Creates a mesh overlay, using the specified tableau_material. When mesh_id = -1, it is generated automatically. Value is passed as the parameter for tableau_material script. Returns overlay_id.
create_button_overlay = 912 # (create_button_overlay, <destination>, <string_id>),
# Creates a generic button overlay and returns its overlay_id. The only difference between this and the two following operations is that they use different button meshes.
create_game_button_overlay = 940 # (create_game_button_overlay, <destination>, <string_id>),
# Creates a game button overlay and returns its overlay_id.
create_in_game_button_overlay = 941 # (create_in_game_button_overlay, <destination>, <string_id>),
# Creates an in-game button overlay and returns its overlay_id.
create_image_button_overlay = 913 # (create_image_button_overlay, <destination>, <mesh_id>, <mesh_id>),
# Creates an image button, using two meshes for normal (1st mesh) and pressed (2nd mesh) status. Button does not have a textual label. Returns button overlay_id.
create_image_button_overlay_with_tableau_material = 938 # (create_image_button_overlay_with_tableau_material, <destination>, <mesh_id>, <tableau_material_id>, <value>),
# Creates an image button from the specified mesh, using tableau_material as the image. When mesh = -1, it is generated automatically. Value is passed as the parameter to the tableau_material script. Returns overlay_id.
create_slider_overlay = 914 # (create_slider_overlay, <destination>, <min_value>, <max_value>),
# Creates horizontal slider overlay, with positions of the slider varying between min and max values. Current value of the slider can be changed with (overlay_set_val). Returns slider's overlay_id.
create_progress_overlay = 915 # (create_progress_overlay, <destination>, <min_value>, <max_value>),
# Creates progress bar overlay, with positions of the bar varying between min and max values. Current value of the progress bar can be changed with (overlay_set_val). Returns bar's overlay_id.
create_number_box_overlay = 942 # (create_number_box_overlay, <destination>, <min_value>, <max_value>),
# Creates a number box overlay (a small field for numeric value and small increase/decrease buttons to the right) with specified min and max values. Returns number box overlay_id.
create_text_box_overlay = 917 # (create_text_box_overlay, <destination>),
# Apparently deprecated. No longer used in Native.
create_simple_text_box_overlay = 919 # (create_simple_text_box_overlay, <destination>),
# Creates a text field overlay, where the user can enter any text. Returns the text field's overlay_id. Text contents of the field can be retrieved from string register s0 in the ti_on_presentation_event_state_change trigger for the text field.
create_check_box_overlay = 918 # (create_check_box_overlay, <destination>, <checkbox_off_mesh>, <checkbox_on_mesh>),
# Creates a checkbox overlay. Returns checkbox overlay_id.
create_listbox_overlay = 943 # (create_listbox_overlay, <destination>, <string>, <value>),
# Creates a listbox overlay. Individual items can be added with (overlay_add_item) and the index of the currently selected item can be set with (overlay_set_val). Returns listbox overlay_id. Importance of the latter two parameters is unclear (default text & value?). 4research.
create_combo_label_overlay = 948 # (create_combo_label_overlay, <destination>),
# Creates a combo label overlay. Looks like plain text label. Individual items can be added with (overlay_add_item) and currently selected item can be set with (overlay_set_val). Returns combo block's overlay_id.
create_combo_button_overlay = 916 # (create_combo_button_overlay, <destination>),
# Creates a combo button overlay. For example see "Screen Resolution" dropdown in Settings menu. Individual items can be added with (overlay_add_item) and currently selected item can be set with (overlay_set_val). Returns combo block's overlay_id.
overlay_add_item = 931 # (overlay_add_item, <overlay_id>, <string_id>),
# Adds an item to the listbox or combobox. Items are indexed from 0. Note the order in which items appear in the dropdown is reverse to the order in which they're added.
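# Example (a hedged sketch): a combo button with two choices, selecting the
# item that was added first. Mind the reverse display order noted above.
#   (create_combo_button_overlay, "$g_example_combo"),
#   (overlay_add_item, "$g_example_combo", "@Option B"),  # shown second
#   (overlay_add_item, "$g_example_combo", "@Option A"),  # shown first
#   (overlay_set_val, "$g_example_combo", 0),             # select first-added item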
# Overlays hierarchy manipulation
set_container_overlay = 945 # (set_container_overlay, <overlay_id>),
# Defines the specified overlay as the container. All subsequently created overlays will be placed inside the container, and their coordinates will be based on container's position. All containers with their contents will be displayed *above* any non-container overlays. Use -1 to stop placing overlays to current container and resume normal behavior.
overlay_set_container_overlay = 951 # (overlay_set_container_overlay, <overlay_id>, <container_overlay_id>),
# Allows you to put one overlay into a container, or remove it from its container (if container_overlay_id = -1), without changing the current container set by (set_container_overlay). May be unreliable.
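# Example (a hedged sketch): grouping overlays inside a container so that they
# are positioned relative to it. mesh_white_plane is a Native mesh, used here
# only as a convenient container background.
#   (create_mesh_overlay, "$g_example_panel", "mesh_white_plane"),
#   (set_container_overlay, "$g_example_panel"),
#   (create_text_overlay, reg1, "@Inside the panel"),  # placed relative to container
#   (set_container_overlay, -1),                       # back to normal placement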
# Overlay manipulation
overlay_get_position = 946 # (overlay_get_position, <position>, <overlay_id>)
# Retrieves the overlay's current position into the specified position register, using the position's X and Y coordinates. Note that the screen size in Warband is (1.00,0.75), further modified by fixed point multiplier.
overlay_set_val = 927 # (overlay_set_val, <overlay_id>, <value>),
# Sets the value of the overlays which have numeric values.
overlay_set_text = 920 # (overlay_set_text, <overlay_id>, <string_id>),
# Changes the overlay text (if it has any). Works for labels, text fields, buttons with text labels...
overlay_set_boundaries = 928 # (overlay_set_boundaries, <overlay_id>, <min_value>, <max_value>),
# Changes the value boundaries for the overlays that have them.
overlay_set_position = 926 # (overlay_set_position, <overlay_id>, <position>),
# Sets the overlay position on the screen, using position's X and Y coordinates. Note that the screen size in Warband is (1.00,0.75), further modified by fixed point multiplier.
overlay_set_size = 925 # (overlay_set_size, <overlay_id>, <position>),
# Sets the overlay size, using position's X and Y coordinates. Note that the screen size in Warband is (1.00,0.75), further modified by fixed point multiplier. Also see (overlay_set_area_size).
overlay_set_area_size = 929 # (overlay_set_area_size, <overlay_id>, <position>),
# Defines the actual area on the screen used to display the overlay. If the overlay's size is greater than the area size, this will create a scrollable area with appropriate scrollbars. Can be used to create scrollable areas for large text, or scrollable containers with many children elements (see Host Game screen for a typical example).
overlay_set_additional_render_height = 952 # (overlay_set_additional_render_height, <overlay_id>, <height_adder>),
# Version 1.153+. Effects uncertain. 4research.
overlay_animate_to_position = 937 # (overlay_animate_to_position, <overlay_id>, <duration-in-1/1000-seconds>, <position>),
# Moves the overlay to the specified position over the given timeframe, in 1/1000th of a second.
overlay_animate_to_size = 936 # (overlay_animate_to_size, <overlay_id>, <duration-in-1/1000-seconds>, <position>),
# Changes the overlay size to the specified value over the given timeframe, in 1/1000th of a second.
overlay_set_mesh_rotation = 930 # (overlay_set_mesh_rotation, <overlay_id>, <position>),
# Despite the name, works with any overlay, allowing you to put it on the screen in rotated position. To determine the angles, position's rotation values are used (not coordinates!). Usually you will want to only use rotation around Z axis (which results in clockwise or anti-clockwise rotation as seen by user). Note that rotating overlays which are placed inside a container may cause strange results, so some trial and error will be necessary in such situation.
overlay_set_material = 956 # (overlay_set_material, <overlay_id>, <string_no>),
# Version 1.161+. Replaces the material used for rendering specified overlay.
overlay_set_color = 921 # (overlay_set_color, <overlay_id>, <color>),
# Changes the overlay color (hexadecimal value 0xRRGGBB). May not work with some overlay types.
overlay_set_alpha = 922 # (overlay_set_alpha, <overlay_id>, <alpha>),
# Changes the overlay alpha (hexadecimal value in 0x00..0xFF range). May not work with some overlay types.
overlay_set_hilight_color = 923 # (overlay_set_hilight_color, <overlay_id>, <color>),
# Highlights the overlay with specified color. May not work with some overlay types.
overlay_set_hilight_alpha = 924 # (overlay_set_hilight_alpha, <overlay_id>, <alpha>),
# Highlights the overlay with specified alpha. May not work with some overlay types.
overlay_animate_to_color = 932 # (overlay_animate_to_color, <overlay_id>, <duration-in-1/1000-seconds>, <color>)
# Changes the overlay's color over the given timeframe, in 1/1000th of a second.
overlay_animate_to_alpha = 933 # (overlay_animate_to_alpha, <overlay_id>, <duration-in-1/1000-seconds>, <alpha>),
# Changes the overlay's alpha over the given timeframe, in 1/1000th of a second.
overlay_animate_to_highlight_color = 934 # (overlay_animate_to_highlight_color, <overlay_id>, <duration-in-1/1000-seconds>, <color>),
# Highlights the overlay to the specified color over the given timeframe, in 1/1000th of a second.
overlay_animate_to_highlight_alpha = 935 # (overlay_animate_to_highlight_alpha, <overlay_id>, <duration-in-1/1000-seconds>, <alpha>),
# Highlights the overlay to the specified alpha over the given timeframe, in 1/1000th of a second.
overlay_set_display = 947 # (overlay_set_display, <overlay_id>, <value>),
# Shows (value = 1) or hides (value = 0) the specified overlay.
overlay_obtain_focus = 949 # (overlay_obtain_focus, <overlay_id>),
# Makes the specified overlay obtain input focus. Only works for text fields.
overlay_set_tooltip = 950 # (overlay_set_tooltip, <overlay_id>, <string_id>),
# Defines a text which will be displayed as a tooltip when the mouse pointer hovers over the specified overlay. Unreliable, always test how it works.
# Popups and some esoteric stuff
show_item_details = 970 # (show_item_details, <item_id>, <position>, <price_multiplier_percentile>),
# Shows a popup box at the specified position, containing standard game information for the specified item. Last parameter determines price percentile multiplier. Multiplier value of 100 will display item standard price, value of 0 will display "Default Item" instead of price (used in multiplayer equipment selection presentation).
show_item_details_with_modifier = 972 # (show_item_details_with_modifier, <item_id>, <item_modifier>, <position>, <price_multiplier_percentile>),
# Same as above, but displays stats and price information for an item with a modifier.
close_item_details = 971 # (close_item_details)
# Closes the item details popup box.
show_troop_details = 2388 # (show_troop_details, <troop_id>, <position>, <troop_price>)
# Version 1.153+. Supposedly displays a popup with troop information at specified place. 4research.
################################################################################
# [ Z24 ] MULTIPLAYER AND NETWORKING (LEFT FOR SOMEONE MORE FAMILIAR WITH THIS)
################################################################################
# This section is eagerly waiting for someone to write documentation comments.
# Conditional operations
player_is_active = 401 # (player_is_active, <player_id>),
# Checks that the specified player is active (i.e. connected to server).
multiplayer_is_server = 417 # (multiplayer_is_server),
# Checks that the code is running on multiplayer server. Operation will fail on client machines or in singleplayer mode.
multiplayer_is_dedicated_server = 418 # (multiplayer_is_dedicated_server),
# Checks that the code is running on dedicated multiplayer server machine.
game_in_multiplayer_mode = 419 # (game_in_multiplayer_mode),
# Checks that the game is running in multiplayer mode.
player_is_admin = 430 # (player_is_admin, <player_id>),
# Checks that the specified player has administrative rights.
player_is_busy_with_menus = 438 # (player_is_busy_with_menus, <player_id>),
# Undocumented. Educated guess: it's true when the player is running a presentation without the prsntf_read_only flag.
player_item_slot_is_picked_up = 461 # (player_item_slot_is_picked_up, <player_id>, <item_slot_no>),
# Checks that the specified player's equipment slot contains an item that the player has picked up from ground.
# Player slot operations
player_set_slot = 508 # (player_set_slot, <player_id>, <slot_no>, <value>),
player_get_slot = 528 # (player_get_slot, <destination>, <player_id>, <slot_no>),
player_slot_eq = 548 # (player_slot_eq, <player_id>, <slot_no>, <value>),
player_slot_ge = 568 # (player_slot_ge, <player_id>, <slot_no>, <value>),
# Network communication operations
send_message_to_url = 380 # (send_message_to_url, <string_id>, <encode_url>),
# Sends an HTTP request. Response from that URL will be returned to "script_game_receive_url_response". Parameter <encode_url> is optional and its effects are unclear. Supposedly it's equivalent to calling (str_encode_url) on the first parameter, which doesn't quite make sense to me.
multiplayer_send_message_to_server = 388 # (multiplayer_send_message_to_server, <message_type>),
# Multiplayer client operation. Send a simple message (only message code, no data) to game server.
multiplayer_send_int_to_server = 389 # (multiplayer_send_int_to_server, <message_type>, <value>),
# Multiplayer client operation. Send a message with a single extra integer value to game server.
multiplayer_send_2_int_to_server = 390 # (multiplayer_send_2_int_to_server, <message_type>, <value>, <value>),
# Same as (multiplayer_send_int_to_server), but two integer values are sent.
multiplayer_send_3_int_to_server = 391 # (multiplayer_send_3_int_to_server, <message_type>, <value>, <value>, <value>),
# Same as (multiplayer_send_int_to_server), but three integer values are sent.
multiplayer_send_4_int_to_server = 392 # (multiplayer_send_4_int_to_server, <message_type>, <value>, <value>, <value>, <value>),
# Same as (multiplayer_send_int_to_server), but four integer values are sent.
multiplayer_send_string_to_server = 393 # (multiplayer_send_string_to_server, <message_type>, <string_id>),
# Multiplayer client operation. Send a message with a string value to game server.
multiplayer_send_message_to_player = 394 # (multiplayer_send_message_to_player, <player_id>, <message_type>),
# Multiplayer server operation. Send a simple message (only message code, no data) to one of connected players.
multiplayer_send_int_to_player = 395 # (multiplayer_send_int_to_player, <player_id>, <message_type>, <value>),
# Multiplayer server operation. Send a message with a single extra integer value to one of connected players.
multiplayer_send_2_int_to_player = 396 # (multiplayer_send_2_int_to_player, <player_id>, <message_type>, <value>, <value>),
# Same as (multiplayer_send_int_to_player), but two integer values are sent.
multiplayer_send_3_int_to_player = 397 # (multiplayer_send_3_int_to_player, <player_id>, <message_type>, <value>, <value>, <value>),
# Same as (multiplayer_send_int_to_player), but three integer values are sent.
multiplayer_send_4_int_to_player = 398 # (multiplayer_send_4_int_to_player, <player_id>, <message_type>, <value>, <value>, <value>, <value>),
# Same as (multiplayer_send_int_to_player), but four integer values are sent.
multiplayer_send_string_to_player = 399 # (multiplayer_send_string_to_player, <player_id>, <message_type>, <string_id>),
# Multiplayer server operation. Send a message with a string value to one of connected players.
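# Example (a hedged sketch): a round-trip between client and server. The
# message codes (mymod_event_request / mymod_event_reply) are hypothetical
# constants you would declare yourself in module_constants.py.
# Client side:
#   (multiplayer_send_int_to_server, mymod_event_request, ":some_value"),
# Server side, inside "script_game_receive_network_message":
#   (store_script_param, ":player_no", 1),
#   (store_script_param, ":event_type", 2),
#   (eq, ":event_type", mymod_event_request),
#   (multiplayer_send_int_to_player, ":player_no", mymod_event_reply, ":answer"),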
# Player handling operations
get_max_players = 400 # (get_max_players, <destination>),
# Returns the maximum possible number of connected players. Apparently always returns a constant value, however its return value may change as the maximum increases with new patches.
player_get_team_no = 402 # (player_get_team_no, <destination>, <player_id>),
# Retrieves player's selected team.
player_set_team_no = 403 # (player_set_team_no, <player_id>, <team_id>),
# Assigns a player to the specified team.
player_get_troop_id = 404 # (player_get_troop_id, <destination>, <player_id>),
# Retrieves player's selected troop reference.
player_set_troop_id = 405 # (player_set_troop_id, <player_id>, <troop_id>),
# Assigns the selected troop reference to a player.
player_get_agent_id = 406 # (player_get_agent_id, <destination>, <player_id>),
# Retrieves player's current agent reference. Returns a negative value if player has no agent.
agent_get_player_id = 1724 # (agent_get_player_id, <destination>, <agent_id>),
# Retrieves player reference that is currently controlling the specified agent.
player_get_gold = 407 # (player_get_gold, <destination>, <player_id>),
# Retrieves player's current gold amount.
player_set_gold = 408 # (player_set_gold, <player_id>, <value>, <max_value>),
# Sets player's new gold amount and maximum allowed gold amount. Use 0 for <max_value> to remove gold limit.
player_spawn_new_agent = 409 # (player_spawn_new_agent, <player_id>, <entry_point>),
# Spawns a new agent for the specified player. Essentially a combination of (spawn_agent) and (player_control_agent) operations.
player_add_spawn_item = 410 # (player_add_spawn_item, <player_id>, <item_slot_no>, <item_id>),
#
multiplayer_get_my_team = 411 # (multiplayer_get_my_team, <destination>),
# Client operation. Retrieves player's currently selected team.
multiplayer_get_my_troop = 412 # (multiplayer_get_my_troop, <destination>),
# Client operation. Retrieves player's currently selected troop.
multiplayer_set_my_troop = 413 # (multiplayer_set_my_troop, <troop_id>),
# Client operation. Selects a new troop for the player.
multiplayer_get_my_gold = 414 # (multiplayer_get_my_gold, <destination>),
# Client operation. Retrieves current player's gold amount.
multiplayer_get_my_player = 415 # (multiplayer_get_my_player, <destination>),
# Client operation. Retrieves current player's player_id reference.
multiplayer_make_everyone_enemy = 420 # (multiplayer_make_everyone_enemy),
# Used in deathmatch mode to make everyone hostile to all other agents.
player_control_agent = 421 # (player_control_agent, <player_id>, <agent_id>),
# Server operation. Puts the agent under specified player's control. Operation will change agent's face code and banner to those of player.
player_get_item_id = 422 # (player_get_item_id, <destination>, <player_id>, <item_slot_no>),
# Server operation. Retrieves item that's currently equipped by specified player in <item_slot_no> equipment slot.
player_get_banner_id = 423 # (player_get_banner_id, <destination>, <player_id>),
# Server operation. Retrieves banner_id reference used by the specified player. Note that in MP banners are enumerated starting from 0 (unlike single-player, where their enumeration depends on scene prop banners' reference range).
player_set_is_admin = 429 # (player_set_is_admin, <player_id>, <value>),
# Server operation. Sets the specified player as admin (value = 1) or not (value = 0).
player_get_score = 431 # (player_get_score, <destination>, <player_id>),
#
player_set_score = 432 # (player_set_score, <player_id>, <value>),
#
player_get_kill_count = 433 # (player_get_kill_count, <destination>, <player_id>),
#
player_set_kill_count = 434 # (player_set_kill_count, <player_id>, <value>),
#
player_get_death_count = 435 # (player_get_death_count, <destination>, <player_id>),
#
player_set_death_count = 436 # (player_set_death_count, <player_id>, <value>),
#
player_get_ping = 437 # (player_get_ping, <destination>, <player_id>),
#
player_get_is_muted = 439 # (player_get_is_muted, <destination>, <player_id>),
#
player_set_is_muted = 440 # (player_set_is_muted, <player_id>, <value>, [mute_for_everyone]),
# Optional parameter mute_for_everyone should be set to 1 if the player is muted for everyone (this works only on the server).
player_get_unique_id = 441 # (player_get_unique_id, <destination>, <player_id>),
# Server operation. Retrieves player's unique identifier which is determined by player's game license code. This number is supposed to be unique for each license, allowing reliable player identification across servers.
player_get_gender = 442 # (player_get_gender, <destination>, <player_id>),
#
player_save_picked_up_items_for_next_spawn = 459 # (player_save_picked_up_items_for_next_spawn, <player_id>),
#
player_get_value_of_original_items = 460 # (player_get_value_of_original_items, <player_id>),
# Official docs: this operation returns the value of the player's items, but default troop items will be counted as zero (except horse).
profile_get_banner_id = 350 # (profile_get_banner_id, <destination>),
# Client operation. Retrieves banner_id reference used by the game for multiplayer. Note that in MP banners are enumerated starting from 0 (unlike single-player, where their enumeration depends on scene prop banners' reference range).
profile_set_banner_id = 351 # (profile_set_banner_id, <value>),
# Client operation. Assigns a new banner_id to be used for multiplayer. Note that in MP banners are enumerated starting from 0 (unlike single-player, where their enumeration depends on scene prop banners' reference range).
# Team handling operations
team_get_bot_kill_count = 450 # (team_get_bot_kill_count, <destination>, <team_id>),
#
team_set_bot_kill_count = 451 # (team_set_bot_kill_count, <team_id>, <value>),
#
team_get_bot_death_count = 452 # (team_get_bot_death_count, <destination>, <team_id>),
#
team_set_bot_death_count = 453 # (team_set_bot_death_count, <team_id>, <value>),
#
team_get_kill_count = 454 # (team_get_kill_count, <destination>, <team_id>),
#
team_get_score = 455 # (team_get_score, <destination>, <team_id>),
#
team_set_score = 456 # (team_set_score, <team_id>, <value>),
#
team_set_faction = 457 # (team_set_faction, <team_id>, <faction_id>),
#
team_get_faction = 458 # (team_get_faction, <destination>, <team_id>),
#
# General scene and mission handling operations
multiplayer_clear_scene = 416 # (multiplayer_clear_scene),
#
multiplayer_find_spawn_point = 425 # (multiplayer_find_spawn_point, <destination>, <team_no>, <examine_all_spawn_points>, <is_horseman>),
#
set_spawn_effector_scene_prop_kind = 426 # (set_spawn_effector_scene_prop_kind, <team_no>, <scene_prop_kind_no>),
# Specifies some scene prop kind as one of the teams' spawn effector, making players of that team more likely to spawn closer to the specified effector prop instances. Use -1 to disable spawn effector for a team.
set_spawn_effector_scene_prop_id = 427 # (set_spawn_effector_scene_prop_id, <team_no>, <scene_prop_id>),
# Specifies a single prop instance as a team's spawn effector. Different from (set_spawn_effector_scene_prop_kind) as other instances of the same scene prop will not affect player spawning.
start_multiplayer_mission = 470 # (start_multiplayer_mission, <mission_template_id>, <scene_id>, <started_manually>),
#
# Administrative operations and settings
kick_player = 465 # (kick_player, <player_id>),
#
ban_player = 466 # (ban_player, <player_id>, <value>, <player_id>),
# Official docs: set value = 1 for banning temporarily, assign 2nd player id as the administrator player id if banning is permanent
save_ban_info_of_player = 467 # (save_ban_info_of_player, <player_id>),
#
ban_player_using_saved_ban_info = 468 # (ban_player_using_saved_ban_info),
#
server_add_message_to_log = 473 # (server_add_message_to_log, <string_id>),
#
server_get_renaming_server_allowed = 475 # (server_get_renaming_server_allowed, <destination>),
# Official docs: 0-1
server_get_changing_game_type_allowed = 476 # (server_get_changing_game_type_allowed, <destination>),
# Official docs: 0-1
server_get_combat_speed = 478 # (server_get_combat_speed, <destination>),
# Official docs: 0-2
server_set_combat_speed = 479 # (server_set_combat_speed, <value>),
# Official docs: 0-2
server_get_friendly_fire = 480 # (server_get_friendly_fire, <destination>),
#
server_set_friendly_fire = 481 # (server_set_friendly_fire, <value>),
# Official docs: 0 = off, 1 = on
server_get_control_block_dir = 482 # (server_get_control_block_dir, <destination>),
#
server_set_control_block_dir = 483 # (server_set_control_block_dir, <value>),
# Official docs: 0 = automatic, 1 = by mouse movement
server_set_password = 484 # (server_set_password, <string_id>),
#
server_get_add_to_game_servers_list = 485 # (server_get_add_to_game_servers_list, <destination>),
#
server_set_add_to_game_servers_list = 486 # (server_set_add_to_game_servers_list, <value>),
#
server_get_ghost_mode = 487 # (server_get_ghost_mode, <destination>),
#
server_set_ghost_mode = 488 # (server_set_ghost_mode, <value>),
#
server_set_name = 489 # (server_set_name, <string_id>),
#
server_get_max_num_players = 490 # (server_get_max_num_players, <destination>),
#
server_set_max_num_players = 491 # (server_set_max_num_players, <value>),
#
server_set_welcome_message = 492 # (server_set_welcome_message, <string_id>),
#
server_get_melee_friendly_fire = 493 # (server_get_melee_friendly_fire, <destination>),
#
server_set_melee_friendly_fire = 494 # (server_set_melee_friendly_fire, <value>),
# Official docs: 0 = off, 1 = on
server_get_friendly_fire_damage_self_ratio = 495 # (server_get_friendly_fire_damage_self_ratio, <destination>),
#
server_set_friendly_fire_damage_self_ratio = 496 # (server_set_friendly_fire_damage_self_ratio, <value>),
# Official docs: 0-100
server_get_friendly_fire_damage_friend_ratio = 497 # (server_get_friendly_fire_damage_friend_ratio, <destination>),
#
server_set_friendly_fire_damage_friend_ratio = 498 # (server_set_friendly_fire_damage_friend_ratio, <value>),
# Official docs: 0-100
server_get_anti_cheat = 499 # (server_get_anti_cheat, <destination>),
#
server_set_anti_cheat = 477 # (server_set_anti_cheat, <value>),
# Official docs: 0 = off, 1 = on
################################################################################
# [ Z25 ] REMAINING ESOTERIC STUFF (NO IDEA WHAT IT DOES)
################################################################################
# Honestly, I have no idea what these functions could be used for. If you
# know, please let me know ASAP! :-)
set_tooltip_text = 1130 # (set_tooltip_text, <string_id>),
ai_mesh_face_group_show_hide = 1805 # (ai_mesh_face_group_show_hide, <group_no>, <value>),
# Shows (value = 1) or hides (value = 0) the specified AI mesh face group.
auto_set_meta_mission_at_end_commited = 1305 # (auto_set_meta_mission_at_end_commited),
# Not documented. Not used in Native. Was (simulate_battle, <value>) before.
################################################################################
# [ Z26 ] HARDCODED COMPILER-RELATED CODE
################################################################################
# Do not touch this stuff unless necessary. Module System compiler needs this
# code to correctly compile your module into format that Warband understands.
lhs_operations = [try_for_range, try_for_range_backwards, try_for_parties, try_for_agents, store_script_param_1, store_script_param_2, store_script_param, store_repeat_object,
get_global_cloud_amount, get_global_haze_amount, options_get_damage_to_player, options_get_damage_to_friends, options_get_combat_ai, options_get_campaign_ai, options_get_combat_speed,
profile_get_banner_id, get_achievement_stat, get_max_players, player_get_team_no, player_get_troop_id, player_get_agent_id, player_get_gold, multiplayer_get_my_team,
multiplayer_get_my_troop, multiplayer_get_my_gold, multiplayer_get_my_player, player_get_score, player_get_kill_count, player_get_death_count, player_get_ping, player_get_is_muted,
player_get_unique_id, player_get_gender, player_get_item_id, player_get_banner_id, game_get_reduce_campaign_ai, multiplayer_find_spawn_point, team_get_bot_kill_count,
team_get_bot_death_count, team_get_kill_count, team_get_score, team_get_faction, player_get_value_of_original_items, server_get_renaming_server_allowed,
server_get_changing_game_type_allowed, server_get_friendly_fire, server_get_control_block_dir, server_get_combat_speed, server_get_add_to_game_servers_list, server_get_ghost_mode,
server_get_max_num_players, server_get_melee_friendly_fire, server_get_friendly_fire_damage_self_ratio, server_get_friendly_fire_damage_friend_ratio, server_get_anti_cheat, troop_get_slot,
party_get_slot, faction_get_slot, scene_get_slot, party_template_get_slot, agent_get_slot, quest_get_slot, item_get_slot, player_get_slot, team_get_slot, scene_prop_get_slot,
store_last_sound_channel, get_angle_between_positions, get_distance_between_positions, get_distance_between_positions_in_meters, get_sq_distance_between_positions,
get_sq_distance_between_positions_in_meters, get_sq_distance_between_position_heights, position_get_x, position_get_y, position_get_z, position_get_scale_x,
position_get_scale_y, position_get_scale_z, position_get_rotation_around_z, position_normalize_origin, position_get_rotation_around_x, position_get_rotation_around_y,
position_get_distance_to_terrain, position_get_distance_to_ground_level, create_text_overlay, create_mesh_overlay, create_button_overlay, create_image_button_overlay, create_slider_overlay,
create_progress_overlay, create_combo_button_overlay, create_text_box_overlay, create_check_box_overlay, create_simple_text_box_overlay, create_image_button_overlay_with_tableau_material,
create_mesh_overlay_with_tableau_material, create_game_button_overlay, create_in_game_button_overlay, create_number_box_overlay, create_listbox_overlay, create_mesh_overlay_with_item_id,
overlay_get_position, create_combo_label_overlay, get_average_game_difficulty, get_level_boundary, faction_get_color, troop_get_type, troop_get_xp, troop_get_class,
troop_inventory_slot_get_item_amount, troop_inventory_slot_get_item_max_amount, troop_get_inventory_capacity, troop_get_inventory_slot, troop_get_inventory_slot_modifier,
troop_get_upgrade_troop, item_get_type, party_get_num_companions, party_get_num_prisoners, party_get_current_terrain, party_get_template_id, party_count_members_of_type,
party_count_companions_of_type, party_count_prisoners_of_type, party_get_free_companions_capacity, party_get_free_prisoners_capacity, party_get_helpfulness, party_get_ai_initiative,
party_get_num_companion_stacks, party_get_num_prisoner_stacks, party_stack_get_troop_id, party_stack_get_size, party_stack_get_num_wounded, party_stack_get_troop_dna,
party_prisoner_stack_get_troop_id, party_prisoner_stack_get_size, party_prisoner_stack_get_troop_dna, party_get_cur_town, party_get_morale, party_get_battle_opponent, party_get_icon,
party_get_skill_level, get_battle_advantage, party_get_attached_to, party_get_num_attached_parties, party_get_attached_party_with_rank, get_player_agent_no, get_player_agent_kill_count,
get_player_agent_own_troop_kill_count, agent_get_horse, agent_get_rider, agent_get_party_id, agent_get_entry_no, agent_get_troop_id, agent_get_item_id, store_agent_hit_points,
agent_get_kill_count, agent_get_player_id, agent_get_wielded_item, agent_get_ammo, agent_get_simple_behavior, agent_get_combat_state, agent_get_attached_scene_prop,
agent_get_time_elapsed_since_removed, agent_get_number_of_enemies_following, agent_get_attack_action, agent_get_defend_action, agent_get_group, agent_get_action_dir, agent_get_animation,
agent_get_team, agent_get_class, agent_get_division, team_get_hold_fire_order, team_get_movement_order, team_get_riding_order, team_get_weapon_usage_order, team_get_leader,
agent_get_item_slot, scene_prop_get_num_instances, scene_prop_get_instance, scene_prop_get_visibility, scene_prop_get_hit_points, scene_prop_get_max_hit_points, scene_prop_get_team,
agent_get_ammo_for_slot, agent_deliver_damage_to_agent_advanced, team_get_gap_distance, scene_item_get_num_instances, scene_item_get_instance, scene_spawned_item_get_num_instances,
scene_spawned_item_get_instance, prop_instance_get_variation_id, prop_instance_get_variation_id_2, prop_instance_get_position, prop_instance_get_starting_position, prop_instance_get_scale,
prop_instance_get_scene_prop_kind, prop_instance_is_animating, prop_instance_get_animation_target_position, agent_get_item_cur_ammo, mission_get_time_speed, mission_cam_get_aperture,
store_trigger_param, store_trigger_param_1, store_trigger_param_2, store_trigger_param_3, agent_ai_get_look_target, agent_ai_get_move_target, agent_ai_get_behavior_target,
agent_get_crouch_mode, store_or, store_and, store_mod, store_add, store_sub, store_mul, store_div, store_sqrt, store_pow, store_sin, store_cos, store_tan, assign, store_random,
store_random_in_range, store_asin, store_acos, store_atan, store_atan2, store_troop_gold, store_num_free_stacks, store_num_free_prisoner_stacks, store_party_size,
store_party_size_wo_prisoners, store_troop_kind_count, store_num_regular_prisoners, store_troop_count_companions, store_troop_count_prisoners, store_item_kind_count,
store_free_inventory_capacity, store_skill_level, store_character_level, store_attribute_level, store_troop_faction, store_troop_health, store_proficiency_level, store_relation,
store_conversation_agent, store_conversation_troop, store_partner_faction, store_encountered_party, store_encountered_party2, store_faction_of_party, store_current_scene, store_zoom_amount,
store_item_value, store_troop_value, store_partner_quest, store_random_quest_in_range, store_random_troop_to_raise, store_random_troop_to_capture, store_random_party_in_range,
store_random_horse, store_random_equipment, store_random_armor, store_quest_number, store_quest_item, store_quest_troop, store_current_hours, store_time_of_day, store_current_day,
store_distance_to_party_from_party, get_party_ai_behavior, get_party_ai_object, get_party_ai_current_behavior, get_party_ai_current_object, store_num_parties_created,
store_num_parties_destroyed, store_num_parties_destroyed_by_player, store_num_parties_of_template, store_random_party_of_template, store_remaining_team_no, store_mission_timer_a_msec,
store_mission_timer_b_msec, store_mission_timer_c_msec, store_mission_timer_a, store_mission_timer_b, store_mission_timer_c, store_enemy_count, store_friend_count, store_ally_count,
store_defender_count, store_attacker_count, store_normalized_team_count, item_get_weight, item_get_value, item_get_difficulty, item_get_head_armor, item_get_body_armor, item_get_leg_armor,
item_get_hit_points, item_get_weapon_length, item_get_speed_rating, item_get_missile_speed, item_get_max_ammo, item_get_accuracy, item_get_shield_height, item_get_horse_scale,
item_get_horse_speed, item_get_horse_maneuver, item_get_food_quality, item_get_abundance, item_get_thrust_damage, item_get_thrust_damage_type, item_get_swing_damage,
item_get_swing_damage_type, item_get_horse_charge_damage, try_for_prop_instances, options_get_battle_size, party_get_ignore_with_player_party, cast_ray,
prop_instance_get_current_deform_progress, prop_instance_get_current_deform_frame, face_keys_get_hair, face_keys_get_beard, face_keys_get_face_texture, face_keys_get_hair_texture,
face_keys_get_hair_color, face_keys_get_age, face_keys_get_skin_color, face_keys_get_morph_key, try_for_players, get_operation_set_version, get_startup_sun_light, get_startup_ambient_light,
get_startup_ground_ambient_light, agent_ai_get_num_cached_enemies, agent_ai_get_cached_enemy, ]
global_lhs_operations = [val_lshift, val_rshift, val_add, val_sub, val_mul, val_div, val_max, val_min, val_mod, ]
can_fail_operations = [ge, eq, gt, is_between, entering_town, map_free, encountered_party_is_attacker, conversation_screen_is_active, in_meta_mission, troop_is_hero, troop_is_wounded,
key_is_down, key_clicked, game_key_is_down, game_key_clicked, hero_can_join, hero_can_join_as_prisoner, party_can_join, party_can_join_as_prisoner, troops_can_join,
troops_can_join_as_prisoner, party_can_join_party, main_party_has_troop, party_is_in_town, party_is_in_any_town, party_is_active, player_has_item, troop_has_item_equipped, troop_is_mounted,
troop_is_guarantee_ranged, troop_is_guarantee_horse, player_is_active, multiplayer_is_server, multiplayer_is_dedicated_server, game_in_multiplayer_mode, player_is_admin,
player_is_busy_with_menus, player_item_slot_is_picked_up, check_quest_active, check_quest_finished, check_quest_succeeded, check_quest_failed, check_quest_concluded, is_trial_version,
is_edit_mode_enabled, troop_slot_eq, party_slot_eq, faction_slot_eq, scene_slot_eq, party_template_slot_eq, agent_slot_eq, quest_slot_eq, item_slot_eq, player_slot_eq, team_slot_eq,
scene_prop_slot_eq, troop_slot_ge, party_slot_ge, faction_slot_ge, scene_slot_ge, party_template_slot_ge, agent_slot_ge, quest_slot_ge, item_slot_ge, player_slot_ge, team_slot_ge,
scene_prop_slot_ge, position_has_line_of_sight_to_position, position_is_behind_position, is_presentation_active, all_enemies_defeated, race_completed_by_player, num_active_teams_le,
main_hero_fallen, lt, neq, le, teams_are_enemies, agent_is_alive, agent_is_wounded, agent_is_human, agent_is_ally, agent_is_non_player, agent_is_defender, agent_is_active, agent_is_routed,
agent_is_in_special_mode, agent_is_in_parried_animation, class_is_listening_order, agent_check_offer_from_agent, entry_point_is_auto_generated, scene_prop_has_agent_on_it, agent_is_alarmed,
agent_is_in_line_of_sight, scene_prop_get_instance, scene_item_get_instance, scene_allows_mounted_units, prop_instance_is_valid, prop_instance_intersects_with_prop_instance,
agent_has_item_equipped, map_get_land_position_around_position, map_get_water_position_around_position, is_zoom_disabled, is_currently_night, store_random_party_of_template, str_is_empty,
item_has_property, item_has_capability, item_has_modifier, item_has_faction, cast_ray, ]
depth_operations = [try_begin, try_for_range, try_for_range_backwards, try_for_parties, try_for_agents, try_for_prop_instances, try_for_players, ]
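# A hedged sketch (not part of the shipped compiler) of how these lists can be
# consulted when checking an operation tuple; the helper names below are
# assumptions, not real Module System functions.
def _first_param_is_destination(opcode):
    # Operations in these lists write their result into their first parameter,
    # so the compiler must encode that parameter as a writable destination.
    return opcode in lhs_operations or opcode in global_lhs_operations

def _operation_can_fail(opcode):
    # Operations in this list may fail and abort the enclosing try-block.
    return opcode in can_fail_operations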
| agpl-3.0 |
BPI-SINOVOIP/BPI-Mainline-kernel | linux-5.4/scripts/gdb/linux/timerlist.py | 520 | 7731 | # SPDX-License-Identifier: GPL-2.0
#
# Copyright 2019 Google LLC.
import binascii
import gdb
from linux import constants
from linux import cpus
from linux import rbtree
from linux import utils
timerqueue_node_type = utils.CachedType("struct timerqueue_node").get_type()
hrtimer_type = utils.CachedType("struct hrtimer").get_type()
def ktime_get():
"""Returns the current time, but not very accurately
We can't read the hardware timer itself to add any nanoseconds
that need to be added since we last stored the time in the
timekeeper. But this is probably good enough for debug purposes."""
tk_core = gdb.parse_and_eval("&tk_core")
return tk_core['timekeeper']['tkr_mono']['base']
def print_timer(rb_node, idx):
timerqueue = utils.container_of(rb_node, timerqueue_node_type.pointer(),
"node")
timer = utils.container_of(timerqueue, hrtimer_type.pointer(), "node")
function = str(timer['function']).split(" ")[1].strip("<>")
softexpires = timer['_softexpires']
expires = timer['node']['expires']
now = ktime_get()
text = " #{}: <{}>, {}, ".format(idx, timer, function)
text += "S:{:02x}\n".format(int(timer['state']))
text += " # expires at {}-{} nsecs [in {} to {} nsecs]\n".format(
softexpires, expires, softexpires - now, expires - now)
return text
def print_active_timers(base):
curr = base['active']['next']['node']
curr = curr.address.cast(rbtree.rb_node_type.get_type().pointer())
idx = 0
while curr:
yield print_timer(curr, idx)
curr = rbtree.rb_next(curr)
idx += 1
def print_base(base):
text = " .base: {}\n".format(base.address)
text += " .index: {}\n".format(base['index'])
text += " .resolution: {} nsecs\n".format(constants.LX_hrtimer_resolution)
text += " .get_time: {}\n".format(base['get_time'])
if constants.LX_CONFIG_HIGH_RES_TIMERS:
text += " .offset: {} nsecs\n".format(base['offset'])
text += "active timers:\n"
text += "".join([x for x in print_active_timers(base)])
return text
def print_cpu(hrtimer_bases, cpu, max_clock_bases):
cpu_base = cpus.per_cpu(hrtimer_bases, cpu)
jiffies = gdb.parse_and_eval("jiffies_64")
tick_sched_ptr = gdb.parse_and_eval("&tick_cpu_sched")
ts = cpus.per_cpu(tick_sched_ptr, cpu)
text = "cpu: {}\n".format(cpu)
    for i in range(max_clock_bases):  # xrange does not exist under Python 3
text += " clock {}:\n".format(i)
text += print_base(cpu_base['clock_base'][i])
if constants.LX_CONFIG_HIGH_RES_TIMERS:
fmts = [(" .{} : {} nsecs", 'expires_next'),
(" .{} : {}", 'hres_active'),
(" .{} : {}", 'nr_events'),
(" .{} : {}", 'nr_retries'),
(" .{} : {}", 'nr_hangs'),
(" .{} : {}", 'max_hang_time')]
text += "\n".join([s.format(f, cpu_base[f]) for s, f in fmts])
text += "\n"
if constants.LX_CONFIG_TICK_ONESHOT:
fmts = [(" .{} : {}", 'nohz_mode'),
(" .{} : {} nsecs", 'last_tick'),
(" .{} : {}", 'tick_stopped'),
(" .{} : {}", 'idle_jiffies'),
(" .{} : {}", 'idle_calls'),
(" .{} : {}", 'idle_sleeps'),
(" .{} : {} nsecs", 'idle_entrytime'),
(" .{} : {} nsecs", 'idle_waketime'),
(" .{} : {} nsecs", 'idle_exittime'),
(" .{} : {} nsecs", 'idle_sleeptime'),
(" .{}: {} nsecs", 'iowait_sleeptime'),
(" .{} : {}", 'last_jiffies'),
(" .{} : {}", 'next_timer'),
(" .{} : {} nsecs", 'idle_expires')]
text += "\n".join([s.format(f, ts[f]) for s, f in fmts])
text += "\njiffies: {}\n".format(jiffies)
text += "\n"
return text
def print_tickdevice(td, cpu):
dev = td['evtdev']
text = "Tick Device: mode: {}\n".format(td['mode'])
if cpu < 0:
text += "Broadcast device\n"
else:
text += "Per CPU device: {}\n".format(cpu)
text += "Clock Event Device: "
if dev == 0:
text += "<NULL>\n"
return text
text += "{}\n".format(dev['name'])
text += " max_delta_ns: {}\n".format(dev['max_delta_ns'])
text += " min_delta_ns: {}\n".format(dev['min_delta_ns'])
text += " mult: {}\n".format(dev['mult'])
text += " shift: {}\n".format(dev['shift'])
text += " mode: {}\n".format(dev['state_use_accessors'])
text += " next_event: {} nsecs\n".format(dev['next_event'])
text += " set_next_event: {}\n".format(dev['set_next_event'])
members = [('set_state_shutdown', " shutdown: {}\n"),
('set_state_periodic', " periodic: {}\n"),
('set_state_oneshot', " oneshot: {}\n"),
('set_state_oneshot_stopped', " oneshot stopped: {}\n"),
('tick_resume', " resume: {}\n")]
for member, fmt in members:
if dev[member]:
text += fmt.format(dev[member])
text += " event_handler: {}\n".format(dev['event_handler'])
text += " retries: {}\n".format(dev['retries'])
return text
def pr_cpumask(mask):
nr_cpu_ids = 1
if constants.LX_NR_CPUS > 1:
nr_cpu_ids = gdb.parse_and_eval("nr_cpu_ids")
inf = gdb.inferiors()[0]
bits = mask['bits']
    num_bytes = (nr_cpu_ids + 7) // 8  # integer division; '/' yields a float on Python 3
buf = utils.read_memoryview(inf, bits, num_bytes).tobytes()
    buf = binascii.b2a_hex(buf).decode('ascii')  # decode so slicing and joining yield str on Python 3
chunks = []
i = num_bytes
while i > 0:
i -= 1
start = i * 2
end = start + 2
chunks.append(buf[start:end])
if i != 0 and i % 4 == 0:
chunks.append(',')
extra = nr_cpu_ids % 8
if 0 < extra <= 4:
chunks[0] = chunks[0][0] # Cut off the first 0
return "".join(chunks)
class LxTimerList(gdb.Command):
"""Print /proc/timer_list"""
def __init__(self):
super(LxTimerList, self).__init__("lx-timerlist", gdb.COMMAND_DATA)
def invoke(self, arg, from_tty):
hrtimer_bases = gdb.parse_and_eval("&hrtimer_bases")
max_clock_bases = gdb.parse_and_eval("HRTIMER_MAX_CLOCK_BASES")
text = "Timer List Version: gdb scripts\n"
text += "HRTIMER_MAX_CLOCK_BASES: {}\n".format(max_clock_bases)
text += "now at {} nsecs\n".format(ktime_get())
for cpu in cpus.each_online_cpu():
text += print_cpu(hrtimer_bases, cpu, max_clock_bases)
if constants.LX_CONFIG_GENERIC_CLOCKEVENTS:
if constants.LX_CONFIG_GENERIC_CLOCKEVENTS_BROADCAST:
bc_dev = gdb.parse_and_eval("&tick_broadcast_device")
text += print_tickdevice(bc_dev, -1)
text += "\n"
mask = gdb.parse_and_eval("tick_broadcast_mask")
mask = pr_cpumask(mask)
text += "tick_broadcast_mask: {}\n".format(mask)
if constants.LX_CONFIG_TICK_ONESHOT:
mask = gdb.parse_and_eval("tick_broadcast_oneshot_mask")
mask = pr_cpumask(mask)
text += "tick_broadcast_oneshot_mask: {}\n".format(mask)
text += "\n"
tick_cpu_devices = gdb.parse_and_eval("&tick_cpu_device")
for cpu in cpus.each_online_cpu():
tick_dev = cpus.per_cpu(tick_cpu_devices, cpu)
text += print_tickdevice(tick_dev, cpu)
text += "\n"
gdb.write(text)
LxTimerList()
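# Typical usage, assuming the kernel's gdb helper scripts have been loaded
# (for example via the auto-generated vmlinux-gdb.py):
#
#   (gdb) lx-timerlist
#
# which prints a report modelled on /proc/timer_list.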
| gpl-2.0 |
sgraham/nope | third_party/webpagereplay/servermanager.py | 5 | 4782 | #!/usr/bin/env python
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Control "replay.py --server_mode" (e.g. switch from record to replay)."""
import sys
import time
class ServerManager(object):
"""Run servers until is removed or an exception is raised.
Servers start in the order they are appended and stop in the
opposite order. Servers are started by calling the initializer
passed to ServerManager.Append() and by calling __enter__(). Once an
server's initializer is called successfully, the __exit__() function
is guaranteed to be called when ServerManager.Run() completes.
"""
def __init__(self, is_record_mode):
"""Initialize a server manager."""
self.initializers = []
self.record_callbacks = []
self.replay_callbacks = []
self.traffic_shapers = []
self.is_record_mode = is_record_mode
self.should_exit = False
def Append(self, initializer, *init_args, **init_kwargs):
"""Append a server to the end of the list to run.
Servers start in the order they are appended and stop in the
opposite order.
Args:
initializer: a function that returns a server instance.
A server needs to implement the with-statement interface.
init_args: positional arguments for the initializer.
      init_kwargs: keyword arguments for the initializer.
"""
self.initializers.append((initializer, init_args, init_kwargs))
def AppendTrafficShaper(self, initializer, *init_args, **init_kwargs):
"""Append a traffic shaper to the end of the list to run.
Args:
initializer: a function that returns a server instance.
A server needs to implement the with-statement interface.
init_args: positional arguments for the initializer.
      init_kwargs: keyword arguments for the initializer.
"""
self.traffic_shapers.append((initializer, init_args, init_kwargs))
def AppendRecordCallback(self, func):
"""Append a function to the list to call when switching to record mode.
Args:
func: a function that takes no arguments and returns no value.
"""
self.record_callbacks.append(func)
def AppendReplayCallback(self, func):
"""Append a function to the list to call when switching to replay mode.
Args:
func: a function that takes no arguments and returns no value.
"""
self.replay_callbacks.append(func)
def IsRecordMode(self):
"""Call all the functions that have been registered to enter replay mode."""
return self.is_record_mode
def SetRecordMode(self):
"""Call all the functions that have been registered to enter record mode."""
self.is_record_mode = True
for record_func in self.record_callbacks:
record_func()
def SetReplayMode(self):
"""Call all the functions that have been registered to enter replay mode."""
self.is_record_mode = False
for replay_func in self.replay_callbacks:
replay_func()
def Run(self):
"""Create the servers and loop.
The loop quits if a server raises an exception.
Raises:
any exception raised by the servers
"""
server_exits = []
server_ports = []
exception_info = (None, None, None)
try:
for initializer, init_args, init_kwargs in self.initializers:
server = initializer(*init_args, **init_kwargs)
if server:
server_exits.insert(0, server.__exit__)
server.__enter__()
if hasattr(server, 'server_port'):
server_ports.append(server.server_port)
for initializer, init_args, init_kwargs in self.traffic_shapers:
init_kwargs['ports'] = server_ports
shaper = initializer(*init_args, **init_kwargs)
        if shaper:  # check the shaper just created, not the last server
server_exits.insert(0, shaper.__exit__)
shaper.__enter__()
while True:
time.sleep(1)
if self.should_exit:
break
except:
exception_info = sys.exc_info()
finally:
for server_exit in server_exits:
try:
if server_exit(*exception_info):
exception_info = (None, None, None)
except:
exception_info = sys.exc_info()
if exception_info != (None, None, None):
raise exception_info[0], exception_info[1], exception_info[2]
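# A hedged usage sketch; the HttpServerFactory initializer named here is
# hypothetical (replay.py wires in its own HTTP/DNS server initializers):
#
#   manager = ServerManager(is_record_mode=True)
#   manager.Append(HttpServerFactory, archive, host='127.0.0.1')
#   manager.AppendReplayCallback(lambda: logging.info('replay mode'))
#   manager.Run()  # blocks until should_exit is set or a server raises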
| bsd-3-clause |
prampey/servo | tests/wpt/web-platform-tests/tools/wptserve/docs/conf.py | 467 | 7855 | # -*- coding: utf-8 -*-
#
# wptserve documentation build configuration file, created by
# sphinx-quickstart on Wed Aug 14 17:23:24 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
sys.path.insert(0, os.path.abspath(".."))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'wptserve'
copyright = u'2013, Mozilla Foundation and other wptserve contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'wptservedoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'wptserve.tex', u'wptserve Documentation',
u'James Graham', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'wptserve', u'wptserve Documentation',
[u'James Graham'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'wptserve', u'wptserve Documentation',
u'James Graham', 'wptserve', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| mpl-2.0 |
ajylee/gpaw-rtxs | gpaw/coding_style.py | 3 | 3198 | # Copyright (C) 2008 CAMd
# Please see the accompanying LICENSE file for further information.
"""This module is an example of good coding style.
This docstring should begin with a one-line description followed by a
blank line, and then this paragraph describing in more word what kind
of functionality this module implements.
After this docstring we have import statements in this order:
1. From the Python standard library.
2. Other libraries (numpy, ase, ...).
3. GPAW stuff.
"""
from math import pi
import numpy as np
from ase.units import kJ, Hartree
from gpaw import debug
from gpaw.fd_operators import Gradient
import gpaw.mpi as mpi
class SimpleExample:
"""A simple example class.
A headline, a blank line and then this longer description of the
class.
Here one could put an example of how to use the class::
ex = SimpleExample('Test', (2, 3), int, verbose=False)
ex.run(7, verbose=True)
"""
def __init__(self, name, shape, dtype=float, verbose=True):
"""Create an example object.
Again, headline, blank line, ... . If there are many
parameters, there should be a parameter section (see below).
If there only a few possible arguments, then the parameter
section can be left out and the arguments can be described in
the section folowing the headline and blank line (see the
`run` method). If a method is real simple and
self-explanatory, the docstring can be the headline only (see
the `reset` method).
Parameters:
name : string
Name of the example.
shape: tuple
Shape of the ndarray.
dtype: ndarray datatype
The datatype of the ndarray. Here, the description can go
on to a second line if needed. Make sure that the
indentation is like shown here, and remember to end with a
period.
verbose: boolean
Print information about this and that.
Other sections:
        There can be other sections - see below and here:
http://scipy.org/...
"""
self.name = name
if verbose:
print name
self.a = np.zeros(shape, dtype)
self.verbose = verbose
def method_with_long_name(self, b, out=None):
"""Do something very complicated.
Long story with all details here ...
Parameters:
b : ndarray
Add this array.
out : ndarray
Optional output array.
Returns:
The sum of ...
"""
        if out is None:  # 'none' is undefined; the builtin is None
return self.a + b
else:
return np.add(self.a, b, out)
def run(self, n):
"""Do something.
Do it n times, where n must be a positive integer. The final
result bla-bla is returned.
"""
for i in range(n):
self.a += i
if self.verbose:
print self.a
return pi * self.a / n + 1
def reset(self):
"""Simple method - no explanation needed."""
self.a[:] = 0
def function(a, b):
"""Headline.
Long story ..."""
return a + b
| gpl-3.0 |
SebDieBln/QGIS | python/ext-libs/nose2/tests/unit/test_failfast.py | 16 | 1681 | from nose2.tests._common import TestCase
from nose2.plugins import failfast
from nose2 import result, session
from nose2.compat import unittest
class TestFailFast(TestCase):
tags = ['unit']
def setUp(self):
self.session = session.Session()
self.result = result.PluggableTestResult(self.session)
self.plugin = failfast.FailFast(session=self.session)
self.plugin.register()
class Test(TestCase):
def test(self):
pass
def test_err(self):
raise Exception("oops")
def test_fail(self):
assert False
@unittest.expectedFailure
def test_fail_expected(self):
assert False
@unittest.skipIf(True, "Always skip")
def test_skip(self):
pass
self.case = Test
def test_sets_shouldstop_on_unexpected_error(self):
test = self.case('test_err')
test(self.result)
assert self.result.shouldStop
def test_sets_shouldstop_on_unexpected_fail(self):
test = self.case('test_fail')
test(self.result)
assert self.result.shouldStop
def test_does_not_set_shouldstop_on_expected_fail(self):
test = self.case('test_fail_expected')
test(self.result)
assert not self.result.shouldStop
def test_does_not_set_shouldstop_on_success(self):
test = self.case('test')
test(self.result)
assert not self.result.shouldStop
def test_does_not_set_shouldstop_on_skip(self):
test = self.case('test_skip')
test(self.result)
assert not self.result.shouldStop
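# On the command line this plugin's behavior corresponds to nose2's fail-fast
# option (``nose2 -F`` / ``--fail-fast``; flag spelling assumed from the
# plugin name).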
| gpl-2.0 |
georgepar/rosdep | test/test_rosdep_arch.py | 5 | 2807 | # Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Author Ken Conley/kwc@willowgarage.com
import os
import traceback
from mock import Mock, patch
def get_test_dir():
# not used yet
return os.path.abspath(os.path.join(os.path.dirname(__file__), 'arch'))
def test_PacmanInstaller():
from rosdep2.platforms.arch import PacmanInstaller
@patch.object(PacmanInstaller, 'get_packages_to_install')
def test(mock_method):
installer = PacmanInstaller()
mock_method.return_value = []
assert [] == installer.get_install_command(['fake'])
# no interactive option implemented yet
mock_method.return_value = ['a', 'b']
expected = [['sudo', '-H', 'pacman', '-Sy', '--needed', 'a'],
['sudo', '-H', 'pacman', '-Sy', '--needed', 'b']]
val = installer.get_install_command(['whatever'], interactive=False)
assert val == expected, val
expected = [['sudo', '-H', 'pacman', '-Sy', '--needed', 'a'],
['sudo', '-H', 'pacman', '-Sy', '--needed', 'b']]
val = installer.get_install_command(['whatever'], interactive=True)
assert val == expected, val
try:
test()
except AssertionError:
traceback.print_exc()
raise
| bsd-3-clause |
2014c2g9/c2g9 | wsgi/static/Brython2.1.0-20140419-113919/Lib/xml/dom/minicompat.py | 781 | 3228 | """Python version compatibility support for minidom."""
# This module should only be imported using "import *".
#
# The following names are defined:
#
# NodeList -- lightest possible NodeList implementation
#
# EmptyNodeList -- lightest possible NodeList that is guaranteed to
# remain empty (immutable)
#
# StringTypes -- tuple of defined string types
#
# defproperty -- function used in conjunction with GetattrMagic;
# using these together is needed to make them work
# as efficiently as possible in both Python 2.2+
# and older versions. For example:
#
# class MyClass(GetattrMagic):
# def _get_myattr(self):
# return something
#
# defproperty(MyClass, "myattr",
# "return some value")
#
# For Python 2.2 and newer, this will construct a
# property object on the class, which avoids
# needing to override __getattr__(). It will only
# work for read-only attributes.
#
# For older versions of Python, inheriting from
# GetattrMagic will use the traditional
# __getattr__() hackery to achieve the same effect,
# but less efficiently.
#
# defproperty() should be used for each version of
# the relevant _get_<property>() function.
__all__ = ["NodeList", "EmptyNodeList", "StringTypes", "defproperty"]
import xml.dom
StringTypes = (str,)
class NodeList(list):
__slots__ = ()
def item(self, index):
if 0 <= index < len(self):
return self[index]
def _get_length(self):
return len(self)
def _set_length(self, value):
raise xml.dom.NoModificationAllowedErr(
"attempt to modify read-only attribute 'length'")
length = property(_get_length, _set_length,
doc="The number of nodes in the NodeList.")
def __getstate__(self):
return list(self)
def __setstate__(self, state):
self[:] = state
class EmptyNodeList(tuple):
__slots__ = ()
def __add__(self, other):
NL = NodeList()
NL.extend(other)
return NL
def __radd__(self, other):
NL = NodeList()
NL.extend(other)
return NL
def item(self, index):
return None
def _get_length(self):
return 0
def _set_length(self, value):
raise xml.dom.NoModificationAllowedErr(
"attempt to modify read-only attribute 'length'")
length = property(_get_length, _set_length,
doc="The number of nodes in the NodeList.")
def defproperty(klass, name, doc):
get = getattr(klass, ("_get_" + name))
def set(self, value, name=name):
raise xml.dom.NoModificationAllowedErr(
"attempt to modify read-only attribute " + repr(name))
assert not hasattr(klass, "_set_" + name), \
"expected not to find _set_" + name
prop = property(get, set, doc=doc)
setattr(klass, name, prop)
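# A minimal sketch of the defproperty() pattern described in the header
# comment above; the _Demo class is hypothetical and not part of this module:
#
#   class _Demo(object):
#       def _get_myattr(self):
#           return 42
#   defproperty(_Demo, "myattr", doc="Example read-only attribute.")
#   assert _Demo().myattr == 42      # reads go through _get_myattr
#   # assigning to _Demo().myattr raises xml.dom.NoModificationAllowedErr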
| gpl-2.0 |
frreiss/tensorflow-fred | tensorflow/python/data/experimental/kernel_tests/optimize_dataset_test.py | 1 | 18438 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the private `_OptimizeDataset` transformation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import warnings
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.experimental.ops import grouping
from tensorflow.python.data.experimental.ops import optimization_options
from tensorflow.python.data.experimental.ops import scan_ops
from tensorflow.python.data.experimental.ops import testing
from tensorflow.python.data.experimental.ops import threadpool
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
def _captured_refvar_test_combinations():
def make_map_dataset(var):
return dataset_ops.Dataset.from_tensors(0).map(lambda x: x + var)
def make_flat_map_dataset(var):
return dataset_ops.Dataset.from_tensors(
0).flat_map(lambda _: dataset_ops.Dataset.from_tensors(var))
def make_filter_dataset(var):
return dataset_ops.Dataset.from_tensors(0).filter(lambda x: x < var)
def make_map_and_batch_dataset(var):
def map_fn(x):
return x + var
return dataset_ops.Dataset.from_tensors(0).apply(
batching.map_and_batch(map_fn, 1))
def make_group_by_reducer_dataset(var):
reducer = grouping.Reducer(
init_func=lambda _: 0,
reduce_func=lambda x, y: x,
finalize_func=lambda _: var)
return dataset_ops.Dataset.range(5).apply(
grouping.group_by_reducer(lambda x: x % 2, reducer))
def make_group_by_window_dataset(var):
def reduce_fn(key, bucket):
del key, bucket
return dataset_ops.Dataset.from_tensors(var)
return dataset_ops.Dataset.from_tensors(0).repeat(10).apply(
grouping.group_by_window(lambda _: 0, reduce_fn, 10))
def make_scan_dataset(var):
return dataset_ops.Dataset.from_tensors(0).apply(
scan_ops.scan(
0, lambda old_state, elem: (old_state + 1, elem + old_state + var)))
cases = [
# Core datasets
("Map", make_map_dataset),
("FlatMap", make_flat_map_dataset),
("Filter", make_filter_dataset),
# Experimental datasets
("MapAndBatch", make_map_and_batch_dataset),
("GroupByReducer", make_group_by_reducer_dataset),
("GroupByWindow", make_group_by_window_dataset),
("Scan", make_scan_dataset)
]
def reduce_fn(x, y):
name, dataset_fn = y
return x + combinations.combine(
dataset_fn=combinations.NamedObject(name, dataset_fn))
return functools.reduce(reduce_fn, cases, [])
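# The reduce above flattens the named cases into a single list of parameter
# combinations, which `combinations.times` later crosses with the standard
# graph/eager test modes.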
def _disable_intra_op_parallelism_test_combinations():
def make_tensor_dataset():
return dataset_ops.Dataset.from_tensors(42)
def make_map_dataset():
return dataset_ops.Dataset.from_tensors(42).map(lambda x: x + 1)
cases = [
("FromTensors", make_tensor_dataset, [42]),
("Map", make_map_dataset, [43]),
]
def reduce_fn(x, y):
name, dataset_fn, expected_output = y
return x + combinations.combine(
dataset_fn=combinations.NamedObject(name, dataset_fn),
expected_output=[expected_output])
return functools.reduce(reduce_fn, cases, [])
class OptimizeDatasetTest(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(test_base.default_test_combinations())
def testOptimizationStatefulFunction(self):
dataset = dataset_ops.Dataset.range(
10).map(lambda _: random_ops.random_uniform([])).batch(10)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(options)
get_next = self.getNext(dataset)
self.evaluate(get_next())
# TODO(b/123902160)
@combinations.generate(test_base.graph_only_combinations())
def testOptimizationLargeInputFromTensor(self):
input_t = array_ops.placeholder(dtypes.int32, (None, None, None))
dataset = dataset_ops.Dataset.from_tensors(input_t)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(options)
iterator = dataset_ops.make_initializable_iterator(dataset)
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op, {input_t: np.ones([512, 1024, 1025], np.int32)})
self.evaluate(get_next)
# TODO(b/123902160)
@combinations.generate(test_base.graph_only_combinations())
def testOptimizationLargeInputFromTensorSlices(self):
input_t = array_ops.placeholder(dtypes.int32, (None, None, None, None))
dataset = dataset_ops.Dataset.from_tensor_slices(input_t)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(options)
iterator = dataset_ops.make_initializable_iterator(dataset)
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op, {input_t: np.ones([1, 512, 1024, 1025], np.int32)})
self.evaluate(get_next)
@combinations.generate(test_base.default_test_combinations())
def testOptimizationNestedDataset(self):
def flat_map_fn(_):
dataset = dataset_ops.Dataset.from_tensors(0)
dataset = dataset.apply(testing.assert_next(["MemoryCacheImpl"]))
dataset = dataset.skip(0) # Should be removed by noop elimination
dataset = dataset.cache()
return dataset
dataset = dataset_ops.Dataset.range(1)
dataset = dataset.flat_map(flat_map_fn)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.noop_elimination = True
dataset = dataset.with_options(options)
self.assertDatasetProduces(dataset, expected_output=[0])
@combinations.generate(test_base.default_test_combinations())
def testOptimizationNestedDatasetWithModifiedRetval(self):
def flat_map_fn(_):
dataset = dataset_ops.Dataset.from_tensors(0)
dataset = dataset.apply(testing.assert_next(["MapAndBatch"]))
# Should be fused by map and batch fusion
dataset = dataset.map(lambda x: x)
dataset = dataset.batch(1)
return dataset
dataset = dataset_ops.Dataset.range(1)
dataset = dataset.flat_map(flat_map_fn)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.map_and_batch_fusion = True
dataset = dataset.with_options(options)
self.assertDatasetProduces(dataset, expected_output=[[0]])
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
_disable_intra_op_parallelism_test_combinations()))
def testOptimizationDisableIntraOpParallelism(self, dataset_fn,
expected_output):
os.environ["TF_DATA_EXPERIMENT_OPT_IN"] = "disable_intra_op_parallelism"
os.environ["TF_JOB_NAME"] = "test_job"
dataset = dataset_fn()
dataset = dataset.apply(testing.assert_next(["MaxIntraOpParallelism"]))
self.assertDatasetProduces(dataset, expected_output=expected_output)
del os.environ["TF_DATA_EXPERIMENT_OPT_IN"]
del os.environ["TF_JOB_NAME"]
@combinations.generate(test_base.default_test_combinations())
def testOptimizationThreadPoolDataset(self):
dataset = dataset_ops.Dataset.range(10).batch(10)
dataset = threadpool.override_threadpool(
dataset,
threadpool.PrivateThreadPool(
2, display_name="private_thread_pool_%d" % 2))
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(options)
self.assertDatasetProduces(
dataset,
expected_output=[list(range(10))],
requires_initialization=True)
# Reference variables are not supported in eager mode.
@combinations.generate(
combinations.times(test_base.graph_only_combinations(),
_captured_refvar_test_combinations()))
def testOptimizationWithCapturedRefVar(self, dataset_fn):
"""Tests that default optimizations are disabled with ref variables."""
variable = variable_scope.get_variable(
"v", initializer=0, use_resource=False)
assign_op = variable.assign_add(1)
# Check that warning is logged.
warnings.simplefilter("always")
with warnings.catch_warnings(record=True) as w:
unoptimized_dataset = dataset_fn(variable)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.noop_elimination = True
options.experimental_optimization.map_and_batch_fusion = True
optimized_dataset = unoptimized_dataset.with_options(options)
optimized_it = dataset_ops.make_initializable_iterator(optimized_dataset)
self.assertGreaterEqual(len(w), 1)
graph_rewrites = options._graph_rewrites()
expected = (
"tf.data graph rewrites are not compatible with "
"tf.Variable. The following rewrites will be disabled: %s."
" To enable rewrites, use resource variables instead by "
"calling `tf.enable_resource_variables()` at the start of the "
"program." %
(", ".join(graph_rewrites.enabled + graph_rewrites.default)))
self.assertTrue(any(expected in str(warning) for warning in w))
# Check that outputs are the same in the optimized and unoptimized cases,
# when the variable value is changing.
unoptimized_it = dataset_ops.make_initializable_iterator(
unoptimized_dataset)
with ops.control_dependencies([assign_op]):
unoptimized_output = unoptimized_it.get_next()
optimized_output = optimized_it.get_next()
self.evaluate(variable.initializer)
self.evaluate((unoptimized_it.initializer, optimized_it.initializer))
while True:
try:
unoptimized, optimized = self.evaluate((unoptimized_output,
optimized_output))
self.assertEqual(unoptimized, optimized)
except errors.OutOfRangeError:
break
@combinations.generate(test_base.default_test_combinations())
def testOptimizationDefault(self):
"""Tests the optimization settings by default."""
options = dataset_ops.Options()
expected_optimizations_enabled = []
expected_optimizations_disabled = []
expected_optimizations_default = [
"map_and_batch_fusion",
"noop_elimination",
"shuffle_and_repeat_fusion",
]
graph_rewrites = options._graph_rewrites()
self.assertEqual(set(graph_rewrites.enabled),
set(expected_optimizations_enabled))
self.assertEqual(set(graph_rewrites.disabled),
set(expected_optimizations_disabled))
self.assertEqual(set(graph_rewrites.default),
set(expected_optimizations_default))
options.experimental_optimization.apply_default_optimizations = True
graph_rewrites = options._graph_rewrites()
self.assertEqual(set(graph_rewrites.enabled),
set(expected_optimizations_enabled))
self.assertEqual(set(graph_rewrites.disabled),
set(expected_optimizations_disabled))
self.assertEqual(set(graph_rewrites.default),
set(expected_optimizations_default))
options.experimental_optimization.apply_default_optimizations = False
expected_optimizations_default = []
graph_rewrites = options._graph_rewrites()
self.assertEqual(set(graph_rewrites.enabled),
set(expected_optimizations_enabled))
self.assertEqual(set(graph_rewrites.disabled),
set(expected_optimizations_disabled))
self.assertEqual(set(graph_rewrites.default),
set(expected_optimizations_default))
@combinations.generate(test_base.default_test_combinations())
def testOptimizationEnabled(self):
"""Tests the optimization settings by enabling all."""
options = dataset_ops.Options()
options.experimental_optimization.filter_fusion = True
options.experimental_optimization.filter_with_random_uniform_fusion = True
options.experimental_optimization.hoist_random_uniform = True
options.experimental_optimization.map_and_batch_fusion = True
options.experimental_optimization.map_and_filter_fusion = True
options.experimental_optimization.map_parallelization = True
options.experimental_optimization.map_fusion = True
options.experimental_optimization.noop_elimination = True
options.experimental_optimization.parallel_batch = True
options.experimental_optimization.shuffle_and_repeat_fusion = True
options.experimental_optimization.map_vectorization.enabled = True
options.experimental_optimization.autotune_buffers = True
options.experimental_deterministic = False
options.experimental_stats.latency_all_edges = True
options.experimental_slack = True
expected_optimizations_enabled = [
"filter_fusion",
"filter_with_random_uniform_fusion",
"hoist_random_uniform",
"map_and_batch_fusion",
"map_and_filter_fusion",
"map_parallelization",
"map_fusion",
"noop_elimination",
"parallel_batch",
"shuffle_and_repeat_fusion",
"map_vectorization",
"inject_prefetch",
"make_sloppy",
"latency_all_edges",
"slack",
]
expected_optimizations_disabled = []
expected_optimizations_default = []
graph_rewrites = options._graph_rewrites()
self.assertEqual(set(graph_rewrites.enabled),
set(expected_optimizations_enabled))
self.assertEqual(set(graph_rewrites.disabled),
set(expected_optimizations_disabled))
self.assertEqual(set(graph_rewrites.default),
set(expected_optimizations_default))
@combinations.generate(test_base.default_test_combinations())
def testOptimizationDisabled(self):
"""Tests the optimization settings by disabling all."""
options = dataset_ops.Options()
options.experimental_optimization.filter_fusion = False
options.experimental_optimization.filter_with_random_uniform_fusion = False
options.experimental_optimization.hoist_random_uniform = False
options.experimental_optimization.map_and_batch_fusion = False
options.experimental_optimization.map_and_filter_fusion = False
options.experimental_optimization.map_parallelization = False
options.experimental_optimization.map_fusion = False
options.experimental_optimization.noop_elimination = False
options.experimental_optimization.parallel_batch = False
options.experimental_optimization.shuffle_and_repeat_fusion = False
options.experimental_optimization.map_vectorization.enabled = False
options.experimental_optimization.autotune = False
options.experimental_deterministic = True
options.experimental_stats.latency_all_edges = False
options.experimental_slack = False
expected_optimizations_enabled = []
expected_optimizations_disabled = [
"filter_fusion",
"filter_with_random_uniform_fusion",
"hoist_random_uniform",
"map_and_batch_fusion",
"map_and_filter_fusion",
"map_parallelization",
"map_fusion",
"noop_elimination",
"parallel_batch",
"shuffle_and_repeat_fusion",
"map_vectorization",
"inject_prefetch",
"make_sloppy",
"latency_all_edges",
"slack",
]
expected_optimizations_default = []
graph_rewrites = options._graph_rewrites()
self.assertEqual(set(graph_rewrites.enabled),
set(expected_optimizations_enabled))
self.assertEqual(set(graph_rewrites.disabled),
set(expected_optimizations_disabled))
self.assertEqual(set(graph_rewrites.default),
set(expected_optimizations_default))
@combinations.generate(test_base.default_test_combinations())
def testAutotuningDefaults(self):
options = dataset_ops.Options()
# Check defaults
autotune, algorithm, cpu_budget, ram_budget = options._autotune_settings()
self.assertTrue(autotune)
self.assertEqual(algorithm,
optimization_options._AutotuneAlgorithm.HILL_CLIMB)
self.assertEqual(cpu_budget, 0)
self.assertEqual(ram_budget, 0)
@combinations.generate(test_base.default_test_combinations())
def testAutotuningSettings(self):
options = dataset_ops.Options()
options.experimental_optimization.autotune_cpu_budget = 1000
options.experimental_optimization.autotune_ram_budget = 999999999
options.experimental_optimization.autotune_buffers = True
self.assertIn("inject_prefetch", options._graph_rewrites().enabled)
autotune, algorithm, cpu_budget, ram_budget = options._autotune_settings()
self.assertTrue(autotune)
self.assertEqual(algorithm,
optimization_options._AutotuneAlgorithm.GRADIENT_DESCENT)
self.assertEqual(cpu_budget, 1000)
self.assertEqual(ram_budget, 999999999)
if __name__ == "__main__":
test.main()
| apache-2.0 |
trishnaguha/ansible | lib/ansible/modules/storage/netapp/netapp_e_snapshot_images.py | 31 | 8776 | #!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: netapp_e_snapshot_images
short_description: NetApp E-Series create and delete snapshot images
description:
- Create and delete snapshots images on snapshot groups for NetApp E-series storage arrays.
- Only the oldest snapshot image can be deleted so consistency is preserved.
- "Related: Snapshot volumes are created from snapshot images."
version_added: '2.2'
author: Kevin Hulquest (@hulquest)
options:
api_username:
required: true
description:
- The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_password:
required: true
description:
- The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_url:
required: true
description:
- The url to the SANtricity WebServices Proxy or embedded REST API.
validate_certs:
required: false
default: true
description:
- Should https certificates be validated?
snapshot_group:
description:
- The name of the snapshot group in which you want to create a snapshot image.
required: True
state:
description:
- Whether a new snapshot image should be created or oldest be deleted.
required: True
choices: ['create', 'remove']
"""
EXAMPLES = """
- name: Create Snapshot
netapp_e_snapshot_images:
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ validate_certs }}"
snapshot_group: "3300000060080E5000299C24000005B656D9F394"
state: 'create'
"""
RETURN = """
---
msg:
description: State of operation
type: str
returned: always
sample: "Created snapshot image"
image_id:
description: ID of snaphot image
type: str
returned: state == created
sample: "3400000060080E5000299B640063074057BC5C5E "
"""
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
import json
from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.error import HTTPError
def request(url, data=None, headers=None, method='GET', use_proxy=True,
force=False, last_mod_time=None, timeout=10, validate_certs=True,
url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
try:
r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
url_username=url_username, url_password=url_password, http_agent=http_agent,
force_basic_auth=force_basic_auth)
except HTTPError as err:
r = err.fp
try:
raw_data = r.read()
if raw_data:
data = json.loads(raw_data)
else:
raw_data = None
except Exception:
if ignore_errors:
pass
else:
raise Exception(raw_data)
resp_code = r.getcode()
if resp_code >= 400 and not ignore_errors:
raise Exception(resp_code, data)
else:
return resp_code, data
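# Illustrative sketch of how request() is used in this module (arguments taken
# from the calls below; this is not an authoritative API reference):
#
#   code, body = request(api_url + 'storage-systems/%s/snapshot-groups' % ssid,
#                        url_username=user, url_password=pwd, headers=HEADERS)
#
# On an HTTP status >= 400 it raises Exception(status_code, parsed_body) unless
# ignore_errors=True is passed, in which case (status_code, body) is returned.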
def snapshot_group_from_name(module, ssid, api_url, api_pwd, api_usr, name):
snap_groups = 'storage-systems/%s/snapshot-groups' % ssid
snap_groups_url = api_url + snap_groups
(ret, snapshot_groups) = request(snap_groups_url, url_username=api_usr, url_password=api_pwd, headers=HEADERS,
validate_certs=module.params['validate_certs'])
snapshot_group_id = None
for snapshot_group in snapshot_groups:
if name == snapshot_group['label']:
snapshot_group_id = snapshot_group['pitGroupRef']
break
if snapshot_group_id is None:
module.fail_json(msg="Failed to lookup snapshot group. Group [%s]. Id [%s]." % (name, ssid))
return snapshot_group
def oldest_image(module, ssid, api_url, api_pwd, api_usr, name):
get_status = 'storage-systems/%s/snapshot-images' % ssid
url = api_url + get_status
try:
(ret, images) = request(url, url_username=api_usr, url_password=api_pwd, headers=HEADERS,
validate_certs=module.params['validate_certs'])
except Exception as err:
module.fail_json(msg="Failed to get snapshot images for group. Group [%s]. Id [%s]. Error [%s]" %
(name, ssid, to_native(err)))
if not images:
module.exit_json(msg="There are no snapshot images to remove. Group [%s]. Id [%s]." % (name, ssid))
oldest = min(images, key=lambda x: x['pitSequenceNumber'])
if oldest is None or "pitRef" not in oldest:
module.fail_json(msg="Failed to lookup oldest snapshot group. Group [%s]. Id [%s]." % (name, ssid))
return oldest
def create_image(module, ssid, api_url, pwd, user, p, snapshot_group):
snapshot_group_obj = snapshot_group_from_name(module, ssid, api_url, pwd, user, snapshot_group)
snapshot_group_id = snapshot_group_obj['pitGroupRef']
endpoint = 'storage-systems/%s/snapshot-images' % ssid
url = api_url + endpoint
post_data = json.dumps({'groupId': snapshot_group_id})
image_data = request(url, data=post_data, method='POST', url_username=user, url_password=pwd, headers=HEADERS,
validate_certs=module.params['validate_certs'])
if image_data[1]['status'] == 'optimal':
status = True
id = image_data[1]['id']
else:
status = False
id = ''
return status, id
def delete_image(module, ssid, api_url, pwd, user, snapshot_group):
image = oldest_image(module, ssid, api_url, pwd, user, snapshot_group)
image_id = image['pitRef']
endpoint = 'storage-systems/%s/snapshot-images/%s' % (ssid, image_id)
url = api_url + endpoint
try:
(ret, image_data) = request(url, method='DELETE', url_username=user, url_password=pwd, headers=HEADERS,
validate_certs=module.params['validate_certs'])
    except Exception as e:
        # request() raises Exception(status_code, response_body) on HTTP
        # errors; unpack it explicitly -- indexing the exception object
        # directly is not valid on Python 3.
        ret, image_data = e.args[0], e.args
if ret == 204:
deleted_status = True
error_message = ''
else:
deleted_status = False
error_message = image_data[1]['errorMessage']
return deleted_status, error_message
def main():
argument_spec = basic_auth_argument_spec()
argument_spec.update(dict(
snapshot_group=dict(required=True, type='str'),
ssid=dict(required=True, type='str'),
api_url=dict(required=True),
api_username=dict(required=False),
api_password=dict(required=False, no_log=True),
validate_certs=dict(required=False, default=True),
state=dict(required=True, choices=['create', 'remove'], type='str'),
))
module = AnsibleModule(argument_spec)
p = module.params
ssid = p.pop('ssid')
api_url = p.pop('api_url')
user = p.pop('api_username')
pwd = p.pop('api_password')
snapshot_group = p.pop('snapshot_group')
desired_state = p.pop('state')
if not api_url.endswith('/'):
api_url += '/'
if desired_state == 'create':
created_status, snapshot_id = create_image(module, ssid, api_url, pwd, user, p, snapshot_group)
if created_status:
module.exit_json(changed=True, msg='Created snapshot image', image_id=snapshot_id)
else:
module.fail_json(
msg="Could not create snapshot image on system %s, in snapshot group %s" % (ssid, snapshot_group))
else:
deleted, error_msg = delete_image(module, ssid, api_url, pwd, user, snapshot_group)
if deleted:
module.exit_json(changed=True, msg='Deleted snapshot image for snapshot group [%s]' % (snapshot_group))
else:
module.fail_json(
msg="Could not create snapshot image on system %s, in snapshot group %s --- %s" % (
ssid, snapshot_group, error_msg))
if __name__ == '__main__':
main()
| gpl-3.0 |
BobBuildTool/bob | pym/bob/scm/scm.py | 2 | 13149 | # Bob build tool
# Copyright (C) 2017 Jan Klötzke
#
# SPDX-License-Identifier: GPL-3.0-or-later
from ..errors import ParseError
from ..utils import joinLines
from abc import ABCMeta, abstractmethod
from enum import Enum
from shlex import quote
import fnmatch
import re
SYNTHETIC_SCM_PROPS = frozenset(('__source', 'recipe', 'overridden'))
class ScmOverride:
def __init__(self, override):
self.__match = override.get("match", {})
self.__del = override.get("del", [])
self.__set = override.get("set", {})
self.__if = override.get("if", None)
self.__replaceRaw = override.get("replace", {})
self.__init()
def __init(self):
try:
self.__replace = { key : (re.compile(subst["pattern"]), subst["replacement"])
for (key, subst) in self.__replaceRaw.items() }
except re.error as e:
raise ParseError("Invalid scmOverrides replace pattern: '{}': {}"
.format(e.pattern, str(e)))
def __getstate__(self):
return (self.__match, self.__del, self.__set, self.__replaceRaw, self.__if)
def __setstate__(self, s):
(self.__match, self.__del, self.__set, self.__replaceRaw, self.__if) = s
self.__init()
def __doesMatch(self, scm, env):
if self.__if is not None and not env.evaluate(self.__if, "scmOverride::if"): return False
for (key, value) in self.__match.items():
if key not in scm: return False
if type(scm[key]) != type(value): return False
if isinstance(value, str):
                value = env.substitute(value, "scmOverride::match")
if not fnmatch.fnmatchcase(scm[key], value): return False
else:
if scm[key] != value: return False
return True
def __hash__(self):
return hash((frozenset(self.__match.items()), frozenset(self.__del),
frozenset(self.__set.items()), frozenset(self.__replace.items())))
def __eq__(self, other):
return ((self.__match, self.__del, self.__set, self.__replace) ==
(other.__match, other.__del, other.__set, other.__replace))
    def __applyEnv(self, env):
        rm = [ env.substitute(d, "scmOverrides::del") for d in self.__del ]
        set = {
            k : env.substitute(v, "scmOverrides::set: "+k) if isinstance(v, str) else v
            for (k,v) in self.__set.items()
        }
        return rm, set
def mangle(self, scm, env):
ret = False
if self.__doesMatch(scm, env):
rm, set = self.__applyEnv(env)
ret = True
scm = scm.copy()
for d in rm:
if d in scm: del scm[d]
scm.update(set)
for (key, (pat, repl)) in self.__replace.items():
if key in scm:
scm[key] = re.sub(pat, repl, scm[key])
return ret, scm
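    # Minimal usage sketch (the override spec shape follows __init__ above; the
    # env object is assumed to provide the substitute()/evaluate() methods this
    # class calls):
    #
    #   ov = ScmOverride({"match": {"url": "*example.org*"},
    #                     "set": {"branch": "stable"}})
    #   matched, scm = ov.mangle({"url": "git@example.org:x", "branch": "dev"}, env)
    #   # matched is True and scm["branch"] == "stable"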
def __str__(self):
import yaml
spec = {}
if self.__match: spec['match'] = self.__match
if self.__del: spec['del'] = self.__del
if self.__set: spec['set'] = self.__set
if self.__replaceRaw: spec['replace'] = self.__replaceRaw
return yaml.dump(spec, default_flow_style=False).rstrip()
class ScmTaint(Enum):
"""
The taint flags are single letter flags that indicate certain states of the
SCM.
Their meaning is as follows:
- attic - Recipe changed. Will be moved to attic.
- collides - New checkout but obstructed by existing file.
- error - Something went really wrong when getting status.
- modified - The SCM has been modified wrt. checked out commit.
- new - New checkout.
- overridden - A scmOverrides entry applies.
- switched - The SCM branch/tag/commit was changed by the user.
- unknown - Not enough information to get further status.
- unpushed_main - Configured branch with commits not in remote.
- unpushed_local - Some local branch with unpushed commits exists.
"""
attic = 'A'
collides = 'C'
error = 'E'
modified = 'M'
new = 'N'
overridden = 'O'
switched = 'S'
unknown = '?'
unpushed_main = 'U'
unpushed_local = 'u'
class ScmStatus:
""""
Describes an SCM status wrt. recipe.
The important status is stored as a set of ScmTaint flags. Additionally the
'description' field holds any output from the SCM tool that is interesting
to the user to judge the SCM status. This is only shown in verbose output
mode.
"""
def __init__(self, flag=None, description=""):
self.__flags = {}
if flag is not None:
self.__flags[flag] = description
def __str__(self):
return "".join(sorted(f.value for f in self.flags))
@property
def clean(self):
"""
Is SCM branch/tag/commit the same as specified in the recipe and no
local changes?
"""
return not self.dirty
@property
def dirty(self):
"""
Is SCM is dirty?
Could be: errors, modified files or switched to another
branch/tag/commit/repo. Unpushed commits on the configured branch also
        count as dirty because they are locally committed changes that are not
visible upstream. On the other hand unpushed changes on unrelated
branches (unpushed_local) do not count.
"""
return bool(self.flags & {ScmTaint.modified, ScmTaint.error,
ScmTaint.switched, ScmTaint.unpushed_main})
@property
def error(self):
"""
Check if SCM is in an error state.
Set if the SCM command returned a error code or something unexpected
happened while gathering the status.
"""
return ScmTaint.error in self.flags
@property
def expendable(self):
"""
        Could the SCM be deleted without losing user data?
This is more strict than 'dirty' because it includes unrelated local
branches that the user might have created.
"""
return not self.dirty and self.flags.isdisjoint(
{ScmTaint.unpushed_local, ScmTaint.unknown})
@property
def flags(self):
return frozenset(self.__flags.keys())
def description(self, subset=None):
if subset:
flags = {
flag : description for flag,description in self.__flags.items()
if flag in subset
}
else:
flags = self.__flags
# join active descriptions sorted by flag value
return joinLines(*(d for f,d in
sorted(flags.items(), key=lambda x: x[0].value)))
def add(self, flag, description=""):
if flag in self.__flags:
self.__flags[flag] = joinLines(self.__flags[flag], description)
else:
self.__flags[flag] = description
def merge(self, other):
for flag,description in other.__flags.items():
self.add(flag, description)
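    # Usage sketch: taint flags accumulate and the properties interpret them.
    #
    #   status = ScmStatus(ScmTaint.modified, "src/main.c changed")
    #   status.add(ScmTaint.unpushed_local)
    #   str(status)        # "Mu" -- flag letters, sorted
    #   status.dirty       # True  (modified counts as dirty)
    #   status.expendable  # False (dirty, plus local unpushed commits)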
class Scm(metaclass=ABCMeta):
def __init__(self, spec, overrides):
# Recipe foobar, checkoutSCM dir:., url:asdf
self.__source = spec.get("__source", "<unknown>") + " in checkoutSCM: dir:" + \
spec.get("dir", ".") + ", url:" + spec.get("url", "?")
self.__recipe = spec["recipe"]
self.__overrides = overrides
def _diffSpec(self, oldSpec):
newSpec = self.getProperties(False)
ret = set()
for k in sorted(set(oldSpec.keys()) | set(newSpec.keys())):
if oldSpec.get(k) != newSpec.get(k):
ret.add(k)
ret -= SYNTHETIC_SCM_PROPS
ret -= {"if"}
return ret
def getSource(self):
return self.__source
def getProperties(self, isJenkins):
# XXX: keep in sync with SYNTHETIC_SCM_PROPS
return {
"__source" : self.__source,
"recipe" : self.__recipe,
"overridden" : bool(self.__overrides),
}
@abstractmethod
async def invoke(self, invoker):
"""Execute the SCM checkout with the passed invoker instance.
Everything must be done with the passed invoker instance. It will be
configured for the right workspace and will do the logging, error
handling and so on...
"""
def canSwitch(self, oldSpec):
"""Determine if an inline switch of a checkout from oldSpec is
possible.
The judgement is purely done on the specification of this SCM and
oldSpec. If the SCM supports a switch from oldSpec then this method
may return True. It must return False in any other case. In case it
        returns True the Scm.switch() method will be invoked to do the actual
switch in the workspace. This might still fail if the workspace is in
an unexpected state.
"""
return False
async def switch(self, workspacePath, oldSpec):
"""Try to switch the checkout in the workspace from oldSpec.
If the switch succeeds then the checkout won't be moved to the attic.
The SCM has to make sure that the result is the same as if the SCM was
moved to the attic and a fresh checkout would have been done.
"""
return False
@abstractmethod
def asDigestScript(self):
"""Return forward compatible stable string describing this SCM.
The string should represent what the SCM checks out. This is different
from the actual actions that are returned by asScript() or asJenkins()
which might evolve in future versions. The returned string is used to
        compute the various IDs and to detect changes to the SCM.
"""
return ""
def asJenkins(self, workPath, credentials, options):
"""Return Jenkins xml.etree.ElementTree fragment that does the checkout.
This is only called if hasJenkinsPlugin() returns True. In this case
asScript() is not used on Jenkins.
"""
return None
def hasJenkinsPlugin(self):
"""Does this SCM use a Jenins plugin?"""
return False
@abstractmethod
def getDirectory(self):
"""Return relative directory that this SCM owns in the workspace."""
return ""
@abstractmethod
def isDeterministic(self):
"""Return whether the SCM is deterministic."""
return False
def isLocal(self):
"""Return true if the SCM does not use any remote repository.
        Such SCMs are treated specially because there is no notion of
checkout/checkin."""
return False
def status(self, workspacePath):
"""Get SCM work-space status.
The purpose of this method is to return the status of the given
directory in the work-space. The returned value is used for 'bob
status' and to implement --clean-checkout. Shall return a ScmStatus()
object.
This method is called when building with --clean-checkout. If the
        returned ScmStatus object's 'error' or 'dirty' properties are True then
the SCM is moved to the attic, while clean directories are not.
"""
return ScmStatus()
def getActiveOverrides(self):
"""Return list of ScmOverride objects that matched this SCM."""
return self.__overrides
def getAuditSpec(self):
"""Return spec for audit trail generation.
Must return a tuple of three elements. The first element is a string that
is used to find the right Audit class (see bob.audit.Artifact.SCMS).
The second element is a relative directory in the workspace that must
be audited. The third element is a dict with additional meta information
that is passed to the audit scanner.
If the SCM does not support audit trail generation then None shall be
returned.
"""
return None
def hasLiveBuildId(self):
"""Check if live build-ids are supported."""
return False
async def predictLiveBuildId(self, step):
"""Query server to predict live build-id."""
return None
def calcLiveBuildId(self, workspacePath):
"""Calculate live build-id from workspace."""
return None
def getLiveBuildIdSpec(self, workspacePath):
"""Generate spec lines for bob-hash-engine."""
return None
class ScmAudit(metaclass=ABCMeta):
@classmethod
async def fromDir(cls, workspace, dir, extra):
"""Create SCM audit record by scanning a directory"""
scm = cls()
await scm._scanDir(workspace, dir, extra)
return scm
@classmethod
def fromData(cls, data):
"""Restore SCM audit from serialized record"""
scm = cls()
scm._load(data)
return scm
@abstractmethod
    async def _scanDir(self, workspace, dir, extra):
"""Scan directory for SCM"""
pass
@abstractmethod
def _load(self, data):
"""Load from persisted record"""
pass
@abstractmethod
def dump(self):
"""Serialize state into an ElementTree.Element"""
pass
def getStatusLine(self):
return "unknown"
| gpl-3.0 |
osvalr/odoo | addons/account_bank_statement_extensions/__openerp__.py | 378 | 2357 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Bank Statement Extensions to Support e-banking',
'version': '0.3',
'license': 'AGPL-3',
'author': 'Noviat',
'category': 'Generic Modules/Accounting',
'description': '''
Module that extends the standard account_bank_statement_line object for improved e-banking support.
===================================================================================================
This module adds:
-----------------
- valuta date
- batch payments
- traceability of changes to bank statement lines
- bank statement line views
- bank statements balances report
- performance improvements for digital import of bank statement (via
'ebanking_import' context flag)
- name_search on res.partner.bank enhanced to allow search on bank
and iban account numbers
''',
'depends': ['account'],
'demo': [],
'data' : [
'security/ir.model.access.csv',
'account_bank_statement_view.xml',
'account_bank_statement_report.xml',
'wizard/confirm_statement_line_wizard.xml',
'wizard/cancel_statement_line_wizard.xml',
'data/account_bank_statement_extensions_data.xml',
'views/report_bankstatementbalance.xml',
],
'auto_install': False,
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ulodciv/cluster_deployer | src/pgha_deployer.py | 2 | 6541 | import logging
import json
from argparse import ArgumentParser
from datetime import timedelta
from functools import partial
from ipaddress import IPv4Interface, ip_interface, IPv4Address
from pathlib import PurePosixPath, Path
from time import time
from deployerlib.cluster import Cluster
from deployerlib.ha import HA
from deployerlib.pg import PG
from deployerlib.vm import VBox
class PgHaVm(HA, PG, VBox):
def __init__(self, **kwargs):
super(PgHaVm, self).__init__(**kwargs)
def pgha_deploy_db(self, demo_db_file):
local_db_file = Path(demo_db_file)
db_file_name = local_db_file.name
remote_db_file = PurePosixPath("/tmp") / db_file_name
self.sftp_put(local_db_file, remote_db_file, self.pg_user)
db = local_db_file.stem
self.ssh_run_check(
f"cd /tmp && tar -xf {db_file_name} && rm -f {db_file_name}",
user=self.pg_user)
self.pg_drop_db(db)
self.pg_create_db(db)
self.ssh_run_check(
f"cd /tmp/{db} && {self.psql} -p {self.pg_port} -v ON_ERROR_STOP=1 "
f"-t -q -f install.sql {db}",
user=self.pg_user)
self.ssh_run_check(f'rm -rf /tmp/{db}')
class PgHaCluster(Cluster):
def __init__(self, *, cluster_def, **kwargs):
super(PgHaCluster, self).__init__(
cluster_def=cluster_def, vm_class=PgHaVm, **kwargs)
self.master = None
self.demo_db = cluster_def["demo_db"]
self.pgha_file = cluster_def["pgha_file"]
self.pgha_resource = cluster_def["pgha_resource"]
self.pgha_resource_master = f"{self.pgha_resource}-master"
self.pgha_resource_master_ip = f"{self.pgha_resource_master}-ip"
self.virtual_ip = cluster_def["virtual_ip"]
def deploy(self):
self.deploy_base()
self.pgha_put_pgha_on_nodes()
self.pgha_setup_master()
self.pgha_setup_slaves()
self.pgha_setup_ra()
@property
def standbies(self):
return [vm for vm in self.vms if vm != self.master]
def pgha_put_pgha_on_nodes(self):
remote_ra = "/usr/lib/ocf/resource.d/heartbeat/pgha"
for vm in self.vms:
vm.sftp_put(self.pgha_file, remote_ra)
vm.ssh_run_check(f"chmod +x {remote_ra}")
def pgha_setup_master(self):
self.master = self.vms[0]
master = self.master
master.pg_start()
master.pgha_deploy_db(self.demo_db)
master.pg_create_replication_user()
hba_file = master.pg_hba_file
for vm in self.vms:
cmds = [
f'echo "host replication {h.pg_repl_user} {h.ip}/32 trust" '
f'>> {hba_file}'
for h in self.vms]
vm.ssh_run_check(cmds, user=vm.pg_user)
master.pg_make_master(self.vms)
master.pg_restart()
master.pg_add_replication_slots(self.standbies)
def pgha_setup_slaves(self):
master = self.master
self.call([partial(m.pg_backup, master) for m in self.standbies])
for vm in self.vms:
if vm == master:
vm.pg_write_recovery_conf()
else:
vm.pg_write_recovery_conf(master.name)
self.call([partial(m.pg_start_stop) for m in self.standbies])
master.pg_stop()
def pgha_setup_ra(self):
master = self.master
master.ha_base_setup(self.vms)
master.ha_set_migration_threshold(5)
master.ha_set_resource_stickiness(10)
master.ha_disable_stonith()
self.pgha_configure_cib()
def ha_get_vip_ipv4(self):
if type(self.virtual_ip) is IPv4Interface:
return self.virtual_ip
if type(self.virtual_ip) is IPv4Address:
return IPv4Interface(str(self.virtual_ip) + "/24")
if "/" in self.virtual_ip:
return ip_interface(self.virtual_ip)
return IPv4Interface(self.virtual_ip + "/24")
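    # For example (per the branches above, a /24 prefix is assumed when none is
    # given): "10.0.0.50" -> IPv4Interface("10.0.0.50/24"), while an explicit
    # "10.0.0.50/16" is kept as IPv4Interface("10.0.0.50/16").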
def pgha_configure_cib(self):
master = self.master
master.ha_get_cib()
# pg_host: tcp or unix_socket_directories?
if "unix_socket_directories" in master.pg_conf_dict:
l = master.pg_conf_dict["unix_socket_directories"].split(",")
if len(l) > 0 and l[0]:
pg_host = l[0]
else:
pg_host = "localhost"
else:
pg_host = "/tmp"
# pgha
master.ha_pcs_xml(
f'resource create {self.pgha_resource} ocf:heartbeat:pgha '
f'pgbindir={master.pg_bindir} '
f'pgdata={master.pg_data_directory} '
f'pgconf={master.pg_config_file} '
f'pgport={master.pg_port} '
f'pghost={pg_host} '
f'op start timeout=60s '
f'op stop timeout=60s '
f'op promote timeout=120s '
f'op demote timeout=120s '
f'op monitor interval=5s timeout=10s role="Master" '
f'op monitor interval=6s timeout=10s role="Slave" '
f'op notify timeout=60s')
master.ha_pcs_xml(
f"resource master {self.pgha_resource_master} {self.pgha_resource} "
f"clone-max=10 notify=true")
# VIP
ipv4 = self.ha_get_vip_ipv4()
pgha_resource_master_ip = self.pgha_resource_master_ip
master.ha_pcs_xml(
f"resource create {pgha_resource_master_ip} ocf:heartbeat:IPaddr2 "
f"ip={ipv4.ip} cidr_netmask={ipv4.network.prefixlen}")
master.ha_pcs_xml(
f"constraint colocation add {pgha_resource_master_ip} "
f"with master {self.pgha_resource_master} INFINITY")
master.ha_pcs_xml(
f"constraint order promote {self.pgha_resource_master} "
f"then start {pgha_resource_master_ip}")
master.ha_cib_push()
def parse_args():
parser = ArgumentParser(description='Deploy a cluster')
parser.add_argument("json_file", help="Cluster definition (JSON)")
parser.add_argument('--use-threads', action='store_true', default=True)
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
start = time()
with open(args.json_file) as f:
cluster = PgHaCluster(
cluster_def=json.load(f), use_threads=args.use_threads)
cluster.deploy()
logging.getLogger("main").debug(f"took {timedelta(seconds=time() - start)}")
| mit |
FreekingDean/home-assistant | homeassistant/components/binary_sensor/trend.py | 9 | 4571 | """
A sensor that monitors trends in other components.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.trend/
"""
import asyncio
import logging
import voluptuous as vol
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.components.binary_sensor import (
BinarySensorDevice,
ENTITY_ID_FORMAT,
PLATFORM_SCHEMA,
SENSOR_CLASSES_SCHEMA)
from homeassistant.const import (
ATTR_FRIENDLY_NAME,
ATTR_ENTITY_ID,
CONF_SENSOR_CLASS,
STATE_UNKNOWN,)
from homeassistant.helpers.entity import generate_entity_id
from homeassistant.helpers.event import track_state_change
_LOGGER = logging.getLogger(__name__)
CONF_SENSORS = 'sensors'
CONF_ATTRIBUTE = 'attribute'
CONF_INVERT = 'invert'
SENSOR_SCHEMA = vol.Schema({
vol.Required(ATTR_ENTITY_ID): cv.entity_id,
vol.Optional(CONF_ATTRIBUTE): cv.string,
vol.Optional(ATTR_FRIENDLY_NAME): cv.string,
vol.Optional(CONF_INVERT, default=False): cv.boolean,
vol.Optional(CONF_SENSOR_CLASS, default=None): SENSOR_CLASSES_SCHEMA
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_SENSORS): vol.Schema({cv.slug: SENSOR_SCHEMA}),
})
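# Illustrative configuration.yaml entry matching the schemas above (the entity
# and sensor names are made up):
#
# binary_sensor:
#   - platform: trend
#     sensors:
#       cpu_speed_rising:
#         entity_id: sensor.cpu_speed
#         friendly_name: CPU speed rising
#         invert: false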
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the trend sensors."""
sensors = []
for device, device_config in config[CONF_SENSORS].items():
entity_id = device_config[ATTR_ENTITY_ID]
attribute = device_config.get(CONF_ATTRIBUTE)
friendly_name = device_config.get(ATTR_FRIENDLY_NAME, device)
sensor_class = device_config[CONF_SENSOR_CLASS]
invert = device_config[CONF_INVERT]
sensors.append(
SensorTrend(
hass,
device,
friendly_name,
entity_id,
attribute,
sensor_class,
invert)
)
if not sensors:
_LOGGER.error("No sensors added")
return False
add_devices(sensors)
return True
class SensorTrend(BinarySensorDevice):
"""Representation of a trend Sensor."""
def __init__(self, hass, device_id, friendly_name,
target_entity, attribute, sensor_class, invert):
"""Initialize the sensor."""
self._hass = hass
self.entity_id = generate_entity_id(ENTITY_ID_FORMAT, device_id,
hass=hass)
self._name = friendly_name
self._target_entity = target_entity
self._attribute = attribute
self._sensor_class = sensor_class
self._invert = invert
self._state = None
self.from_state = None
self.to_state = None
@callback
def trend_sensor_state_listener(entity, old_state, new_state):
"""Called when the target device changes state."""
self.from_state = old_state
self.to_state = new_state
hass.async_add_job(self.async_update_ha_state(True))
track_state_change(hass, target_entity,
trend_sensor_state_listener)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def is_on(self):
"""Return true if sensor is on."""
return self._state
@property
def sensor_class(self):
"""Return the sensor class of the sensor."""
return self._sensor_class
@property
def should_poll(self):
"""No polling needed."""
return False
@asyncio.coroutine
def async_update(self):
"""Get the latest data and update the states."""
if self.from_state is None or self.to_state is None:
return
if (self.from_state.state == STATE_UNKNOWN or
self.to_state.state == STATE_UNKNOWN):
return
try:
if self._attribute:
from_value = float(
self.from_state.attributes.get(self._attribute))
to_value = float(
self.to_state.attributes.get(self._attribute))
else:
from_value = float(self.from_state.state)
to_value = float(self.to_state.state)
self._state = to_value > from_value
if self._invert:
self._state = not self._state
except (ValueError, TypeError) as ex:
self._state = None
_LOGGER.error(ex)
| mit |
pyfa-org/eos | tests/integration/stats/slot/test_launcher.py | 1 | 5726 | # ==============================================================================
# Copyright (C) 2011 Diego Duclos
# Copyright (C) 2011-2018 Anton Vorobyov
#
# This file is part of Eos.
#
# Eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Eos. If not, see <http://www.gnu.org/licenses/>.
# ==============================================================================
from eos import EffectMode
from eos import ModuleHigh
from eos import Ship
from eos.const.eos import ModAffecteeFilter
from eos.const.eos import ModDomain
from eos.const.eos import ModOperator
from eos.const.eve import AttrId
from eos.const.eve import EffectCategoryId
from eos.const.eve import EffectId
from tests.integration.stats.testcase import StatsTestCase
class TestLauncherSlot(StatsTestCase):
def setUp(self):
StatsTestCase.setUp(self)
self.mkattr(attr_id=AttrId.launcher_slots_left)
self.effect = self.mkeffect(
effect_id=EffectId.launcher_fitted,
category_id=EffectCategoryId.passive)
def test_output(self):
# Check that modified attribute of ship is used
src_attr = self.mkattr()
modifier = self.mkmod(
affectee_filter=ModAffecteeFilter.item,
affectee_domain=ModDomain.self,
affectee_attr_id=AttrId.launcher_slots_left,
operator=ModOperator.post_mul,
affector_attr_id=src_attr.id)
mod_effect = self.mkeffect(
category_id=EffectCategoryId.passive,
modifiers=[modifier])
self.fit.ship = Ship(self.mktype(
attrs={AttrId.launcher_slots_left: 3, src_attr.id: 2},
effects=[mod_effect]).id)
# Verification
self.assertEqual(self.fit.stats.launcher_slots.total, 6)
# Cleanup
self.assert_solsys_buffers_empty(self.fit.solar_system)
self.assert_log_entries(0)
def test_output_ship_absent(self):
# Verification
self.assertEqual(self.fit.stats.launcher_slots.total, 0)
# Cleanup
self.assert_solsys_buffers_empty(self.fit.solar_system)
self.assert_log_entries(0)
def test_output_ship_attr_absent(self):
self.fit.ship = Ship(self.mktype().id)
# Verification
self.assertEqual(self.fit.stats.launcher_slots.total, 0)
# Cleanup
self.assert_solsys_buffers_empty(self.fit.solar_system)
self.assert_log_entries(0)
def test_output_ship_not_loaded(self):
self.fit.ship = Ship(self.allocate_type_id())
# Verification
self.assertEqual(self.fit.stats.launcher_slots.total, 0)
# Cleanup
self.assert_solsys_buffers_empty(self.fit.solar_system)
self.assert_log_entries(0)
def test_use_multiple(self):
self.fit.modules.high.append(
ModuleHigh(self.mktype(effects=[self.effect]).id))
self.fit.modules.high.append(
ModuleHigh(self.mktype(effects=[self.effect]).id))
# Verification
self.assertEqual(self.fit.stats.launcher_slots.used, 2)
# Cleanup
self.assert_solsys_buffers_empty(self.fit.solar_system)
self.assert_log_entries(0)
def test_use_multiple_with_none(self):
self.fit.modules.high.place(
1, ModuleHigh(self.mktype(effects=[self.effect]).id))
self.fit.modules.high.place(
3, ModuleHigh(self.mktype(effects=[self.effect]).id))
# Verification
# Positions do not matter
self.assertEqual(self.fit.stats.launcher_slots.used, 2)
# Cleanup
self.assert_solsys_buffers_empty(self.fit.solar_system)
self.assert_log_entries(0)
def test_use_item_effect_absent(self):
item1 = ModuleHigh(self.mktype(effects=[self.effect]).id)
item2 = ModuleHigh(self.mktype().id)
self.fit.modules.high.append(item1)
self.fit.modules.high.append(item2)
# Verification
self.assertEqual(self.fit.stats.launcher_slots.used, 1)
# Cleanup
self.assert_solsys_buffers_empty(self.fit.solar_system)
self.assert_log_entries(0)
def test_use_item_effect_disabled(self):
item1 = ModuleHigh(self.mktype(effects=[self.effect]).id)
item2 = ModuleHigh(self.mktype(effects=[self.effect]).id)
item2.set_effect_mode(self.effect.id, EffectMode.force_stop)
self.fit.modules.high.append(item1)
self.fit.modules.high.append(item2)
# Verification
self.assertEqual(self.fit.stats.launcher_slots.used, 1)
# Cleanup
self.assert_solsys_buffers_empty(self.fit.solar_system)
self.assert_log_entries(0)
def test_use_item_absent(self):
# Verification
self.assertEqual(self.fit.stats.launcher_slots.used, 0)
# Cleanup
self.assert_solsys_buffers_empty(self.fit.solar_system)
self.assert_log_entries(0)
def test_use_item_not_loaded(self):
self.fit.modules.high.append(ModuleHigh(self.allocate_type_id()))
# Verification
self.assertEqual(self.fit.stats.launcher_slots.used, 0)
# Cleanup
self.assert_solsys_buffers_empty(self.fit.solar_system)
self.assert_log_entries(0)
| lgpl-3.0 |
xasos/crowdsource-platform | crowdsourcing/serializers/message.py | 10 | 2784 | from crowdsourcing import models
from datetime import datetime
from rest_framework import serializers
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from crowdsourcing.serializers.dynamic import DynamicFieldsModelSerializer
from rest_framework.exceptions import ValidationError
from crowdsourcing.models import Conversation, Message, ConversationRecipient, UserMessage
class MessageSerializer(DynamicFieldsModelSerializer):
class Meta:
model = models.Message
fields = ('id', 'conversation', 'sender', 'created_timestamp', 'last_updated', 'body', 'status')
read_only_fields = ('created_timestamp', 'last_updated', 'sender')
def create(self, **kwargs):
message = Message.objects.get_or_create(sender=kwargs['sender'], **self.validated_data)
for recipient in message[0].conversation.recipients.all():
UserMessage.objects.get_or_create(user=recipient, message=message[0])
class ConversationSerializer(DynamicFieldsModelSerializer):
recipients = serializers.PrimaryKeyRelatedField(queryset=User.objects.all(), many=True)
messages = MessageSerializer(many=True, read_only=True)
class Meta:
model = models.Conversation
fields = ('id', 'subject', 'sender', 'created_timestamp', 'last_updated', 'recipients', 'messages')
read_only_fields = ('created_timestamp', 'last_updated', 'sender')
def create(self, **kwargs):
recipients = self.validated_data.pop('recipients')
conversation = Conversation.objects.get_or_create(sender=kwargs['sender'], **self.validated_data)
for recipient in recipients:
ConversationRecipient.objects.get_or_create(conversation=conversation[0], recipient=recipient)
class CommentSerializer(DynamicFieldsModelSerializer):
sender_alias = serializers.SerializerMethodField()
posted_time = serializers.SerializerMethodField()
class Meta:
model = models.Comment
fields = ('id', 'sender', 'body', 'parent', 'deleted', 'created_timestamp', 'last_updated', 'sender_alias', 'posted_time')
read_only_fields = ('sender', 'sender_alias', 'posted_time')
def get_sender_alias(self, obj):
if hasattr(obj.sender, 'requester'):
return obj.sender.requester.alias
elif hasattr(obj.sender, 'worker'):
return obj.sender.worker.alias
else:
return 'unknown'
def get_posted_time(self, obj):
from crowdsourcing.utils import get_time_delta
delta = get_time_delta(obj.created_timestamp)
return delta
def create(self, **kwargs):
comment = models.Comment.objects.create(sender=kwargs['sender'], deleted=False, **self.validated_data)
return comment
| mit |
Theer108/invenio | invenio/modules/deposit/fields/notes.py | 15 | 1283 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2012, 2013, 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from wtforms import TextAreaField
from invenio.modules.deposit.field_base import WebDepositField
__all__ = ['NotesField']
class NotesField(WebDepositField, TextAreaField):
def __init__(self, **kwargs):
import warnings
warnings.warn("Field has been deprecated", PendingDeprecationWarning)
defaults = dict(
icon='list',
widget_classes="form-control"
)
defaults.update(kwargs)
super(NotesField, self).__init__(**defaults)
| gpl-2.0 |
lillian-lemmer/hypatia | hypatia/resources.py | 3 | 8680 | # This module is part of Hypatia and is released under the
# MIT license: http://opensource.org/licenses/MIT
"""These are utilities which are commonly utilized
by all modules in Hypatia. It provides the ugly,
underlying components of miscellaneous actions which
assist other modules, and does not do much on its own.
"""
import os
import zipfile
from io import BytesIO
try:
import ConfigParser as configparser
from cStringIO import StringIO
except ImportError:
import configparser
from io import StringIO
import pygame
from hypatia.animatedsprite import AnimatedSprite
class Resource(object):
"""A zip archive in the resources directory, located by
supplying a resource category and name. Files are stored
    as a str, BytesIO, AnimatedSprite, or ConfigParser, in a
    dictionary. Files are referenced by filepath/filename.
    Attributes:
        files (dict): Key is file name, value can be one of str,
            BytesIO, AnimatedSprite, or ConfigParser objects.
Example:
>>> from hypatia import animatedsprite as anim
>>> resource = Resource('walkabouts', 'debug')
>>> 'only.gif' in resource
True
>>> isinstance(resource['only.gif'], anim.AnimatedSprite)
True
>>> resource = Resource('scenes', 'debug')
>>> resource['tilemap.txt'].startswith('debug')
True
"""
def __init__(self, resource_category, resource_name):
"""Load a resource ZIP using a category and zip name.
Args:
resource_category (str): E.g., tilesheets, walkabouts.
resource_name (str): E.g., debug.
"""
# The default path for a resource is:
# ./resource_category/resource_name
# We'll be looking for an archive or directory that
# looks something like these examples:
# * ./resources/walkabouts/hat
# * ./resources/scenes/debug.zip
# Keep in mind that directories are chosen over
# zip archives (if the names are the same).
path = os.path.join('resources',
resource_category,
resource_name)
# Once files have been collected from the aforementioned
# path, the files will be passed through their respective
# file_handler, if available for the given file extension.
file_handlers = {
'.ini': load_ini,
'.gif': load_gif,
'.png': load_png,
'.txt': load_txt,
}
# 1. Create a dictionary, where the key is the file name
# (including extension) and the value is the result
# of using x.open(path).read().
files = {}
# choose between loading as an unpacked directory, or a zip file.
# unpacked takes priority.
if os.path.isdir(path):
# go through each file in the supplied path, making an
# entry in the files dictionary, whose value is the
# file data (bytesio) and key is file name.
for file_name in os.listdir(path):
file_path = os.path.join(path, file_name)
file_data = open(file_path, "rb").read()
files[file_name] = file_data
# we're dealing with a zip file for our resources
else:
with zipfile.ZipFile(path + ".zip") as zip_file:
for file_name in zip_file.namelist():
# because namelist will also generate
# the directories
if not file_name:
continue
file_data = zip_file.open(file_name).read()
files[file_name] = file_data
# 2. "Prepare" the "raw file data" from the files
# dictionary we just created. If a given file's
# file extension is in file_handlers, the data
# will be updated by an associated function.
for file_name in files.keys():
file_data = files[file_name]
file_extension = os.path.splitext(file_name)[1]
# if there is a known "handler" for this extension,
# we want the file data for this file to be the output
# of said handler
if file_extension in file_handlers:
file_data = file_handlers[file_extension](files, file_name)
files[file_name] = file_data
self.files = files
def __getitem__(self, file_name):
return self.files[file_name]
def __contains__(self, item):
return item in self.files
def get_type(self, file_extension):
"""Return a dictionary of files which have the file extension
specified. Remember to include the dot, e.g., ".gif"!
Arg:
file_extension (str): the file extension (including dot) of
the files to return.
Warning:
Remember to include the dot in the file extension, e.g., ".gif".
Returns:
dict|None: {file name: file content} of files which have the
file extension specified. If no files match,
None is returned.
"""
matching_files = {}
for file_name, file_content in self.files.items():
if os.path.splitext(file_name)[1] == file_extension:
matching_files[file_name] = file_content
return matching_files or None
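    # Usage sketch for get_type(), reusing the resources from the class
    # docstring above:
    #
    #   resource = Resource('walkabouts', 'debug')
    #   resource.get_type('.gif')   # {'only.gif': <AnimatedSprite>} or None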
def load_png(files, file_name):
"""Return an BytesIO object based on supplied file. This is
a file handler for Resource.
Args:
files (dict): Resources files, whereas key is the file name,
and the value is the untouched file contents itself.
file_name (str): File from "files" to use for making an
AnimatedSprite object.
Returns:
AnimatedSprite: --
See Also:
* Resources.__init__()
* animations.AnimatedSprite
"""
return BytesIO(files[file_name])
def load_txt(files, file_name):
"""Return a decoded string based on supplied file. This is
a file handler for Resource.
Args:
files (dict): Resource files, whereas key is the file
name and the value is the untouched file contents
itself.
file_name (StR): File from "files" to use for making
an animatedSprite object.
Returns:
AnimatedSprite: --
See Also:
* Resources.__init__()
* animations.AnimatedSprite
"""
return files[file_name].decode('utf-8')
def load_gif(files, file_name):
"""Return an AnimatedSprite object based on a bytesio
object. This is a file handler.
Args:
files (dict): Resources files, whereas key is the file name,
and the value is the untouched file contents itself.
file_name (str): File from "files" to use for making an
AnimatedSprite object.
Returns:
AnimatedSprite: --
See Also:
* Resources.__init__()
* animations.AnimatedSprite
"""
file_data = files[file_name]
# NOTE: i used to handle this just in
# Resources.__init__()
gif_bytesio = BytesIO(file_data)
    # get the corresponding INI which configures our anchor points
# for this gif, from the files
gif_name_no_ext = os.path.splitext(file_name)[0]
try:
anchor_ini_name = gif_name_no_ext + '.ini'
anchor_config_ini = files[anchor_ini_name]
# if the INI file has not already been parsed into
# ConfigParser object, we'll do that now, so we
# can accurately construct our AnimatedSprite.
try:
anchor_config_ini.sections()
except AttributeError:
anchor_config_ini = load_ini(files, anchor_ini_name)
except KeyError:
anchor_config_ini = None
return AnimatedSprite.from_file(gif_bytesio, anchor_config_ini)
def load_ini(files, file_name):
"""Return a ConfigParser object based on a bytesio
object. This is a file handler.
Args:
files (dict): Resources files, whereas key is the file name,
and the value is a BytesIO object of said file.
file_name (str): File from "files" to use for making a
ConfigParser object.
Returns:
ConfigParser: --
See Also:
Resources.__init__()
"""
file_data = files[file_name]
# i used to do this in Resources.__init__()
file_data = file_data.decode('utf-8')
file_data = StringIO(file_data)
config = configparser.ConfigParser()
# NOTE: this still works in python 3, though it was
# replaced by config.read_file()
config.readfp(file_data)
return config
| mit |
moschlar/SAUCE | migration/versions/530b45f11128_public_submission.py | 1 | 1291 | """public_submission
Revision ID: 530b45f11128
Revises: 282efa88cdbc
Create Date: 2013-10-02 18:31:40.722832
"""
#
# # SAUCE - System for AUtomated Code Evaluation
# # Copyright (C) 2013 Moritz Schlarb
# #
# # This program is free software: you can redistribute it and/or modify
# # it under the terms of the GNU Affero General Public License as published by
# # the Free Software Foundation, either version 3 of the License, or
# # any later version.
# #
# # This program is distributed in the hope that it will be useful,
# # but WITHOUT ANY WARRANTY; without even the implied warranty of
# # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# # GNU Affero General Public License for more details.
# #
# # You should have received a copy of the GNU Affero General Public License
# # along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# revision identifiers, used by Alembic.
revision = '530b45f11128'
down_revision = '26d123af03a7'
from alembic import op
#from alembic.operations import Operations as op
import sqlalchemy as sa
def upgrade():
op.add_column('submissions',
sa.Column('public', sa.Boolean(), nullable=False,
default=False, server_default='False'))
def downgrade():
op.drop_column('submissions', 'public')
| agpl-3.0 |
40223143/cda-w15 | static/Brython3.1.3-20150514-095342/Lib/tarfile.py | 728 | 88474 | #!/usr/bin/env python3
#-------------------------------------------------------------------
# tarfile.py
#-------------------------------------------------------------------
# Copyright (C) 2002 Lars Gustaebel <lars@gustaebel.de>
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
"""Read from and write to tar format archives.
"""
version = "0.9.0"
__author__ = "Lars Gust\u00e4bel (lars@gustaebel.de)"
__date__ = "$Date: 2011-02-25 17:42:01 +0200 (Fri, 25 Feb 2011) $"
__cvsid__ = "$Id: tarfile.py 88586 2011-02-25 15:42:01Z marc-andre.lemburg $"
__credits__ = "Gustavo Niemeyer, Niels Gust\u00e4bel, Richard Townsend."
#---------
# Imports
#---------
import sys
import os
import io
import shutil
import stat
import time
import struct
import copy
import re
try:
import grp, pwd
except ImportError:
grp = pwd = None
# os.symlink on Windows prior to 6.0 raises NotImplementedError
symlink_exception = (AttributeError, NotImplementedError)
try:
# WindowsError (1314) will be raised if the caller does not hold the
# SeCreateSymbolicLinkPrivilege privilege
symlink_exception += (WindowsError,)
except NameError:
pass
# from tarfile import *
__all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError"]
from builtins import open as _open # Since 'open' is TarFile.open
#---------------------------------------------------------
# tar constants
#---------------------------------------------------------
NUL = b"\0" # the null character
BLOCKSIZE = 512 # length of processing blocks
RECORDSIZE = BLOCKSIZE * 20 # length of records
GNU_MAGIC = b"ustar \0" # magic gnu tar string
POSIX_MAGIC = b"ustar\x0000" # magic posix tar string
LENGTH_NAME = 100 # maximum length of a filename
LENGTH_LINK = 100 # maximum length of a linkname
LENGTH_PREFIX = 155 # maximum length of the prefix field
REGTYPE = b"0" # regular file
AREGTYPE = b"\0" # regular file
LNKTYPE = b"1" # link (inside tarfile)
SYMTYPE = b"2" # symbolic link
CHRTYPE = b"3" # character special device
BLKTYPE = b"4" # block special device
DIRTYPE = b"5" # directory
FIFOTYPE = b"6" # fifo special device
CONTTYPE = b"7" # contiguous file
GNUTYPE_LONGNAME = b"L" # GNU tar longname
GNUTYPE_LONGLINK = b"K" # GNU tar longlink
GNUTYPE_SPARSE = b"S" # GNU tar sparse file
XHDTYPE = b"x" # POSIX.1-2001 extended header
XGLTYPE = b"g" # POSIX.1-2001 global header
SOLARIS_XHDTYPE = b"X" # Solaris extended header
USTAR_FORMAT = 0 # POSIX.1-1988 (ustar) format
GNU_FORMAT = 1 # GNU tar format
PAX_FORMAT = 2 # POSIX.1-2001 (pax) format
DEFAULT_FORMAT = GNU_FORMAT
#---------------------------------------------------------
# tarfile constants
#---------------------------------------------------------
# File types that tarfile supports:
SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE,
SYMTYPE, DIRTYPE, FIFOTYPE,
CONTTYPE, CHRTYPE, BLKTYPE,
GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
GNUTYPE_SPARSE)
# File types that will be treated as a regular file.
REGULAR_TYPES = (REGTYPE, AREGTYPE,
CONTTYPE, GNUTYPE_SPARSE)
# File types that are part of the GNU tar format.
GNU_TYPES = (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
GNUTYPE_SPARSE)
# Fields from a pax header that override a TarInfo attribute.
PAX_FIELDS = ("path", "linkpath", "size", "mtime",
"uid", "gid", "uname", "gname")
# Fields from a pax header that are affected by hdrcharset.
PAX_NAME_FIELDS = {"path", "linkpath", "uname", "gname"}
# Fields in a pax header that are numbers, all other fields
# are treated as strings.
PAX_NUMBER_FIELDS = {
"atime": float,
"ctime": float,
"mtime": float,
"uid": int,
"gid": int,
"size": int
}
#---------------------------------------------------------
# Bits used in the mode field, values in octal.
#---------------------------------------------------------
S_IFLNK = 0o120000 # symbolic link
S_IFREG = 0o100000 # regular file
S_IFBLK = 0o060000 # block device
S_IFDIR = 0o040000 # directory
S_IFCHR = 0o020000 # character device
S_IFIFO = 0o010000 # fifo
TSUID = 0o4000 # set UID on execution
TSGID = 0o2000 # set GID on execution
TSVTX = 0o1000 # reserved
TUREAD = 0o400 # read by owner
TUWRITE = 0o200 # write by owner
TUEXEC = 0o100 # execute/search by owner
TGREAD = 0o040 # read by group
TGWRITE = 0o020 # write by group
TGEXEC = 0o010 # execute/search by group
TOREAD = 0o004 # read by other
TOWRITE = 0o002 # write by other
TOEXEC = 0o001 # execute/search by other
#---------------------------------------------------------
# initialization
#---------------------------------------------------------
if os.name in ("nt", "ce"):
ENCODING = "utf-8"
else:
ENCODING = sys.getfilesystemencoding()
#---------------------------------------------------------
# Some useful functions
#---------------------------------------------------------
def stn(s, length, encoding, errors):
"""Convert a string to a null-terminated bytes object.
"""
s = s.encode(encoding, errors)
return s[:length] + (length - len(s)) * NUL
def nts(s, encoding, errors):
"""Convert a null-terminated bytes object to a string.
"""
p = s.find(b"\0")
if p != -1:
s = s[:p]
return s.decode(encoding, errors)
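# For example: stn("foo", 8, "ascii", "strict") == b"foo\x00\x00\x00\x00\x00"
# and nts(b"foo\x00bar", "ascii", "strict") == "foo" -- the pair round-trips
# null-padded header fields.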
def nti(s):
"""Convert a number field to a python number.
"""
# There are two possible encodings for a number field, see
# itn() below.
if s[0] in (0o200, 0o377):
n = 0
for i in range(len(s) - 1):
n <<= 8
n += s[i + 1]
if s[0] == 0o377:
n = -(256 ** (len(s) - 1) - n)
else:
try:
n = int(nts(s, "ascii", "strict") or "0", 8)
except ValueError:
raise InvalidHeaderError("invalid header")
return n
def itn(n, digits=8, format=DEFAULT_FORMAT):
"""Convert a python number to a number field.
"""
# POSIX 1003.1-1988 requires numbers to be encoded as a string of
# octal digits followed by a null-byte, this allows values up to
# (8**(digits-1))-1. GNU tar allows storing numbers greater than
# that if necessary. A leading 0o200 or 0o377 byte indicate this
# particular encoding, the following digits-1 bytes are a big-endian
# base-256 representation. This allows values up to (256**(digits-1))-1.
# A 0o200 byte indicates a positive number, a 0o377 byte a negative
# number.
if 0 <= n < 8 ** (digits - 1):
s = bytes("%0*o" % (digits - 1, n), "ascii") + NUL
elif format == GNU_FORMAT and -256 ** (digits - 1) <= n < 256 ** (digits - 1):
if n >= 0:
s = bytearray([0o200])
else:
s = bytearray([0o377])
n = 256 ** digits + n
for i in range(digits - 1):
s.insert(1, n & 0o377)
n >>= 8
else:
raise ValueError("overflow in number field")
return s
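# For example: itn(0o755) == b"0000755\x00" (the value fits in plain octal),
# while itn(-1, format=GNU_FORMAT) == b"\xff" * 8 -- the GNU base-256 encoding
# takes over for values the octal form cannot represent.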
def calc_chksums(buf):
"""Calculate the checksum for a member's header by summing up all
characters except for the chksum field which is treated as if
it was filled with spaces. According to the GNU tar sources,
some tars (Sun and NeXT) calculate chksum with signed char,
which will be different if there are chars in the buffer with
the high bit set. So we calculate two checksums, unsigned and
signed.
"""
unsigned_chksum = 256 + sum(struct.unpack_from("148B8x356B", buf))
signed_chksum = 256 + sum(struct.unpack_from("148b8x356b", buf))
return unsigned_chksum, signed_chksum
def copyfileobj(src, dst, length=None):
"""Copy length bytes from fileobj src to fileobj dst.
If length is None, copy the entire content.
"""
if length == 0:
return
if length is None:
shutil.copyfileobj(src, dst)
return
BUFSIZE = 16 * 1024
blocks, remainder = divmod(length, BUFSIZE)
for b in range(blocks):
buf = src.read(BUFSIZE)
if len(buf) < BUFSIZE:
raise IOError("end of file reached")
dst.write(buf)
if remainder != 0:
buf = src.read(remainder)
if len(buf) < remainder:
raise IOError("end of file reached")
dst.write(buf)
return
def filemode(mode):
"""Deprecated in this location; use stat.filemode."""
import warnings
warnings.warn("deprecated in favor of stat.filemode",
DeprecationWarning, 2)
return stat.filemode(mode)
class TarError(Exception):
"""Base exception."""
pass
class ExtractError(TarError):
"""General exception for extract errors."""
pass
class ReadError(TarError):
"""Exception for unreadable tar archives."""
pass
class CompressionError(TarError):
"""Exception for unavailable compression methods."""
pass
class StreamError(TarError):
"""Exception for unsupported operations on stream-like TarFiles."""
pass
class HeaderError(TarError):
"""Base exception for header errors."""
pass
class EmptyHeaderError(HeaderError):
"""Exception for empty headers."""
pass
class TruncatedHeaderError(HeaderError):
"""Exception for truncated headers."""
pass
class EOFHeaderError(HeaderError):
"""Exception for end of file headers."""
pass
class InvalidHeaderError(HeaderError):
"""Exception for invalid headers."""
pass
class SubsequentHeaderError(HeaderError):
"""Exception for missing and invalid extended headers."""
pass
#---------------------------
# internal stream interface
#---------------------------
class _LowLevelFile:
"""Low-level file object. Supports reading and writing.
It is used instead of a regular file object for streaming
access.
"""
def __init__(self, name, mode):
mode = {
"r": os.O_RDONLY,
"w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC,
}[mode]
if hasattr(os, "O_BINARY"):
mode |= os.O_BINARY
self.fd = os.open(name, mode, 0o666)
def close(self):
os.close(self.fd)
def read(self, size):
return os.read(self.fd, size)
def write(self, s):
os.write(self.fd, s)
class _Stream:
"""Class that serves as an adapter between TarFile and
a stream-like object. The stream-like object only
needs to have a read() or write() method and is accessed
blockwise. Use of gzip or bzip2 compression is possible.
A stream-like object could be for example: sys.stdin,
sys.stdout, a socket, a tape device etc.
_Stream is intended to be used only internally.
"""
def __init__(self, name, mode, comptype, fileobj, bufsize):
"""Construct a _Stream object.
"""
self._extfileobj = True
if fileobj is None:
fileobj = _LowLevelFile(name, mode)
self._extfileobj = False
if comptype == '*':
# Enable transparent compression detection for the
# stream interface
fileobj = _StreamProxy(fileobj)
comptype = fileobj.getcomptype()
self.name = name or ""
self.mode = mode
self.comptype = comptype
self.fileobj = fileobj
self.bufsize = bufsize
self.buf = b""
self.pos = 0
self.closed = False
try:
if comptype == "gz":
try:
import zlib
except ImportError:
raise CompressionError("zlib module is not available")
self.zlib = zlib
self.crc = zlib.crc32(b"")
if mode == "r":
self._init_read_gz()
self.exception = zlib.error
else:
self._init_write_gz()
elif comptype == "bz2":
try:
import bz2
except ImportError:
raise CompressionError("bz2 module is not available")
if mode == "r":
self.dbuf = b""
self.cmp = bz2.BZ2Decompressor()
self.exception = IOError
else:
self.cmp = bz2.BZ2Compressor()
elif comptype == "xz":
try:
import lzma
except ImportError:
raise CompressionError("lzma module is not available")
if mode == "r":
self.dbuf = b""
self.cmp = lzma.LZMADecompressor()
self.exception = lzma.LZMAError
else:
self.cmp = lzma.LZMACompressor()
elif comptype != "tar":
raise CompressionError("unknown compression type %r" % comptype)
except:
if not self._extfileobj:
self.fileobj.close()
self.closed = True
raise
def __del__(self):
if hasattr(self, "closed") and not self.closed:
self.close()
def _init_write_gz(self):
"""Initialize for writing with gzip compression.
"""
self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED,
-self.zlib.MAX_WBITS,
self.zlib.DEF_MEM_LEVEL,
0)
timestamp = struct.pack("<L", int(time.time()))
self.__write(b"\037\213\010\010" + timestamp + b"\002\377")
if self.name.endswith(".gz"):
self.name = self.name[:-3]
# RFC1952 says we must use ISO-8859-1 for the FNAME field.
self.__write(self.name.encode("iso-8859-1", "replace") + NUL)
def write(self, s):
"""Write string s to the stream.
"""
if self.comptype == "gz":
self.crc = self.zlib.crc32(s, self.crc)
self.pos += len(s)
if self.comptype != "tar":
s = self.cmp.compress(s)
self.__write(s)
def __write(self, s):
"""Write string s to the stream if a whole new block
is ready to be written.
"""
self.buf += s
while len(self.buf) > self.bufsize:
self.fileobj.write(self.buf[:self.bufsize])
self.buf = self.buf[self.bufsize:]
def close(self):
"""Close the _Stream object. No operation should be
done on it afterwards.
"""
if self.closed:
return
if self.mode == "w" and self.comptype != "tar":
self.buf += self.cmp.flush()
if self.mode == "w" and self.buf:
self.fileobj.write(self.buf)
self.buf = b""
if self.comptype == "gz":
# The native zlib crc is an unsigned 32-bit integer, but
# the Python wrapper implicitly casts that to a signed C
# long. So, on a 32-bit box self.crc may "look negative",
# while the same crc on a 64-bit box may "look positive".
# To avoid irksome warnings from the `struct` module, force
# it to look positive on all boxes.
self.fileobj.write(struct.pack("<L", self.crc & 0xffffffff))
self.fileobj.write(struct.pack("<L", self.pos & 0xffffFFFF))
if not self._extfileobj:
self.fileobj.close()
self.closed = True
def _init_read_gz(self):
"""Initialize for reading a gzip compressed fileobj.
"""
self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS)
self.dbuf = b""
# taken from gzip.GzipFile with some alterations
if self.__read(2) != b"\037\213":
raise ReadError("not a gzip file")
if self.__read(1) != b"\010":
raise CompressionError("unsupported compression method")
flag = ord(self.__read(1))
self.__read(6)
if flag & 4:
xlen = ord(self.__read(1)) + 256 * ord(self.__read(1))
self.read(xlen)
if flag & 8:
while True:
s = self.__read(1)
if not s or s == NUL:
break
if flag & 16:
while True:
s = self.__read(1)
if not s or s == NUL:
break
if flag & 2:
self.__read(2)
def tell(self):
"""Return the stream's file pointer position.
"""
return self.pos
def seek(self, pos=0):
"""Set the stream's file pointer to pos. Negative seeking
is forbidden.
"""
if pos - self.pos >= 0:
blocks, remainder = divmod(pos - self.pos, self.bufsize)
for i in range(blocks):
self.read(self.bufsize)
self.read(remainder)
else:
raise StreamError("seeking backwards is not allowed")
return self.pos
def read(self, size=None):
"""Return the next size number of bytes from the stream.
If size is not defined, return all bytes of the stream
up to EOF.
"""
if size is None:
t = []
while True:
buf = self._read(self.bufsize)
if not buf:
break
t.append(buf)
buf = "".join(t)
else:
buf = self._read(size)
self.pos += len(buf)
return buf
def _read(self, size):
"""Return size bytes from the stream.
"""
if self.comptype == "tar":
return self.__read(size)
c = len(self.dbuf)
while c < size:
buf = self.__read(self.bufsize)
if not buf:
break
try:
buf = self.cmp.decompress(buf)
except self.exception:
raise ReadError("invalid compressed data")
self.dbuf += buf
c += len(buf)
buf = self.dbuf[:size]
self.dbuf = self.dbuf[size:]
return buf
def __read(self, size):
"""Return size bytes from stream. If internal buffer is empty,
read another block from the stream.
"""
c = len(self.buf)
while c < size:
buf = self.fileobj.read(self.bufsize)
if not buf:
break
self.buf += buf
c += len(buf)
buf = self.buf[:size]
self.buf = self.buf[size:]
return buf
# class _Stream
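# Illustrative sketch (not part of the original module): _Stream backs
# the "r|*"/"w|*" stream modes of TarFile.open(). The helper below shows
# how a caller could read from a non-seekable, transparently decompressed
# stream; the function name and print output are made-up examples.
def _example_stream_read(fileobj):
    # fileobj only needs a read() method (e.g. a socket's makefile() or
    # sys.stdin.buffer); seeking backwards is impossible in this mode.
    with TarFile.open(fileobj=fileobj, mode="r|*") as tar:
        for tarinfo in tar:
            print(tarinfo.name, tarinfo.size)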
class _StreamProxy(object):
"""Small proxy class that enables transparent compression
detection for the Stream interface (mode 'r|*').
"""
def __init__(self, fileobj):
self.fileobj = fileobj
self.buf = self.fileobj.read(BLOCKSIZE)
def read(self, size):
self.read = self.fileobj.read
return self.buf
def getcomptype(self):
if self.buf.startswith(b"\x1f\x8b\x08"):
return "gz"
elif self.buf[0:3] == b"BZh" and self.buf[4:10] == b"1AY&SY":
return "bz2"
elif self.buf.startswith((b"\x5d\x00\x00\x80", b"\xfd7zXZ")):
return "xz"
else:
return "tar"
def close(self):
self.fileobj.close()
# class _StreamProxy
#------------------------
# Extraction file object
#------------------------
class _FileInFile(object):
"""A thin wrapper around an existing file object that
provides a part of its data as an individual file
object.
"""
def __init__(self, fileobj, offset, size, blockinfo=None):
self.fileobj = fileobj
self.offset = offset
self.size = size
self.position = 0
self.name = getattr(fileobj, "name", None)
self.closed = False
if blockinfo is None:
blockinfo = [(0, size)]
# Construct a map with data and zero blocks.
self.map_index = 0
self.map = []
lastpos = 0
realpos = self.offset
for offset, size in blockinfo:
if offset > lastpos:
self.map.append((False, lastpos, offset, None))
self.map.append((True, offset, offset + size, realpos))
realpos += size
lastpos = offset + size
if lastpos < self.size:
self.map.append((False, lastpos, self.size, None))
def flush(self):
pass
def readable(self):
return True
def writable(self):
return False
def seekable(self):
return self.fileobj.seekable()
def tell(self):
"""Return the current file position.
"""
return self.position
def seek(self, position, whence=io.SEEK_SET):
"""Seek to a position in the file.
"""
if whence == io.SEEK_SET:
self.position = min(max(position, 0), self.size)
elif whence == io.SEEK_CUR:
if position < 0:
self.position = max(self.position + position, 0)
else:
self.position = min(self.position + position, self.size)
elif whence == io.SEEK_END:
self.position = max(min(self.size + position, self.size), 0)
else:
raise ValueError("Invalid argument")
return self.position
def read(self, size=None):
"""Read data from the file.
"""
if size is None:
size = self.size - self.position
else:
size = min(size, self.size - self.position)
buf = b""
while size > 0:
while True:
data, start, stop, offset = self.map[self.map_index]
if start <= self.position < stop:
break
else:
self.map_index += 1
if self.map_index == len(self.map):
self.map_index = 0
length = min(size, stop - self.position)
if data:
self.fileobj.seek(offset + (self.position - start))
buf += self.fileobj.read(length)
else:
buf += NUL * length
size -= length
self.position += length
return buf
def readinto(self, b):
buf = self.read(len(b))
b[:len(buf)] = buf
return len(buf)
def close(self):
self.closed = True
# class _FileInFile
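# Illustrative sketch (not part of the original module): _FileInFile
# exposes a window of an underlying file object; sparse holes read back
# as NUL bytes. The offsets and payload below are made-up examples.
def _example_file_in_file():
    import io
    raw = io.BytesIO(b"HEADER" + b"payload-bytes" + b"TRAILER")
    # Expose only the 13 "payload" bytes that start at offset 6.
    window = _FileInFile(raw, offset=6, size=13)
    assert window.read() == b"payload-bytes"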
class ExFileObject(io.BufferedReader):
def __init__(self, tarfile, tarinfo):
fileobj = _FileInFile(tarfile.fileobj, tarinfo.offset_data,
tarinfo.size, tarinfo.sparse)
super().__init__(fileobj)
# class ExFileObject
#------------------
# Exported Classes
#------------------
class TarInfo(object):
"""Informational class which holds the details about an
archive member given by a tar header block.
TarInfo objects are returned by TarFile.getmember(),
TarFile.getmembers() and TarFile.gettarinfo() and are
usually created internally.
"""
__slots__ = ("name", "mode", "uid", "gid", "size", "mtime",
"chksum", "type", "linkname", "uname", "gname",
"devmajor", "devminor",
"offset", "offset_data", "pax_headers", "sparse",
"tarfile", "_sparse_structs", "_link_target")
def __init__(self, name=""):
"""Construct a TarInfo object. name is the optional name
of the member.
"""
self.name = name # member name
self.mode = 0o644 # file permissions
self.uid = 0 # user id
self.gid = 0 # group id
self.size = 0 # file size
self.mtime = 0 # modification time
self.chksum = 0 # header checksum
self.type = REGTYPE # member type
self.linkname = "" # link name
self.uname = "" # user name
self.gname = "" # group name
self.devmajor = 0 # device major number
self.devminor = 0 # device minor number
self.offset = 0 # the tar header starts here
self.offset_data = 0 # the file's data starts here
self.sparse = None # sparse member information
self.pax_headers = {} # pax header information
# In pax headers the "name" and "linkname" field are called
# "path" and "linkpath".
def _getpath(self):
return self.name
def _setpath(self, name):
self.name = name
path = property(_getpath, _setpath)
def _getlinkpath(self):
return self.linkname
def _setlinkpath(self, linkname):
self.linkname = linkname
linkpath = property(_getlinkpath, _setlinkpath)
def __repr__(self):
return "<%s %r at %#x>" % (self.__class__.__name__,self.name,id(self))
def get_info(self):
"""Return the TarInfo's attributes as a dictionary.
"""
info = {
"name": self.name,
"mode": self.mode & 0o7777,
"uid": self.uid,
"gid": self.gid,
"size": self.size,
"mtime": self.mtime,
"chksum": self.chksum,
"type": self.type,
"linkname": self.linkname,
"uname": self.uname,
"gname": self.gname,
"devmajor": self.devmajor,
"devminor": self.devminor
}
if info["type"] == DIRTYPE and not info["name"].endswith("/"):
info["name"] += "/"
return info
def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="surrogateescape"):
"""Return a tar header as a string of 512 byte blocks.
"""
info = self.get_info()
if format == USTAR_FORMAT:
return self.create_ustar_header(info, encoding, errors)
elif format == GNU_FORMAT:
return self.create_gnu_header(info, encoding, errors)
elif format == PAX_FORMAT:
return self.create_pax_header(info, encoding)
else:
raise ValueError("invalid format")
def create_ustar_header(self, info, encoding, errors):
"""Return the object as a ustar header block.
"""
info["magic"] = POSIX_MAGIC
if len(info["linkname"]) > LENGTH_LINK:
raise ValueError("linkname is too long")
if len(info["name"]) > LENGTH_NAME:
info["prefix"], info["name"] = self._posix_split_name(info["name"])
return self._create_header(info, USTAR_FORMAT, encoding, errors)
def create_gnu_header(self, info, encoding, errors):
"""Return the object as a GNU header block sequence.
"""
info["magic"] = GNU_MAGIC
buf = b""
if len(info["linkname"]) > LENGTH_LINK:
buf += self._create_gnu_long_header(info["linkname"], GNUTYPE_LONGLINK, encoding, errors)
if len(info["name"]) > LENGTH_NAME:
buf += self._create_gnu_long_header(info["name"], GNUTYPE_LONGNAME, encoding, errors)
return buf + self._create_header(info, GNU_FORMAT, encoding, errors)
def create_pax_header(self, info, encoding):
"""Return the object as a ustar header block. If it cannot be
represented this way, prepend a pax extended header sequence
with supplemental information.
"""
info["magic"] = POSIX_MAGIC
pax_headers = self.pax_headers.copy()
# Test string fields for values that exceed the field length or cannot
# be represented in ASCII encoding.
for name, hname, length in (
("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK),
("uname", "uname", 32), ("gname", "gname", 32)):
if hname in pax_headers:
# The pax header has priority.
continue
# Try to encode the string as ASCII.
try:
info[name].encode("ascii", "strict")
except UnicodeEncodeError:
pax_headers[hname] = info[name]
continue
if len(info[name]) > length:
pax_headers[hname] = info[name]
# Test number fields for values that exceed the field limit or that
# are floats and therefore cannot be stored exactly.
for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)):
if name in pax_headers:
# The pax header has priority. Avoid overflow.
info[name] = 0
continue
val = info[name]
if not 0 <= val < 8 ** (digits - 1) or isinstance(val, float):
pax_headers[name] = str(val)
info[name] = 0
# Create a pax extended header if necessary.
if pax_headers:
buf = self._create_pax_generic_header(pax_headers, XHDTYPE, encoding)
else:
buf = b""
return buf + self._create_header(info, USTAR_FORMAT, "ascii", "replace")
@classmethod
def create_pax_global_header(cls, pax_headers):
"""Return the object as a pax global header block sequence.
"""
return cls._create_pax_generic_header(pax_headers, XGLTYPE, "utf-8")
def _posix_split_name(self, name):
"""Split a name longer than 100 chars into a prefix
and a name part.
"""
prefix = name[:LENGTH_PREFIX + 1]
while prefix and prefix[-1] != "/":
prefix = prefix[:-1]
name = name[len(prefix):]
prefix = prefix[:-1]
if not prefix or len(name) > LENGTH_NAME:
raise ValueError("name is too long")
return prefix, name
@staticmethod
def _create_header(info, format, encoding, errors):
"""Return a header block. info is a dictionary with file
information, format must be one of the *_FORMAT constants.
"""
parts = [
stn(info.get("name", ""), 100, encoding, errors),
itn(info.get("mode", 0) & 0o7777, 8, format),
itn(info.get("uid", 0), 8, format),
itn(info.get("gid", 0), 8, format),
itn(info.get("size", 0), 12, format),
itn(info.get("mtime", 0), 12, format),
b" ", # checksum field
info.get("type", REGTYPE),
stn(info.get("linkname", ""), 100, encoding, errors),
info.get("magic", POSIX_MAGIC),
stn(info.get("uname", ""), 32, encoding, errors),
stn(info.get("gname", ""), 32, encoding, errors),
itn(info.get("devmajor", 0), 8, format),
itn(info.get("devminor", 0), 8, format),
stn(info.get("prefix", ""), 155, encoding, errors)
]
buf = struct.pack("%ds" % BLOCKSIZE, b"".join(parts))
chksum = calc_chksums(buf[-BLOCKSIZE:])[0]
buf = buf[:-364] + bytes("%06o\0" % chksum, "ascii") + buf[-357:]
return buf
@staticmethod
def _create_payload(payload):
"""Return the string payload filled with zero bytes
up to the next 512 byte border.
"""
blocks, remainder = divmod(len(payload), BLOCKSIZE)
if remainder > 0:
payload += (BLOCKSIZE - remainder) * NUL
return payload
@classmethod
def _create_gnu_long_header(cls, name, type, encoding, errors):
"""Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence
for name.
"""
name = name.encode(encoding, errors) + NUL
info = {}
info["name"] = "././@LongLink"
info["type"] = type
info["size"] = len(name)
info["magic"] = GNU_MAGIC
# create extended header + name blocks.
return cls._create_header(info, USTAR_FORMAT, encoding, errors) + \
cls._create_payload(name)
@classmethod
def _create_pax_generic_header(cls, pax_headers, type, encoding):
"""Return a POSIX.1-2008 extended or global header sequence
that contains a list of keyword, value pairs. The values
must be strings.
"""
# Check if one of the fields contains surrogate characters and thereby
# forces hdrcharset=BINARY, see _proc_pax() for more information.
binary = False
for keyword, value in pax_headers.items():
try:
value.encode("utf-8", "strict")
except UnicodeEncodeError:
binary = True
break
records = b""
if binary:
# Put the hdrcharset field at the beginning of the header.
records += b"21 hdrcharset=BINARY\n"
for keyword, value in pax_headers.items():
keyword = keyword.encode("utf-8")
if binary:
# Try to restore the original byte representation of `value'.
# Needless to say, the encoding must match the string.
value = value.encode(encoding, "surrogateescape")
else:
value = value.encode("utf-8")
l = len(keyword) + len(value) + 3 # ' ' + '=' + '\n'
n = p = 0
while True:
n = l + len(str(p))
if n == p:
break
p = n
records += bytes(str(p), "ascii") + b" " + keyword + b"=" + value + b"\n"
# We use a hardcoded "././@PaxHeader" name like star does
# instead of the one that POSIX recommends.
info = {}
info["name"] = "././@PaxHeader"
info["type"] = type
info["size"] = len(records)
info["magic"] = POSIX_MAGIC
# Create pax header + record blocks.
return cls._create_header(info, USTAR_FORMAT, "ascii", "replace") + \
cls._create_payload(records)
@classmethod
def frombuf(cls, buf, encoding, errors):
"""Construct a TarInfo object from a 512 byte bytes object.
"""
if len(buf) == 0:
raise EmptyHeaderError("empty header")
if len(buf) != BLOCKSIZE:
raise TruncatedHeaderError("truncated header")
if buf.count(NUL) == BLOCKSIZE:
raise EOFHeaderError("end of file header")
chksum = nti(buf[148:156])
if chksum not in calc_chksums(buf):
raise InvalidHeaderError("bad checksum")
obj = cls()
obj.name = nts(buf[0:100], encoding, errors)
obj.mode = nti(buf[100:108])
obj.uid = nti(buf[108:116])
obj.gid = nti(buf[116:124])
obj.size = nti(buf[124:136])
obj.mtime = nti(buf[136:148])
obj.chksum = chksum
obj.type = buf[156:157]
obj.linkname = nts(buf[157:257], encoding, errors)
obj.uname = nts(buf[265:297], encoding, errors)
obj.gname = nts(buf[297:329], encoding, errors)
obj.devmajor = nti(buf[329:337])
obj.devminor = nti(buf[337:345])
prefix = nts(buf[345:500], encoding, errors)
# Old V7 tar format represents a directory as a regular
# file with a trailing slash.
if obj.type == AREGTYPE and obj.name.endswith("/"):
obj.type = DIRTYPE
# The old GNU sparse format occupies some of the unused
# space in the buffer for up to 4 sparse structures.
# Save them for later processing in _proc_sparse().
if obj.type == GNUTYPE_SPARSE:
pos = 386
structs = []
for i in range(4):
try:
offset = nti(buf[pos:pos + 12])
numbytes = nti(buf[pos + 12:pos + 24])
except ValueError:
break
structs.append((offset, numbytes))
pos += 24
isextended = bool(buf[482])
origsize = nti(buf[483:495])
obj._sparse_structs = (structs, isextended, origsize)
# Remove redundant slashes from directories.
if obj.isdir():
obj.name = obj.name.rstrip("/")
# Reconstruct a ustar longname.
if prefix and obj.type not in GNU_TYPES:
obj.name = prefix + "/" + obj.name
return obj
@classmethod
def fromtarfile(cls, tarfile):
"""Return the next TarInfo object from TarFile object
tarfile.
"""
buf = tarfile.fileobj.read(BLOCKSIZE)
obj = cls.frombuf(buf, tarfile.encoding, tarfile.errors)
obj.offset = tarfile.fileobj.tell() - BLOCKSIZE
return obj._proc_member(tarfile)
#--------------------------------------------------------------------------
# The following are methods that are called depending on the type of a
# member. The entry point is _proc_member() which can be overridden in a
# subclass to add custom _proc_*() methods. A _proc_*() method MUST
# implement the following operations:
# 1. Set self.offset_data to the position where the data blocks begin,
# if there is data that follows.
# 2. Set tarfile.offset to the position where the next member's header will
# begin.
# 3. Return self or another valid TarInfo object.
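#
# Illustrative sketch (not part of the original module): a TarInfo
# subclass could route a custom header type through its own handler
# while honoring the contract above (CUSTOM_TYPE is hypothetical):
#
#     class MyTarInfo(TarInfo):
#         def _proc_member(self, tarfile):
#             if self.type == CUSTOM_TYPE:
#                 return self._proc_custom(tarfile)
#             return super()._proc_member(tarfile)
#         def _proc_custom(self, tarfile):
#             self.offset_data = tarfile.fileobj.tell()
#             tarfile.offset = self.offset_data + self._block(self.size)
#             return self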
def _proc_member(self, tarfile):
"""Choose the right processing method depending on
the type and call it.
"""
if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK):
return self._proc_gnulong(tarfile)
elif self.type == GNUTYPE_SPARSE:
return self._proc_sparse(tarfile)
elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE):
return self._proc_pax(tarfile)
else:
return self._proc_builtin(tarfile)
def _proc_builtin(self, tarfile):
"""Process a builtin type or an unknown type which
will be treated as a regular file.
"""
self.offset_data = tarfile.fileobj.tell()
offset = self.offset_data
if self.isreg() or self.type not in SUPPORTED_TYPES:
# Skip the following data blocks.
offset += self._block(self.size)
tarfile.offset = offset
# Patch the TarInfo object with saved global
# header information.
self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors)
return self
def _proc_gnulong(self, tarfile):
"""Process the blocks that hold a GNU longname
or longlink member.
"""
buf = tarfile.fileobj.read(self._block(self.size))
# Fetch the next header and process it.
try:
next = self.fromtarfile(tarfile)
except HeaderError:
raise SubsequentHeaderError("missing or bad subsequent header")
# Patch the TarInfo object from the next header with
# the longname information.
next.offset = self.offset
if self.type == GNUTYPE_LONGNAME:
next.name = nts(buf, tarfile.encoding, tarfile.errors)
elif self.type == GNUTYPE_LONGLINK:
next.linkname = nts(buf, tarfile.encoding, tarfile.errors)
return next
def _proc_sparse(self, tarfile):
"""Process a GNU sparse header plus extra headers.
"""
# We already collected some sparse structures in frombuf().
structs, isextended, origsize = self._sparse_structs
del self._sparse_structs
# Collect sparse structures from extended header blocks.
while isextended:
buf = tarfile.fileobj.read(BLOCKSIZE)
pos = 0
for i in range(21):
try:
offset = nti(buf[pos:pos + 12])
numbytes = nti(buf[pos + 12:pos + 24])
except ValueError:
break
if offset and numbytes:
structs.append((offset, numbytes))
pos += 24
isextended = bool(buf[504])
self.sparse = structs
self.offset_data = tarfile.fileobj.tell()
tarfile.offset = self.offset_data + self._block(self.size)
self.size = origsize
return self
def _proc_pax(self, tarfile):
"""Process an extended or global header as described in
POSIX.1-2008.
"""
# Read the header information.
buf = tarfile.fileobj.read(self._block(self.size))
# A pax header stores supplemental information for either
# the following file (extended) or all following files
# (global).
if self.type == XGLTYPE:
pax_headers = tarfile.pax_headers
else:
pax_headers = tarfile.pax_headers.copy()
# Check if the pax header contains a hdrcharset field. This tells us
# the encoding of the path, linkpath, uname and gname fields. Normally,
# these fields are UTF-8 encoded, but POSIX.1-2008 allows tar
# implementations to store them as raw binary strings if the
# translation to UTF-8 fails.
match = re.search(br"\d+ hdrcharset=([^\n]+)\n", buf)
if match is not None:
pax_headers["hdrcharset"] = match.group(1).decode("utf-8")
# For the time being, we don't care about anything other than "BINARY".
# The only other value that is currently allowed by the standard is
# "ISO-IR 10646 2000 UTF-8" in other words UTF-8.
hdrcharset = pax_headers.get("hdrcharset")
if hdrcharset == "BINARY":
encoding = tarfile.encoding
else:
encoding = "utf-8"
# Parse pax header information. A record looks like this:
# "%d %s=%s\n" % (length, keyword, value). length is the size
# of the complete record including the length field itself and
# the newline. keyword and value are both UTF-8 encoded strings.
regex = re.compile(br"(\d+) ([^=]+)=")
pos = 0
while True:
match = regex.match(buf, pos)
if not match:
break
length, keyword = match.groups()
length = int(length)
value = buf[match.end(2) + 1:match.start(1) + length - 1]
# Normally, we could just use "utf-8" as the encoding and "strict"
# as the error handler, but we better not take the risk. For
# example, GNU tar <= 1.23 is known to store filenames it cannot
# translate to UTF-8 as raw strings (unfortunately without a
# hdrcharset=BINARY header).
# We first try the strict standard encoding, and if that fails we
# fall back on the user's encoding and error handler.
keyword = self._decode_pax_field(keyword, "utf-8", "utf-8",
tarfile.errors)
if keyword in PAX_NAME_FIELDS:
value = self._decode_pax_field(value, encoding, tarfile.encoding,
tarfile.errors)
else:
value = self._decode_pax_field(value, "utf-8", "utf-8",
tarfile.errors)
pax_headers[keyword] = value
pos += length
# Fetch the next header.
try:
next = self.fromtarfile(tarfile)
except HeaderError:
raise SubsequentHeaderError("missing or bad subsequent header")
# Process GNU sparse information.
if "GNU.sparse.map" in pax_headers:
# GNU extended sparse format version 0.1.
self._proc_gnusparse_01(next, pax_headers)
elif "GNU.sparse.size" in pax_headers:
# GNU extended sparse format version 0.0.
self._proc_gnusparse_00(next, pax_headers, buf)
elif pax_headers.get("GNU.sparse.major") == "1" and pax_headers.get("GNU.sparse.minor") == "0":
# GNU extended sparse format version 1.0.
self._proc_gnusparse_10(next, pax_headers, tarfile)
if self.type in (XHDTYPE, SOLARIS_XHDTYPE):
# Patch the TarInfo object with the extended header info.
next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors)
next.offset = self.offset
if "size" in pax_headers:
# If the extended header replaces the size field,
# we need to recalculate the offset where the next
# header starts.
offset = next.offset_data
if next.isreg() or next.type not in SUPPORTED_TYPES:
offset += next._block(next.size)
tarfile.offset = offset
return next
def _proc_gnusparse_00(self, next, pax_headers, buf):
"""Process a GNU tar extended sparse header, version 0.0.
"""
offsets = []
for match in re.finditer(br"\d+ GNU.sparse.offset=(\d+)\n", buf):
offsets.append(int(match.group(1)))
numbytes = []
for match in re.finditer(br"\d+ GNU.sparse.numbytes=(\d+)\n", buf):
numbytes.append(int(match.group(1)))
next.sparse = list(zip(offsets, numbytes))
def _proc_gnusparse_01(self, next, pax_headers):
"""Process a GNU tar extended sparse header, version 0.1.
"""
sparse = [int(x) for x in pax_headers["GNU.sparse.map"].split(",")]
next.sparse = list(zip(sparse[::2], sparse[1::2]))
def _proc_gnusparse_10(self, next, pax_headers, tarfile):
"""Process a GNU tar extended sparse header, version 1.0.
"""
fields = None
sparse = []
buf = tarfile.fileobj.read(BLOCKSIZE)
fields, buf = buf.split(b"\n", 1)
fields = int(fields)
while len(sparse) < fields * 2:
if b"\n" not in buf:
buf += tarfile.fileobj.read(BLOCKSIZE)
number, buf = buf.split(b"\n", 1)
sparse.append(int(number))
next.offset_data = tarfile.fileobj.tell()
next.sparse = list(zip(sparse[::2], sparse[1::2]))
def _apply_pax_info(self, pax_headers, encoding, errors):
"""Replace fields with supplemental information from a previous
pax extended or global header.
"""
for keyword, value in pax_headers.items():
if keyword == "GNU.sparse.name":
setattr(self, "path", value)
elif keyword == "GNU.sparse.size":
setattr(self, "size", int(value))
elif keyword == "GNU.sparse.realsize":
setattr(self, "size", int(value))
elif keyword in PAX_FIELDS:
if keyword in PAX_NUMBER_FIELDS:
try:
value = PAX_NUMBER_FIELDS[keyword](value)
except ValueError:
value = 0
if keyword == "path":
value = value.rstrip("/")
setattr(self, keyword, value)
self.pax_headers = pax_headers.copy()
def _decode_pax_field(self, value, encoding, fallback_encoding, fallback_errors):
"""Decode a single field from a pax record.
"""
try:
return value.decode(encoding, "strict")
except UnicodeDecodeError:
return value.decode(fallback_encoding, fallback_errors)
def _block(self, count):
"""Round up a byte count by BLOCKSIZE and return it,
e.g. _block(834) => 1024.
"""
blocks, remainder = divmod(count, BLOCKSIZE)
if remainder:
blocks += 1
return blocks * BLOCKSIZE
def isreg(self):
return self.type in REGULAR_TYPES
def isfile(self):
return self.isreg()
def isdir(self):
return self.type == DIRTYPE
def issym(self):
return self.type == SYMTYPE
def islnk(self):
return self.type == LNKTYPE
def ischr(self):
return self.type == CHRTYPE
def isblk(self):
return self.type == BLKTYPE
def isfifo(self):
return self.type == FIFOTYPE
def issparse(self):
return self.sparse is not None
def isdev(self):
return self.type in (CHRTYPE, BLKTYPE, FIFOTYPE)
# class TarInfo
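# Illustrative sketch (not part of the original module): TarInfo objects
# can be built by hand and written with TarFile.addfile(); the member
# name and payload below are made-up examples.
def _example_manual_member(tar):
    import io
    data = b"hello world\n"
    tarinfo = TarInfo(name="hello.txt")
    tarinfo.size = len(data)
    tar.addfile(tarinfo, io.BytesIO(data))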
class TarFile(object):
"""The TarFile Class provides an interface to tar archives.
"""
debug = 0 # May be set from 0 (no msgs) to 3 (all msgs)
dereference = False # If true, add content of linked file to the
# tar file, else the link.
ignore_zeros = False # If true, skips empty or invalid blocks and
# continues processing.
errorlevel = 1 # If 0, fatal errors only appear in debug
# messages (if debug >= 0). If > 0, errors
# are passed to the caller as exceptions.
format = DEFAULT_FORMAT # The format to use when creating an archive.
encoding = ENCODING # Encoding for 8-bit character strings.
errors = None # Error handler for unicode conversion.
tarinfo = TarInfo # The default TarInfo class to use.
fileobject = ExFileObject # The file-object for extractfile().
def __init__(self, name=None, mode="r", fileobj=None, format=None,
tarinfo=None, dereference=None, ignore_zeros=None, encoding=None,
errors="surrogateescape", pax_headers=None, debug=None, errorlevel=None):
"""Open an (uncompressed) tar archive `name'. `mode' is either 'r' to
read from an existing archive, 'a' to append data to an existing
file or 'w' to create a new file overwriting an existing one. `mode'
defaults to 'r'.
If `fileobj' is given, it is used for reading or writing data. If it
can be determined, `mode' is overridden by `fileobj's mode.
`fileobj' is not closed when TarFile is closed.
"""
if len(mode) > 1 or mode not in "raw":
raise ValueError("mode must be 'r', 'a' or 'w'")
self.mode = mode
self._mode = {"r": "rb", "a": "r+b", "w": "wb"}[mode]
if not fileobj:
if self.mode == "a" and not os.path.exists(name):
# Create nonexistent files in append mode.
self.mode = "w"
self._mode = "wb"
fileobj = bltn_open(name, self._mode)
self._extfileobj = False
else:
if name is None and hasattr(fileobj, "name"):
name = fileobj.name
if hasattr(fileobj, "mode"):
self._mode = fileobj.mode
self._extfileobj = True
self.name = os.path.abspath(name) if name else None
self.fileobj = fileobj
# Init attributes.
if format is not None:
self.format = format
if tarinfo is not None:
self.tarinfo = tarinfo
if dereference is not None:
self.dereference = dereference
if ignore_zeros is not None:
self.ignore_zeros = ignore_zeros
if encoding is not None:
self.encoding = encoding
self.errors = errors
if pax_headers is not None and self.format == PAX_FORMAT:
self.pax_headers = pax_headers
else:
self.pax_headers = {}
if debug is not None:
self.debug = debug
if errorlevel is not None:
self.errorlevel = errorlevel
# Init datastructures.
self.closed = False
self.members = [] # list of members as TarInfo objects
self._loaded = False # flag if all members have been read
self.offset = self.fileobj.tell()
# current position in the archive file
self.inodes = {} # dictionary caching the inodes of
# archive members already added
try:
if self.mode == "r":
self.firstmember = None
self.firstmember = self.next()
if self.mode == "a":
# Move to the end of the archive,
# before the first empty block.
while True:
self.fileobj.seek(self.offset)
try:
tarinfo = self.tarinfo.fromtarfile(self)
self.members.append(tarinfo)
except EOFHeaderError:
self.fileobj.seek(self.offset)
break
except HeaderError as e:
raise ReadError(str(e))
if self.mode in "aw":
self._loaded = True
if self.pax_headers:
buf = self.tarinfo.create_pax_global_header(self.pax_headers.copy())
self.fileobj.write(buf)
self.offset += len(buf)
except:
if not self._extfileobj:
self.fileobj.close()
self.closed = True
raise
#--------------------------------------------------------------------------
# Below are the classmethods which act as alternate constructors to the
# TarFile class. The open() method is the only one that is needed for
# public use; it is the "super"-constructor and is able to select an
# adequate "sub"-constructor for a particular compression using the mapping
# from OPEN_METH.
#
# This concept allows one to subclass TarFile without losing the comfort of
# the super-constructor. A sub-constructor is registered and made available
# by adding it to the mapping in OPEN_METH.
@classmethod
def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs):
"""Open a tar archive for reading, writing or appending. Return
an appropriate TarFile class.
mode:
'r' or 'r:*' open for reading with transparent compression
'r:' open for reading exclusively uncompressed
'r:gz' open for reading with gzip compression
'r:bz2' open for reading with bzip2 compression
'r:xz' open for reading with lzma compression
'a' or 'a:' open for appending, creating the file if necessary
'w' or 'w:' open for writing without compression
'w:gz' open for writing with gzip compression
'w:bz2' open for writing with bzip2 compression
'w:xz' open for writing with lzma compression
'r|*' open a stream of tar blocks with transparent compression
'r|' open an uncompressed stream of tar blocks for reading
'r|gz' open a gzip compressed stream of tar blocks
'r|bz2' open a bzip2 compressed stream of tar blocks
'r|xz' open an lzma compressed stream of tar blocks
'w|' open an uncompressed stream for writing
'w|gz' open a gzip compressed stream for writing
'w|bz2' open a bzip2 compressed stream for writing
'w|xz' open an lzma compressed stream for writing
"""
if not name and not fileobj:
raise ValueError("nothing to open")
if mode in ("r", "r:*"):
# Find out which *open() is appropriate for opening the file.
for comptype in cls.OPEN_METH:
func = getattr(cls, cls.OPEN_METH[comptype])
if fileobj is not None:
saved_pos = fileobj.tell()
try:
return func(name, "r", fileobj, **kwargs)
except (ReadError, CompressionError) as e:
if fileobj is not None:
fileobj.seek(saved_pos)
continue
raise ReadError("file could not be opened successfully")
elif ":" in mode:
filemode, comptype = mode.split(":", 1)
filemode = filemode or "r"
comptype = comptype or "tar"
# Select the *open() function according to
# given compression.
if comptype in cls.OPEN_METH:
func = getattr(cls, cls.OPEN_METH[comptype])
else:
raise CompressionError("unknown compression type %r" % comptype)
return func(name, filemode, fileobj, **kwargs)
elif "|" in mode:
filemode, comptype = mode.split("|", 1)
filemode = filemode or "r"
comptype = comptype or "tar"
if filemode not in "rw":
raise ValueError("mode must be 'r' or 'w'")
stream = _Stream(name, filemode, comptype, fileobj, bufsize)
try:
t = cls(name, filemode, stream, **kwargs)
except:
stream.close()
raise
t._extfileobj = False
return t
elif mode in "aw":
return cls.taropen(name, mode, fileobj, **kwargs)
raise ValueError("undiscernible mode")
@classmethod
def taropen(cls, name, mode="r", fileobj=None, **kwargs):
"""Open uncompressed tar archive name for reading or writing.
"""
if len(mode) > 1 or mode not in "raw":
raise ValueError("mode must be 'r', 'a' or 'w'")
return cls(name, mode, fileobj, **kwargs)
@classmethod
def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
"""Open gzip compressed tar archive name for reading or writing.
Appending is not allowed.
"""
if len(mode) > 1 or mode not in "rw":
raise ValueError("mode must be 'r' or 'w'")
try:
import gzip
gzip.GzipFile
except (ImportError, AttributeError):
raise CompressionError("gzip module is not available")
extfileobj = fileobj is not None
try:
fileobj = gzip.GzipFile(name, mode + "b", compresslevel, fileobj)
t = cls.taropen(name, mode, fileobj, **kwargs)
except IOError:
if not extfileobj and fileobj is not None:
fileobj.close()
if fileobj is None:
raise
raise ReadError("not a gzip file")
except:
if not extfileobj and fileobj is not None:
fileobj.close()
raise
t._extfileobj = extfileobj
return t
@classmethod
def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
"""Open bzip2 compressed tar archive name for reading or writing.
Appending is not allowed.
"""
if len(mode) > 1 or mode not in "rw":
raise ValueError("mode must be 'r' or 'w'.")
try:
import bz2
except ImportError:
raise CompressionError("bz2 module is not available")
fileobj = bz2.BZ2File(fileobj or name, mode,
compresslevel=compresslevel)
try:
t = cls.taropen(name, mode, fileobj, **kwargs)
except (IOError, EOFError):
fileobj.close()
raise ReadError("not a bzip2 file")
t._extfileobj = False
return t
@classmethod
def xzopen(cls, name, mode="r", fileobj=None, preset=None, **kwargs):
"""Open lzma compressed tar archive name for reading or writing.
Appending is not allowed.
"""
if mode not in ("r", "w"):
raise ValueError("mode must be 'r' or 'w'")
try:
import lzma
except ImportError:
raise CompressionError("lzma module is not available")
fileobj = lzma.LZMAFile(fileobj or name, mode, preset=preset)
try:
t = cls.taropen(name, mode, fileobj, **kwargs)
except (lzma.LZMAError, EOFError):
fileobj.close()
raise ReadError("not an lzma file")
t._extfileobj = False
return t
# All *open() methods are registered here.
OPEN_METH = {
"tar": "taropen", # uncompressed tar
"gz": "gzopen", # gzip compressed tar
"bz2": "bz2open", # bzip2 compressed tar
"xz": "xzopen" # lzma compressed tar
}
#--------------------------------------------------------------------------
# The public methods which TarFile provides:
def close(self):
"""Close the TarFile. In write-mode, two finishing zero blocks are
appended to the archive.
"""
if self.closed:
return
if self.mode in "aw":
self.fileobj.write(NUL * (BLOCKSIZE * 2))
self.offset += (BLOCKSIZE * 2)
# fill up the end with zero-blocks
# (like option -b20 for tar does)
blocks, remainder = divmod(self.offset, RECORDSIZE)
if remainder > 0:
self.fileobj.write(NUL * (RECORDSIZE - remainder))
if not self._extfileobj:
self.fileobj.close()
self.closed = True
def getmember(self, name):
"""Return a TarInfo object for member `name'. If `name' can not be
found in the archive, KeyError is raised. If a member occurs more
than once in the archive, its last occurrence is assumed to be the
most up-to-date version.
"""
tarinfo = self._getmember(name)
if tarinfo is None:
raise KeyError("filename %r not found" % name)
return tarinfo
def getmembers(self):
"""Return the members of the archive as a list of TarInfo objects. The
list has the same order as the members in the archive.
"""
self._check()
if not self._loaded: # if we want to obtain a list of
self._load() # all members, we first have to
# scan the whole archive.
return self.members
def getnames(self):
"""Return the members of the archive as a list of their names. It has
the same order as the list returned by getmembers().
"""
return [tarinfo.name for tarinfo in self.getmembers()]
def gettarinfo(self, name=None, arcname=None, fileobj=None):
"""Create a TarInfo object for either the file `name' or the file
object `fileobj' (using os.fstat on its file descriptor). You can
modify some of the TarInfo's attributes before you add it using
addfile(). If given, `arcname' specifies an alternative name for the
file in the archive.
"""
self._check("aw")
# When fileobj is given, replace name by
# fileobj's real name.
if fileobj is not None:
name = fileobj.name
# Building the name of the member in the archive.
# Backslashes are converted to forward slashes and
# absolute paths are turned into relative paths.
if arcname is None:
arcname = name
drv, arcname = os.path.splitdrive(arcname)
arcname = arcname.replace(os.sep, "/")
arcname = arcname.lstrip("/")
# Now, fill the TarInfo object with
# information specific to the file.
tarinfo = self.tarinfo()
tarinfo.tarfile = self
# Use os.stat or os.lstat, depending on platform
# and if symlinks shall be resolved.
if fileobj is None:
if hasattr(os, "lstat") and not self.dereference:
statres = os.lstat(name)
else:
statres = os.stat(name)
else:
statres = os.fstat(fileobj.fileno())
linkname = ""
stmd = statres.st_mode
if stat.S_ISREG(stmd):
inode = (statres.st_ino, statres.st_dev)
if not self.dereference and statres.st_nlink > 1 and \
inode in self.inodes and arcname != self.inodes[inode]:
# Is it a hardlink to an already
# archived file?
type = LNKTYPE
linkname = self.inodes[inode]
else:
# The inode is added only if it is valid.
# For win32 it is always 0.
type = REGTYPE
if inode[0]:
self.inodes[inode] = arcname
elif stat.S_ISDIR(stmd):
type = DIRTYPE
elif stat.S_ISFIFO(stmd):
type = FIFOTYPE
elif stat.S_ISLNK(stmd):
type = SYMTYPE
linkname = os.readlink(name)
elif stat.S_ISCHR(stmd):
type = CHRTYPE
elif stat.S_ISBLK(stmd):
type = BLKTYPE
else:
return None
# Fill the TarInfo object with all
# information we can get.
tarinfo.name = arcname
tarinfo.mode = stmd
tarinfo.uid = statres.st_uid
tarinfo.gid = statres.st_gid
if type == REGTYPE:
tarinfo.size = statres.st_size
else:
tarinfo.size = 0
tarinfo.mtime = statres.st_mtime
tarinfo.type = type
tarinfo.linkname = linkname
if pwd:
try:
tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0]
except KeyError:
pass
if grp:
try:
tarinfo.gname = grp.getgrgid(tarinfo.gid)[0]
except KeyError:
pass
if type in (CHRTYPE, BLKTYPE):
if hasattr(os, "major") and hasattr(os, "minor"):
tarinfo.devmajor = os.major(statres.st_rdev)
tarinfo.devminor = os.minor(statres.st_rdev)
return tarinfo
def list(self, verbose=True):
"""Print a table of contents to sys.stdout. If `verbose' is False, only
the names of the members are printed. If it is True, an `ls -l'-like
output is produced.
"""
self._check()
for tarinfo in self:
if verbose:
print(stat.filemode(tarinfo.mode), end=' ')
print("%s/%s" % (tarinfo.uname or tarinfo.uid,
tarinfo.gname or tarinfo.gid), end=' ')
if tarinfo.ischr() or tarinfo.isblk():
print("%10s" % ("%d,%d" \
% (tarinfo.devmajor, tarinfo.devminor)), end=' ')
else:
print("%10d" % tarinfo.size, end=' ')
print("%d-%02d-%02d %02d:%02d:%02d" \
% time.localtime(tarinfo.mtime)[:6], end=' ')
print(tarinfo.name + ("/" if tarinfo.isdir() else ""), end=' ')
if verbose:
if tarinfo.issym():
print("->", tarinfo.linkname, end=' ')
if tarinfo.islnk():
print("link to", tarinfo.linkname, end=' ')
print()
def add(self, name, arcname=None, recursive=True, exclude=None, *, filter=None):
"""Add the file `name' to the archive. `name' may be any type of file
(directory, fifo, symbolic link, etc.). If given, `arcname'
specifies an alternative name for the file in the archive.
Directories are added recursively by default. This can be avoided by
setting `recursive' to False. `exclude' is a function that should
return True for each filename to be excluded. `filter' is a function
that expects a TarInfo object argument and returns the changed
TarInfo object. If it returns None, the TarInfo object will be
excluded from the archive.
"""
self._check("aw")
if arcname is None:
arcname = name
# Exclude pathnames.
if exclude is not None:
import warnings
warnings.warn("use the filter argument instead",
DeprecationWarning, 2)
if exclude(name):
self._dbg(2, "tarfile: Excluded %r" % name)
return
# Skip if somebody tries to archive the archive...
if self.name is not None and os.path.abspath(name) == self.name:
self._dbg(2, "tarfile: Skipped %r" % name)
return
self._dbg(1, name)
# Create a TarInfo object from the file.
tarinfo = self.gettarinfo(name, arcname)
if tarinfo is None:
self._dbg(1, "tarfile: Unsupported type %r" % name)
return
# Change or exclude the TarInfo object.
if filter is not None:
tarinfo = filter(tarinfo)
if tarinfo is None:
self._dbg(2, "tarfile: Excluded %r" % name)
return
# Append the tar header and data to the archive.
if tarinfo.isreg():
with bltn_open(name, "rb") as f:
self.addfile(tarinfo, f)
elif tarinfo.isdir():
self.addfile(tarinfo)
if recursive:
for f in os.listdir(name):
self.add(os.path.join(name, f), os.path.join(arcname, f),
recursive, exclude, filter=filter)
else:
self.addfile(tarinfo)
def addfile(self, tarinfo, fileobj=None):
"""Add the TarInfo object `tarinfo' to the archive. If `fileobj' is
given, tarinfo.size bytes are read from it and added to the archive.
You can create TarInfo objects using gettarinfo().
On Windows platforms, `fileobj' should always be opened with mode
'rb' to avoid problems caused by newline translation affecting the file size.
"""
self._check("aw")
tarinfo = copy.copy(tarinfo)
buf = tarinfo.tobuf(self.format, self.encoding, self.errors)
self.fileobj.write(buf)
self.offset += len(buf)
# If there's data to follow, append it.
if fileobj is not None:
copyfileobj(fileobj, self.fileobj, tarinfo.size)
blocks, remainder = divmod(tarinfo.size, BLOCKSIZE)
if remainder > 0:
self.fileobj.write(NUL * (BLOCKSIZE - remainder))
blocks += 1
self.offset += blocks * BLOCKSIZE
self.members.append(tarinfo)
def extractall(self, path=".", members=None):
"""Extract all members from the archive to the current working
directory and set owner, modification time and permissions on
directories afterwards. `path' specifies a different directory
to extract to. `members' is optional and must be a subset of the
list returned by getmembers().
"""
directories = []
if members is None:
members = self
for tarinfo in members:
if tarinfo.isdir():
# Extract directories with a safe mode.
directories.append(tarinfo)
tarinfo = copy.copy(tarinfo)
tarinfo.mode = 0o700
# Do not set attributes on directories here; that is done further down.
self.extract(tarinfo, path, set_attrs=not tarinfo.isdir())
# Reverse sort directories.
directories.sort(key=lambda a: a.name)
directories.reverse()
# Set correct owner, mtime and filemode on directories.
for tarinfo in directories:
dirpath = os.path.join(path, tarinfo.name)
try:
self.chown(tarinfo, dirpath)
self.utime(tarinfo, dirpath)
self.chmod(tarinfo, dirpath)
except ExtractError as e:
if self.errorlevel > 1:
raise
else:
self._dbg(1, "tarfile: %s" % e)
def extract(self, member, path="", set_attrs=True):
"""Extract a member from the archive to the current working directory,
using its full name. Its file information is extracted as accurately
as possible. `member' may be a filename or a TarInfo object. You can
specify a different directory using `path'. File attributes (owner,
mtime, mode) are set unless `set_attrs' is False.
"""
self._check("r")
if isinstance(member, str):
tarinfo = self.getmember(member)
else:
tarinfo = member
# Prepare the link target for makelink().
if tarinfo.islnk():
tarinfo._link_target = os.path.join(path, tarinfo.linkname)
try:
self._extract_member(tarinfo, os.path.join(path, tarinfo.name),
set_attrs=set_attrs)
except EnvironmentError as e:
if self.errorlevel > 0:
raise
else:
if e.filename is None:
self._dbg(1, "tarfile: %s" % e.strerror)
else:
self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename))
except ExtractError as e:
if self.errorlevel > 1:
raise
else:
self._dbg(1, "tarfile: %s" % e)
def extractfile(self, member):
"""Extract a member from the archive as a file object. `member' may be
a filename or a TarInfo object. If `member' is a regular file or a
link, an io.BufferedReader object is returned. Otherwise, None is
returned.
"""
self._check("r")
if isinstance(member, str):
tarinfo = self.getmember(member)
else:
tarinfo = member
if tarinfo.isreg() or tarinfo.type not in SUPPORTED_TYPES:
# Members with unknown types are treated as regular files.
return self.fileobject(self, tarinfo)
elif tarinfo.islnk() or tarinfo.issym():
if isinstance(self.fileobj, _Stream):
# A small but ugly workaround for the case that someone tries
# to extract a (sym)link as a file-object from a non-seekable
# stream of tar blocks.
raise StreamError("cannot extract (sym)link as file object")
else:
# A (sym)link's file object is its target's file object.
return self.extractfile(self._find_link_target(tarinfo))
else:
# If there's no data associated with the member (directory, chrdev,
# blkdev, etc.), return None instead of a file object.
return None
def _extract_member(self, tarinfo, targetpath, set_attrs=True):
"""Extract the TarInfo object tarinfo to a physical
file called targetpath.
"""
# Fetch the TarInfo object for the given name
# and build the destination pathname, replacing
# forward slashes with platform-specific separators.
targetpath = targetpath.rstrip("/")
targetpath = targetpath.replace("/", os.sep)
# Create all upper directories.
upperdirs = os.path.dirname(targetpath)
if upperdirs and not os.path.exists(upperdirs):
# Create directories that are not part of the archive with
# default permissions.
os.makedirs(upperdirs)
if tarinfo.islnk() or tarinfo.issym():
self._dbg(1, "%s -> %s" % (tarinfo.name, tarinfo.linkname))
else:
self._dbg(1, tarinfo.name)
if tarinfo.isreg():
self.makefile(tarinfo, targetpath)
elif tarinfo.isdir():
self.makedir(tarinfo, targetpath)
elif tarinfo.isfifo():
self.makefifo(tarinfo, targetpath)
elif tarinfo.ischr() or tarinfo.isblk():
self.makedev(tarinfo, targetpath)
elif tarinfo.islnk() or tarinfo.issym():
self.makelink(tarinfo, targetpath)
elif tarinfo.type not in SUPPORTED_TYPES:
self.makeunknown(tarinfo, targetpath)
else:
self.makefile(tarinfo, targetpath)
if set_attrs:
self.chown(tarinfo, targetpath)
if not tarinfo.issym():
self.chmod(tarinfo, targetpath)
self.utime(tarinfo, targetpath)
#--------------------------------------------------------------------------
# Below are the different file methods. They are called via
# _extract_member() when extract() is called. They can be replaced in a
# subclass to implement other functionality.
def makedir(self, tarinfo, targetpath):
"""Make a directory called targetpath.
"""
try:
# Use a safe mode for the directory, the real mode is set
# later in _extract_member().
os.mkdir(targetpath, 0o700)
except FileExistsError:
pass
def makefile(self, tarinfo, targetpath):
"""Make a file called targetpath.
"""
source = self.fileobj
source.seek(tarinfo.offset_data)
with bltn_open(targetpath, "wb") as target:
if tarinfo.sparse is not None:
for offset, size in tarinfo.sparse:
target.seek(offset)
copyfileobj(source, target, size)
else:
copyfileobj(source, target, tarinfo.size)
target.seek(tarinfo.size)
target.truncate()
def makeunknown(self, tarinfo, targetpath):
"""Make a file from a TarInfo object with an unknown type
at targetpath.
"""
self.makefile(tarinfo, targetpath)
self._dbg(1, "tarfile: Unknown file type %r, " \
"extracted as regular file." % tarinfo.type)
def makefifo(self, tarinfo, targetpath):
"""Make a fifo called targetpath.
"""
if hasattr(os, "mkfifo"):
os.mkfifo(targetpath)
else:
raise ExtractError("fifo not supported by system")
def makedev(self, tarinfo, targetpath):
"""Make a character or block device called targetpath.
"""
if not hasattr(os, "mknod") or not hasattr(os, "makedev"):
raise ExtractError("special devices not supported by system")
mode = tarinfo.mode
if tarinfo.isblk():
mode |= stat.S_IFBLK
else:
mode |= stat.S_IFCHR
os.mknod(targetpath, mode,
os.makedev(tarinfo.devmajor, tarinfo.devminor))
def makelink(self, tarinfo, targetpath):
"""Make a (symbolic) link called targetpath. If it cannot be created
(platform limitation), we try to make a copy of the referenced file
instead of a link.
"""
try:
# For systems that support symbolic and hard links.
if tarinfo.issym():
os.symlink(tarinfo.linkname, targetpath)
else:
# See extract().
if os.path.exists(tarinfo._link_target):
os.link(tarinfo._link_target, targetpath)
else:
self._extract_member(self._find_link_target(tarinfo),
targetpath)
except symlink_exception:
try:
self._extract_member(self._find_link_target(tarinfo),
targetpath)
except KeyError:
raise ExtractError("unable to resolve link inside archive")
def chown(self, tarinfo, targetpath):
"""Set owner of targetpath according to tarinfo.
"""
if pwd and hasattr(os, "geteuid") and os.geteuid() == 0:
# We have to be root to do so.
try:
g = grp.getgrnam(tarinfo.gname)[2]
except KeyError:
g = tarinfo.gid
try:
u = pwd.getpwnam(tarinfo.uname)[2]
except KeyError:
u = tarinfo.uid
try:
if tarinfo.issym() and hasattr(os, "lchown"):
os.lchown(targetpath, u, g)
else:
if sys.platform != "os2emx":
os.chown(targetpath, u, g)
except EnvironmentError as e:
raise ExtractError("could not change owner")
def chmod(self, tarinfo, targetpath):
"""Set file permissions of targetpath according to tarinfo.
"""
if hasattr(os, 'chmod'):
try:
os.chmod(targetpath, tarinfo.mode)
except EnvironmentError as e:
raise ExtractError("could not change mode")
def utime(self, tarinfo, targetpath):
"""Set modification time of targetpath according to tarinfo.
"""
if not hasattr(os, 'utime'):
return
try:
os.utime(targetpath, (tarinfo.mtime, tarinfo.mtime))
except EnvironmentError as e:
raise ExtractError("could not change modification time")
#--------------------------------------------------------------------------
def next(self):
"""Return the next member of the archive as a TarInfo object, when
TarFile is opened for reading. Return None if there is no more
available.
"""
self._check("ra")
if self.firstmember is not None:
m = self.firstmember
self.firstmember = None
return m
# Read the next block.
self.fileobj.seek(self.offset)
tarinfo = None
while True:
try:
tarinfo = self.tarinfo.fromtarfile(self)
except EOFHeaderError as e:
if self.ignore_zeros:
self._dbg(2, "0x%X: %s" % (self.offset, e))
self.offset += BLOCKSIZE
continue
except InvalidHeaderError as e:
if self.ignore_zeros:
self._dbg(2, "0x%X: %s" % (self.offset, e))
self.offset += BLOCKSIZE
continue
elif self.offset == 0:
raise ReadError(str(e))
except EmptyHeaderError:
if self.offset == 0:
raise ReadError("empty file")
except TruncatedHeaderError as e:
if self.offset == 0:
raise ReadError(str(e))
except SubsequentHeaderError as e:
raise ReadError(str(e))
break
if tarinfo is not None:
self.members.append(tarinfo)
else:
self._loaded = True
return tarinfo
#--------------------------------------------------------------------------
# Little helper methods:
def _getmember(self, name, tarinfo=None, normalize=False):
"""Find an archive member by name from bottom to top.
If tarinfo is given, it is used as the starting point.
"""
# Ensure that all members have been loaded.
members = self.getmembers()
# Limit the member search list up to tarinfo.
if tarinfo is not None:
members = members[:members.index(tarinfo)]
if normalize:
name = os.path.normpath(name)
for member in reversed(members):
if normalize:
member_name = os.path.normpath(member.name)
else:
member_name = member.name
if name == member_name:
return member
def _load(self):
"""Read through the entire archive file and look for readable
members.
"""
while True:
tarinfo = self.next()
if tarinfo is None:
break
self._loaded = True
def _check(self, mode=None):
"""Check if TarFile is still open, and if the operation's mode
corresponds to TarFile's mode.
"""
if self.closed:
raise IOError("%s is closed" % self.__class__.__name__)
if mode is not None and self.mode not in mode:
raise IOError("bad operation for mode %r" % self.mode)
def _find_link_target(self, tarinfo):
"""Find the target member of a symlink or hardlink member in the
archive.
"""
if tarinfo.issym():
# Always search the entire archive.
linkname = "/".join(filter(None, (os.path.dirname(tarinfo.name), tarinfo.linkname)))
limit = None
else:
# Search the archive before the link, because a hard link is
# just a reference to an already archived file.
linkname = tarinfo.linkname
limit = tarinfo
member = self._getmember(linkname, tarinfo=limit, normalize=True)
if member is None:
raise KeyError("linkname %r not found" % linkname)
return member
def __iter__(self):
"""Provide an iterator object.
"""
if self._loaded:
return iter(self.members)
else:
return TarIter(self)
def _dbg(self, level, msg):
"""Write debugging output to sys.stderr.
"""
if level <= self.debug:
print(msg, file=sys.stderr)
def __enter__(self):
self._check()
return self
def __exit__(self, type, value, traceback):
if type is None:
self.close()
else:
# An exception occurred. We must not call close() because
# it would try to write end-of-archive blocks and padding.
if not self._extfileobj:
self.fileobj.close()
self.closed = True
# class TarFile
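# Illustrative sketch (not part of the original module): a typical round
# trip through the public API; the file names are made-up examples.
def _example_round_trip():
    with TarFile.open("example.tar.gz", "w:gz") as tar:
        tar.add("some_file.txt")
    if is_tarfile("example.tar.gz"):
        with TarFile.open("example.tar.gz", "r:*") as tar:
            tar.extractall(path="out")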
class TarIter:
"""Iterator Class.
for tarinfo in TarFile(...):
suite...
"""
def __init__(self, tarfile):
"""Construct a TarIter object.
"""
self.tarfile = tarfile
self.index = 0
def __iter__(self):
"""Return iterator object.
"""
return self
def __next__(self):
"""Return the next item using TarFile's next() method.
When all members have been read, set TarFile as _loaded.
"""
# Fix for SF #1100429: Under rare circumstances it can
# happen that getmembers() is called during iteration,
# which will cause TarIter to stop prematurely.
if self.index == 0 and self.tarfile.firstmember is not None:
tarinfo = self.tarfile.next()
elif self.index < len(self.tarfile.members):
tarinfo = self.tarfile.members[self.index]
elif not self.tarfile._loaded:
tarinfo = self.tarfile.next()
if not tarinfo:
self.tarfile._loaded = True
raise StopIteration
else:
raise StopIteration
self.index += 1
return tarinfo
#--------------------
# exported functions
#--------------------
def is_tarfile(name):
"""Return True if name points to a tar archive that we
are able to handle, else return False.
"""
try:
t = open(name)
t.close()
return True
except TarError:
return False
bltn_open = open
open = TarFile.open
| agpl-3.0 |
AustereCuriosity/astropy | astropy/units/format/vounit.py | 2 | 8246 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Handles the "VOUnit" unit format.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from ...extern import six
from ...extern.six.moves import zip
import copy
import keyword
import operator
import re
import warnings
from . import core, generic, utils
class VOUnit(generic.Generic):
"""
The IVOA standard for units used by the VO.
This is an implementation of `Units in the VO 1.0
<http://www.ivoa.net/Documents/VOUnits/>`_.
"""
_explicit_custom_unit_regex = re.compile(
r"^[YZEPTGMkhdcmunpfazy]?'((?!\d)\w)+'$")
_custom_unit_regex = re.compile(r"^((?!\d)\w)+$")
_custom_units = {}
@staticmethod
def _generate_unit_names():
from ... import units as u
from ...units import required_by_vounit as uvo
names = {}
deprecated_names = set()
bases = [
'A', 'C', 'D', 'F', 'G', 'H', 'Hz', 'J', 'Jy', 'K', 'N',
'Ohm', 'Pa', 'R', 'Ry', 'S', 'T', 'V', 'W', 'Wb', 'a',
'adu', 'arcmin', 'arcsec', 'barn', 'beam', 'bin', 'cd',
'chan', 'count', 'ct', 'd', 'deg', 'eV', 'erg', 'g', 'h',
'lm', 'lx', 'lyr', 'm', 'mag', 'min', 'mol', 'pc', 'ph',
'photon', 'pix', 'pixel', 'rad', 'rad', 's', 'solLum',
'solMass', 'solRad', 'sr', 'u', 'voxel', 'yr'
]
binary_bases = [
'bit', 'byte', 'B'
]
simple_units = [
'Angstrom', 'angstrom', 'AU', 'au', 'Ba', 'dB', 'mas'
]
si_prefixes = [
'y', 'z', 'a', 'f', 'p', 'n', 'u', 'm', 'c', 'd',
'', 'da', 'h', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y'
]
binary_prefixes = [
'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei'
]
deprecated_units = set([
'a', 'angstrom', 'Angstrom', 'au', 'Ba', 'barn', 'ct',
'erg', 'G', 'ph', 'pix'
])
def do_defines(bases, prefixes, skips=[]):
for base in bases:
for prefix in prefixes:
key = prefix + base
if key in skips:
continue
if keyword.iskeyword(key):
continue
names[key] = getattr(u if hasattr(u, key) else uvo, key)
if base in deprecated_units:
deprecated_names.add(key)
do_defines(bases, si_prefixes, ['pct', 'pcount', 'yd'])
do_defines(binary_bases, si_prefixes + binary_prefixes, ['dB', 'dbyte'])
do_defines(simple_units, [''])
return names, deprecated_names, []
@classmethod
def parse(cls, s, debug=False):
if s in ('unknown', 'UNKNOWN'):
return None
if s == '':
return core.dimensionless_unscaled
if s.count('/') > 1:
raise core.UnitsError(
"'{0}' contains multiple slashes, which is "
"disallowed by the VOUnit standard".format(s))
result = cls._do_parse(s, debug=debug)
if hasattr(result, 'function_unit'):
raise ValueError("Function units are not yet supported in "
"VOUnit.")
return result
@classmethod
def _parse_unit(cls, unit, detailed_exception=True):
if unit not in cls._units:
if cls._explicit_custom_unit_regex.match(unit):
return cls._def_custom_unit(unit)
if not cls._custom_unit_regex.match(unit):
raise ValueError()
warnings.warn(
"Unit {0!r} not supported by the VOUnit "
"standard. {1}".format(
unit, utils.did_you_mean_units(
unit, cls._units, cls._deprecated_units,
cls._to_decomposed_alternative)),
core.UnitsWarning)
return cls._def_custom_unit(unit)
if unit in cls._deprecated_units:
utils.unit_deprecation_warning(
unit, cls._units[unit], 'VOUnit',
cls._to_decomposed_alternative)
return cls._units[unit]
@classmethod
def _get_unit_name(cls, unit):
# The da- and d- prefixes are discouraged. This has the
# effect of adding a scale to value in the result.
if isinstance(unit, core.PrefixUnit):
if unit._represents.scale == 10.0:
raise ValueError(
"In '{0}': VOUnit can not represent units with the 'da' "
"(deka) prefix".format(unit))
elif unit._represents.scale == 0.1:
raise ValueError(
"In '{0}': VOUnit can not represent units with the 'd' "
"(deci) prefix".format(unit))
name = unit.get_format_name('vounit')
if unit in six.itervalues(cls._custom_units):
return name
if name not in cls._units:
raise ValueError(
"Unit {0!r} is not part of the VOUnit standard".format(name))
if name in cls._deprecated_units:
utils.unit_deprecation_warning(
name, unit, 'VOUnit',
cls._to_decomposed_alternative)
return name
@classmethod
def _def_custom_unit(cls, unit):
def def_base(name):
if name in cls._custom_units:
return cls._custom_units[name]
if name.startswith("'"):
return core.def_unit(
[name[1:-1], name],
format={'vounit': name},
namespace=cls._custom_units)
else:
return core.def_unit(
name, namespace=cls._custom_units)
if unit in cls._custom_units:
return cls._custom_units[unit]
for short, full, factor in core.si_prefixes:
for prefix in short:
if unit.startswith(prefix):
base_name = unit[len(prefix):]
base_unit = def_base(base_name)
return core.PrefixUnit(
[prefix + x for x in base_unit.names],
core.CompositeUnit(factor, [base_unit], [1],
_error_check=False),
format={'vounit': prefix + base_unit.names[-1]},
namespace=cls._custom_units)
return def_base(unit)
@classmethod
def to_string(cls, unit):
from .. import core
# Remove units that aren't known to the format
unit = utils.decompose_to_known_units(unit, cls._get_unit_name)
if isinstance(unit, core.CompositeUnit):
if unit.physical_type == 'dimensionless' and unit.scale != 1:
raise core.UnitScaleError(
"The VOUnit format is not able to "
"represent scale for dimensionless units. "
"Multiply your data by {0:e}."
.format(unit.scale))
s = ''
if unit.scale != 1:
m, ex = utils.split_mantissa_exponent(unit.scale)
parts = []
if m:
parts.append(m)
if ex:
fex = '10'
if not ex.startswith('-'):
fex += '+'
fex += ex
parts.append(fex)
s += ' '.join(parts)
pairs = list(zip(unit.bases, unit.powers))
pairs.sort(key=operator.itemgetter(1), reverse=True)
s += cls._format_unit_list(pairs)
elif isinstance(unit, core.NamedUnit):
s = cls._get_unit_name(unit)
return s
@classmethod
def _to_decomposed_alternative(cls, unit):
from .. import core
try:
s = cls.to_string(unit)
except core.UnitScaleError:
scale = unit.scale
unit = copy.copy(unit)
unit._scale = 1.0
return '{0} (with data multiplied by {1})'.format(
cls.to_string(unit), scale)
return s
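# Hedged usage sketch (added for illustration; not part of the original
# module). The formatter is normally reached through the public
# astropy.units API; this assumes an installed astropy.
def _vounit_demo():
    from astropy import units as u
    # Parsing goes through VOUnit.parse(); a single slash is allowed.
    kms = u.Unit('km/s', format='vounit')
    # Serialisation goes through VOUnit.to_string(); VOUnit spells
    # multiplication as '.' and powers as '**', e.g. 'km.s**-1'.
    return kms.to_string(format='vounit')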
| bsd-3-clause |
DSLituiev/scikit-learn | sklearn/gaussian_process/tests/test_gaussian_process.py | 267 | 6813 | """
Testing for Gaussian Process module (sklearn.gaussian_process)
"""
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# Licence: BSD 3 clause
from nose.tools import raises
from nose.tools import assert_true
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from sklearn.gaussian_process import regression_models as regression
from sklearn.gaussian_process import correlation_models as correlation
from sklearn.datasets import make_regression
from sklearn.utils.testing import assert_greater
f = lambda x: x * np.sin(x)
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()
def test_1d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a one-dimensional Gaussian Process model.
# Check random start optimization.
# Test the interpolating property.
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=random_start, verbose=False).fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
y2_pred, MSE2 = gp.predict(X2, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.)
and np.allclose(MSE2, 0., atol=10))
def test_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the interpolating property.
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = g(X).ravel()
thetaL = [1e-4] * 2
thetaU = [1e-1] * 2
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=thetaL,
thetaU=thetaU,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
eps = np.finfo(gp.theta_.dtype).eps
assert_true(np.all(gp.theta_ >= thetaL - eps)) # Lower bounds of hyperparameters
assert_true(np.all(gp.theta_ <= thetaU + eps)) # Upper bounds of hyperparameters
def test_2d_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the GP interpolation for 2D output
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
f = lambda x: np.vstack((g(x), g(x))).T
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = f(X)
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=[1e-4] * 2,
thetaU=[1e-1] * 2,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
@raises(ValueError)
def test_wrong_number_of_outputs():
gp = GaussianProcess()
gp.fit([[1, 2, 3], [4, 5, 6]], [1, 2, 3])
def test_more_builtin_correlation_models(random_start=1):
# Repeat test_1d and test_2d for several built-in correlation
# models specified as strings.
all_corr = ['absolute_exponential', 'squared_exponential', 'cubic',
'linear']
for corr in all_corr:
test_1d(regr='constant', corr=corr, random_start=random_start)
test_2d(regr='constant', corr=corr, random_start=random_start)
test_2d_2d(regr='constant', corr=corr, random_start=random_start)
def test_ordinary_kriging():
# Repeat test_1d and test_2d with given regression weights (beta0) for
# different regression models (Ordinary Kriging).
test_1d(regr='linear', beta0=[0., 0.5])
test_1d(regr='quadratic', beta0=[0., 0.5, 0.5])
test_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
test_2d_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
def test_no_normalize():
gp = GaussianProcess(normalize=False).fit(X, y)
y_pred = gp.predict(X)
assert_true(np.allclose(y_pred, y))
def test_random_starts():
# Test that an increasing number of random-starts of GP fitting only
# increases the reduced likelihood function of the optimal theta.
n_samples, n_features = 50, 3
np.random.seed(0)
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features) * 2 - 1
y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)
best_likelihood = -np.inf
for random_start in range(1, 5):
gp = GaussianProcess(regr="constant", corr="squared_exponential",
theta0=[1e-0] * n_features,
thetaL=[1e-4] * n_features,
thetaU=[1e+1] * n_features,
random_start=random_start, random_state=0,
verbose=False).fit(X, y)
rlf = gp.reduced_likelihood_function()[0]
assert_greater(rlf, best_likelihood - np.finfo(np.float32).eps)
best_likelihood = rlf
def test_mse_solving():
# test the MSE estimate to be sane.
# non-regression test for ignoring off-diagonals of feature covariance,
# testing with nugget that renders covariance useless, only
# using the mean function, with low effective rank of data
gp = GaussianProcess(corr='absolute_exponential', theta0=1e-4,
thetaL=1e-12, thetaU=1e-2, nugget=1e-2,
optimizer='Welch', regr="linear", random_state=0)
X, y = make_regression(n_informative=3, n_features=60, noise=50,
random_state=0, effective_rank=1)
gp.fit(X, y)
assert_greater(1000, gp.predict(X, eval_MSE=True)[1].mean())
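# Hedged usage sketch (added for illustration; not one of the test cases
# above). It mirrors test_1d: fit a GaussianProcess on the module-level
# X/y data and check the interpolating property at the training points.
def demo_gp_interpolation():
    gp = GaussianProcess(theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
                         random_start=5, verbose=False).fit(X, y)
    y_pred, mse = gp.predict(X, eval_MSE=True)
    # A noiseless GP interpolates its training data exactly.
    assert_true(np.allclose(y_pred, y) and np.allclose(mse, 0.))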
| bsd-3-clause |
moyogo/mutatormath | Lib/mutatorMath/test/ufo/test.py | 2 | 20386 | # -*- coding: utf-8 -*-
from __future__ import print_function
"""
These are tests for writing and processing designspace.designspace documents
- write various designspaces
- read them
- process them using the test fonts
- show masters can be read from different directories
- show instances can be generated into different directories
- do some basic output testing.
"""
import os
import defcon.objects.font
import mutatorMath.objects.error
from mutatorMath.ufo.document import DesignSpaceDocumentWriter, DesignSpaceDocumentReader
from mutatorMath.objects.location import Location
def stripPrefix(d):
# strip the "public.kern" prefixes from d
new = []
for pair, value in d:
a, b = pair
if "public.kern" in a:
a = a[13:]
if "public.kern" in b:
b = b[13:]
new.append(((a,b), value))
return sorted(new)
#t = [(('V', 'public.kern2.@MMK_R_A'), -100), (('public.kern1.@MMK_L_A', 'V'), -100)]
#stripPrefix(t)
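# Hedged example (expands the commented sample above; illustrative only):
# stripPrefix drops the 13-character "public.kern1."/"public.kern2."
# prefixes and returns the pairs sorted.
def _stripPrefixDemo():
    t = [(('V', 'public.kern2.@MMK_R_A'), -100),
         (('public.kern1.@MMK_L_A', 'V'), -100)]
    assert stripPrefix(t) == [(('@MMK_L_A', 'V'), -100),
                              (('V', '@MMK_R_A'), -100)]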
def test1():
"""
>>> import time
>>> from mutatorMath.ufo.document import initializeLogger
>>> testRoot = os.path.join(os.path.dirname(__file__), 'data')
>>> documentPath = os.path.join(testRoot, 'exporttest_basic.designspace')
>>> sourcePath = os.path.join(testRoot, 'sources')
>>> instancePath = os.path.join(testRoot, 'instances')
>>> master1Path = os.path.join(testRoot, )
>>> #logPath = None #
>>> logPath = os.path.join(testRoot, "tests.log")
>>> if logPath is not None:
... if os.path.exists(logPath):
... os.remove(logPath)
... initializeLogger(logPath)
>>> ufoVersion=2
>>> roundGeometry=True # this will trigger some fails if run with a pre-ufo3 fontMath.
>>> doc = DesignSpaceDocumentWriter(documentPath, verbose=True)
>>> doc.addSource(
... os.path.join(sourcePath, "light", "LightCondensed.ufo"),
... name="master_1",
... location=dict(width=0),
... copyLib=True, # change to see the copy Lib test fail
... copyGroups=True, # change to see assertions fail on groupSource and instance groups.
... copyInfo=True,
... copyFeatures=True,
... muteKerning=False,
... muteInfo=True,
... mutedGlyphNames=['a',],
... familyName="ExplicitSourceFamilyName",
... styleName="ExplicitSourceStyleName",)
>>> doc.addSource(
... os.path.join(sourcePath, "light", "LightWide.ufo"),
... name="master_2",
... location=dict(width=1),
... copyLib=False,
... copyGroups=False,
... copyInfo=False,
... muteKerning=True,
... muteInfo=True,
... mutedGlyphNames=['b'] )
>>> testOutputFileName = os.path.join(instancePath, "A", "testOutput_glyphs.ufo")
>>> glyphMasters = [('M', "master_1", dict(width=0)), ('M', "master_2", dict(width=1)), ]
>>> doc.startInstance(fileName=testOutputFileName,
... familyName="TestFamily",
... styleName="TestStyleName",
... location=dict(width=(0.2, 0.8)))
>>> doc.writeGlyph("M", unicodes=[0xff], location=dict(width=0.9), masters=None, note="testnote123")
>>> doc.writeGlyph("N", location=dict(width=0.7), masters=glyphMasters)
>>> doc.endInstance()
>>> doc.save()
>>> doc = DesignSpaceDocumentReader(documentPath, ufoVersion, roundGeometry=roundGeometry, verbose=True, logPath=logPath)
>>> doc.process(makeGlyphs=True, makeKerning=False, makeInfo=False)
# check if we found the UFOs
>>> assert "master_1" in doc.sources
>>> assert "master_2" in doc.sources
# check if we are reading the muting flags
>>> assert doc.libSource == 'master_1'
>>> assert doc.groupsSource == 'master_1'
>>> assert doc.libSource == 'master_1'
>>> assert doc.muted == {'info': ['master_1', 'master_2'], 'glyphs': {'master_2': ['b'], 'master_1': ['a']}, 'kerning': ['master_2']}
# check the locations
>>> fontObj, loc = doc.sources['master_1']
>>> loc.asTuple()
(('width', 0.0),)
>>> loc.asTuple()
(('width', 0.0),)
>>> fontObj, loc = doc.sources['master_2']
>>> loc.asTuple()
(('width', 1.0),)
# check the instances
>>> assert os.path.basename(testOutputFileName) in doc.results
>>> resultUFOPath = doc.results[os.path.basename(testOutputFileName)]
>>> instance = defcon.objects.font.Font(resultUFOPath)
# note: the next assertion will fail if the calculations were made with the
# pre-ufo3 fontMath.
>>> assert instance['M'].unicodes == [0xff]
# check the groups
>>> ('testGroup', ['E', 'F', 'H']) in list(instance.groups.items())
True
# check the lib
>>> assert "testLibItemKey" in instance.lib.keys()
# check the feature text was copied from the source
>>> assert "Hi_this_is_the_feature." in instance.features.text
# basic kerning processing.
>>> documentPath = os.path.join(testRoot, 'exporttest_kerning.designspace')
>>> doc = DesignSpaceDocumentWriter(documentPath, verbose=True)
>>> doc.addSource(
... os.path.join(sourcePath, "light", "LightCondensed.ufo"),
... name="master_1",
... location=dict(weight=0),
... copyLib=True,
... copyGroups=True,
... copyInfo=True,
... muteKerning=False,
... muteInfo=False)
>>> doc.addSource(
... os.path.join(sourcePath, "bold", "BoldCondensed.ufo"),
... name="master_2",
... location=dict(weight=1),
... copyLib=False,
... copyGroups=False,
... copyInfo=False,
... muteKerning=False,
... muteInfo=False )
>>> testOutputFileName = os.path.join(instancePath, "B", "testOutput_kerning.ufo")
>>> doc.startInstance(fileName=testOutputFileName, familyName="TestFamily", styleName="TestStyleName", location=dict(weight=0.6))
# give this kerning master a different location
>>> #doc.writeKerning(location=dict(weight=1))
>>> #doc.endInstance()
>>> #doc.save()
>>> #doc = DesignSpaceDocumentReader(documentPath, ufoVersion, roundGeometry=roundGeometry, verbose=True, logPath=logPath)
>>> #doc.process(makeGlyphs=False, makeKerning=True, makeInfo=False)
>>> #assert os.path.basename(testOutputFileName) in doc.results
>>> #resultUFOPath = doc.results[os.path.basename(testOutputFileName)]
>>> #instance = defcon.objects.font.Font(resultUFOPath)
>>> #assert sorted(instance.kerning.items()) == stripPrefix([(('@MMK_L_A', 'V'), 100), (('V', '@MMK_R_A'), 100)])
# test the effects of muting the kerning
>>> documentPath = os.path.join(testRoot, 'exporttest_kerning_muted.designspace')
>>> doc = DesignSpaceDocumentWriter(documentPath, verbose=True)
>>> doc.addSource(
... os.path.join(sourcePath, "light", "LightCondensed.ufo"),
... name="master_1",
... location=dict(weight=0),
... copyLib=True,
... copyGroups=True,
... copyInfo=True,
... muteKerning=False,
... muteInfo=False)
>>> doc.addSource(
... os.path.join(sourcePath, "bold", "BoldCondensed.ufo"),
... name="master_2",
... location=dict(weight=1),
... copyLib=False,
... copyGroups=False,
... copyInfo=False,
... muteKerning=True, # mute a master at a non-origin location!
... muteInfo=False )
>>> testOutputFileName = os.path.join(instancePath, "C", "testOutput_kerning_muted.ufo")
>>> testLocation = dict(weight=0.6) # change this location to see calculation assertions fail.
>>> doc.startInstance(fileName=testOutputFileName, familyName="TestFamily", styleName="TestStyleName", location=testLocation)
>>> doc.writeKerning()
>>> doc.endInstance()
>>> doc.save()
>>> doc = DesignSpaceDocumentReader(documentPath, ufoVersion, roundGeometry=roundGeometry, verbose=True, logPath=logPath)
>>> paths = doc.getSourcePaths()
>>> len(paths)
2
>>> doc.process(makeGlyphs=False, makeKerning=True, makeInfo=False)
>>> assert doc.groupsSource == 'master_1'
>>> assert os.path.basename(testOutputFileName) in doc.results
>>> resultUFOPath = doc.results[os.path.basename(testOutputFileName)]
>>> instance = defcon.objects.font.Font(resultUFOPath)
# the bold condensed kerning master has been muted, we expect the light condensed data in the instance
>>> assert [(('@MMK_L_A', 'V'), -100), (('V', '@MMK_R_A'), -100)] == stripPrefix(sorted(instance.kerning.items()))
# info data
# calculating fields
# copying fields
>>> documentPath = os.path.join(testRoot, 'exporttest_info.designspace')
>>> doc = DesignSpaceDocumentWriter(documentPath, verbose=True)
>>> doc.addSource(
... os.path.join(sourcePath, "light", "LightCondensed.ufo"),
... name="master_1",
... location=dict(weight=0),
... copyLib=False,
... copyGroups=False,
... copyInfo=True, # flip to False and see some assertions fail
... muteKerning=True,
... muteInfo=False)
>>> doc.addSource(
... os.path.join(sourcePath, "bold", "BoldCondensed.ufo"),
... name="master_2",
... location=dict(weight=1),
... copyLib=False,
... copyGroups=False,
... copyInfo=False,
... muteKerning=True,
... muteInfo=False )
>>> testOutputFileName = os.path.join(instancePath, "D", "testOutput_info.ufo")
>>> testLocation = dict(weight=0.5) # change this location to see calculation assertions fail.
>>> doc.startInstance(
... fileName=testOutputFileName,
... familyName="TestFamily",
... styleName="TestStyleName",
... location=testLocation,
... postScriptFontName="TestPostScriptFontNameValue",
... styleMapFamilyName="TestStyleMapFamilyNameValue",
... styleMapStyleName="bold italic",
... )
>>> doc.writeInfo()
>>> doc.endInstance()
>>> doc.save()
>>> doc = DesignSpaceDocumentReader(documentPath, ufoVersion, roundGeometry=roundGeometry, verbose=True, logPath=logPath)
>>> doc.process(makeGlyphs=False, makeKerning=False, makeInfo=True)
>>> assert os.path.basename(testOutputFileName) in doc.results
>>> resultUFOPath = doc.results[os.path.basename(testOutputFileName)]
>>> instance = defcon.objects.font.Font(resultUFOPath)
# example calculated values
>>> assert instance.info.ascender == 750
>>> assert instance.info.capHeight == 750
# example copied values
>>> assert instance.info.versionMajor == 1
>>> assert instance.info.openTypeOS2VendorID == "LTTR"
>>> assert instance.info.copyright == "License same as MutatorMath. BSD 3-clause. [test-token: C]"
# test the build script
>>> documentPath = os.path.join(testRoot, 'exporttest_build.designspace')
>>> doc = DesignSpaceDocumentWriter(documentPath, verbose=True)
>>> doc.addSource(
... os.path.join(sourcePath, "light", "LightCondensed.ufo"),
... name="master_1",
... location=dict(weight=0),
... copyLib=True,
... copyGroups=True,
... copyInfo=True,
... muteKerning=False,
... muteInfo=False)
>>> doc.addSource(
... os.path.join(sourcePath, "bold", "BoldCondensed.ufo"),
... name="master_2",
... location=dict(weight=1),
... copyLib=False,
... copyGroups=False,
... copyInfo=False,
... muteKerning=False,
... muteInfo=False )
>>> testOutputFileName = os.path.join(instancePath, "E", "testOutput_build.ufo")
>>> testLocation = dict(weight=0.25) # change this location to see calculation assertions fail.
>>> doc.startInstance(
... fileName=testOutputFileName,
... familyName="TestFamily",
... styleName="TestStyleName",
... location=testLocation)
>>> doc.writeInfo()
>>> doc.writeKerning()
>>> doc.endInstance()
>>> doc.save()
# test build function -- single designspace file
>>> from mutatorMath.ufo import build
>>> import os
>>> import posixpath
>>> here = os.path.join(os.path.dirname(__file__), 'data', 'exporttest_basic.designspace')
>>> results = build(here, outputUFOFormatVersion=2)
>>> ufoFullPath = results[0]['testOutput_glyphs.ufo']
>>> ufoRelPath = os.path.relpath(ufoFullPath, os.path.dirname(__file__))
>>> posixRelPath = posixpath.join(*ufoRelPath.split(os.path.sep))
>>> posixRelPath
'data/instances/A/testOutput_glyphs.ufo'
# test the axes elements
>>> documentPath = os.path.join(testRoot, 'warpmap_test.designspace')
>>> doc = DesignSpaceDocumentWriter(documentPath, verbose=True)
>>> def grow(base, factor, steps):
... return [(i*100, int(round(base*(1+factor)**i))) for i in range(steps)]
>>> doc.addAxis("wght", "weight", 0, 1000, 0, grow(100,0.55,11))
>>> doc.addSource(
... os.path.join(sourcePath, "stems", "StemThin.ufo"),
... name="master_1",
... location=dict(weight=0),
... copyLib=True,
... copyGroups=True,
... copyInfo=True,
... muteKerning=False,
... muteInfo=False)
>>> doc.addSource(
... os.path.join(sourcePath, "stems", "StemBold.ufo"),
... name="master_2",
... location=dict(weight=1000),
... copyLib=False,
... copyGroups=False,
... copyInfo=False,
... muteKerning=False,
... muteInfo=False )
>>> testOutputFileName = os.path.join(instancePath, "W", "StemOutput.ufo")
>>> testLocation = dict(weight=0) # change this location to see calculation assertions fail.
>>> doc.startInstance(
... fileName=testOutputFileName,
... familyName="TestFamily",
... styleName="Warped",
... location=testLocation)
>>> doc.writeInfo()
>>> doc.writeKerning()
>>> glyphMasters = [('I', "master_1", dict(weight=0)), ('I', "master_2", dict(weight=1000)), ]
>>> for i in range(0, 1000, 50):
... doc.writeGlyph("I.%04d"%i, location=dict(weight=i), masters=glyphMasters)
...
>>> doc.endInstance()
>>> doc.save()
>>> doc = DesignSpaceDocumentReader(documentPath, ufoVersion, roundGeometry=roundGeometry, verbose=True, logPath=logPath)
>>> doc.process(makeGlyphs=True, makeKerning=False, makeInfo=False)
>>> documentPath = os.path.join(testRoot, 'no_warpmap_test.designspace')
>>> doc = DesignSpaceDocumentWriter(documentPath, verbose=True)
>>> doc.addAxis("wght", "weight", 0, 1000, 0)
>>> doc.addSource(
... os.path.join(sourcePath, "stems", "StemThin.ufo"),
... name="master_1",
... location=dict(weight=0),
... copyLib=True,
... copyGroups=True,
... copyInfo=True,
... muteKerning=False,
... muteInfo=False)
>>> doc.addSource(
... os.path.join(sourcePath, "stems", "StemBold.ufo"),
... name="master_2",
... location=dict(weight=1000),
... copyLib=False,
... copyGroups=False,
... copyInfo=False,
... muteKerning=False,
... muteInfo=False )
>>> testOutputFileName = os.path.join(instancePath, "W", "StemOutput_nowarp.ufo")
>>> testLocation = dict(weight=0) # change this location to see calculation assertions fail.
>>> doc.startInstance(
... fileName=testOutputFileName,
... familyName="TestFamily",
... styleName="NotWarped",
... location=testLocation)
>>> doc.writeInfo()
>>> doc.writeKerning()
>>> glyphMasters = [('I', "master_1", dict(weight=0)), ('I', "master_2", dict(weight=1000)), ]
>>> for i in range(0, 1000, 50):
... doc.writeGlyph("I.%04d"%i, location=dict(weight=i), masters=glyphMasters)
...
>>> doc.endInstance()
>>> doc.save()
>>> doc = DesignSpaceDocumentReader(documentPath, ufoVersion, roundGeometry=roundGeometry, verbose=True, logPath=logPath)
>>> doc.process(makeGlyphs=True, makeKerning=False, makeInfo=False)
# test the axes element
>>> from pprint import pprint
>>> documentPath = os.path.join(testRoot, 'axes_test.designspace')
>>> doc = DesignSpaceDocumentWriter(documentPath, verbose=True)
>>> def grow(base, factor, steps):
... return [(i*100, int(round(base*(1+factor)**i))) for i in range(steps)]
>>> # axis with a warp map
>>> warpMap = grow(100,0.55,11)
>>> doc.addAxis("wght", "weight", -1000, 1000, 0, warpMap)
>>> # axis without a warp map
>>> doc.addAxis("wdth", "width", 0, 1000, 0)
>>> doc.save()
>>> doc = DesignSpaceDocumentReader(documentPath, ufoVersion, roundGeometry=roundGeometry, verbose=True, logPath=logPath)
>>> pprint(doc.axes)
{'weight': {'default': 0.0,
'map': [(0.0, 100.0),
(100.0, 155.0),
(200.0, 240.0),
(300.0, 372.0),
(400.0, 577.0),
(500.0, 895.0),
(600.0, 1387.0),
(700.0, 2149.0),
(800.0, 3332.0),
(900.0, 5164.0),
(1000.0, 8004.0)],
'maximum': 1000.0,
'minimum': -1000.0,
'name': 'weight',
'tag': 'wght'},
'width': {'default': 0.0,
'map': [],
'maximum': 1000.0,
'minimum': 0.0,
'name': 'width',
'tag': 'wdth'}}
>>> doc.process(makeGlyphs=False, makeKerning=False, makeInfo=False)
"""
def bender_and_mutatorTest():
"""
>>> from mutatorMath.objects.bender import Bender
>>> from mutatorMath.objects.location import Location
>>> from mutatorMath.objects.mutator import buildMutator
>>> w = {'aaaa':{
... 'map': [(300, 50),
... (400, 100),
... (700, 150)],
... 'name':'aaaaAxis',
... 'tag':'aaaa',
... 'minimum':0,
... 'maximum':1000,
... 'default':0}}
>>> b = Bender(w)
>>> assert b(dict(aaaa=300)) == {'aaaa': 50}
>>> assert b(dict(aaaa=400)) == {'aaaa': 100}
>>> assert b(dict(aaaa=700)) == {'aaaa': 150}
master locations are always in internal design coordinates, thus they are
considered to be already mapped or bent.
>>> items = [
... (Location(aaaa=50), 0),
... (Location(aaaa=100), 50),
... (Location(aaaa=150), 100),
... ]
>>> bias, mut = buildMutator(items, w, bias=Location(aaaa=100))
>>> bias
<Location aaaa:100 >
>>> bias, mut = buildMutator(items, w, bias=Location(aaaa=150))
>>> bias
<Location aaaa:150 >
>>> bias, mut = buildMutator(items, w, bias=Location(aaaa=50))
>>> bias
<Location aaaa:50 >
>>> expect = sorted([(('aaaa', 100),), (('aaaa', 50),), ()])
>>> expect
[(), (('aaaa', 50),), (('aaaa', 100),)]
>>> got = sorted(mut.keys())
>>> got
[(), (('aaaa', 50),), (('aaaa', 100),)]
>>> assert got == expect
Instance location are not bent by default, i.e. are interpreted as internal
design coordinates:
>>> assert mut.makeInstance(Location(aaaa=50)) == 0
>>> assert mut.makeInstance(Location(aaaa=100)) == 50
>>> assert mut.makeInstance(Location(aaaa=150)) == 100
If bend=True, instance locations are interpreted as user-space coordinates
>>> assert mut.makeInstance(Location(aaaa=300), bend=True) == 0
>>> assert mut.makeInstance(Location(aaaa=400), bend=True) == 50
>>> assert mut.makeInstance(Location(aaaa=700), bend=True) == 100
"""
if __name__ == '__main__':
import sys
import doctest
sys.exit(doctest.testmod().failed)
| bsd-3-clause |
AloneRoad/Inforlearn | vendor/gdata/tlslite/integration/TLSSocketServerMixIn.py | 320 | 2203 | """TLS Lite + SocketServer."""
from gdata.tlslite.TLSConnection import TLSConnection
class TLSSocketServerMixIn:
"""
This class can be mixed in with any L{SocketServer.TCPServer} to
add TLS support.
To use this class, define a new class that inherits from it and
some L{SocketServer.TCPServer} (with the mix-in first). Then
implement the handshake() method, doing some sort of server
handshake on the connection argument. If the handshake method
returns True, the RequestHandler will be triggered. Below is a
complete example of a threaded HTTPS server::
from SocketServer import *
from BaseHTTPServer import *
from SimpleHTTPServer import *
from tlslite.api import *
s = open("./serverX509Cert.pem").read()
x509 = X509()
x509.parse(s)
certChain = X509CertChain([x509])
s = open("./serverX509Key.pem").read()
privateKey = parsePEMKey(s, private=True)
sessionCache = SessionCache()
class MyHTTPServer(ThreadingMixIn, TLSSocketServerMixIn,
HTTPServer):
def handshake(self, tlsConnection):
try:
tlsConnection.handshakeServer(certChain=certChain,
privateKey=privateKey,
sessionCache=sessionCache)
tlsConnection.ignoreAbruptClose = True
return True
except TLSError, error:
print "Handshake failure:", str(error)
return False
httpd = MyHTTPServer(('localhost', 443), SimpleHTTPRequestHandler)
httpd.serve_forever()
"""
def finish_request(self, sock, client_address):
tlsConnection = TLSConnection(sock)
if self.handshake(tlsConnection) == True:
self.RequestHandlerClass(tlsConnection, client_address, self)
tlsConnection.close()
#Implement this method to do some form of handshaking. Return True
#if the handshake finishes properly and the request is authorized.
def handshake(self, tlsConnection):
raise NotImplementedError()
| apache-2.0 |
mkrautz/gyp-libmumble | pylib/gyp/generator/xcode.py | 126 | 54475 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import filecmp
import gyp.common
import gyp.xcodeproj_file
import errno
import os
import sys
import posixpath
import re
import shutil
import subprocess
import tempfile
# Project files generated by this module will use _intermediate_var as a
# custom Xcode setting whose value is a DerivedSources-like directory that's
# project-specific and configuration-specific. The normal choice,
# DERIVED_FILE_DIR, is target-specific, which is thought to be too restrictive
# as it is likely that multiple targets within a single project file will want
# to access the same set of generated files. The other option,
# PROJECT_DERIVED_FILE_DIR, is unsuitable because while it is project-specific,
# it is not configuration-specific. INTERMEDIATE_DIR is defined as
# $(PROJECT_DERIVED_FILE_DIR)/$(CONFIGURATION).
_intermediate_var = 'INTERMEDIATE_DIR'
# SHARED_INTERMEDIATE_DIR is the same, except that it is shared among all
# targets that share the same BUILT_PRODUCTS_DIR.
_shared_intermediate_var = 'SHARED_INTERMEDIATE_DIR'
_library_search_paths_var = 'LIBRARY_SEARCH_PATHS'
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'SHARED_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_SUFFIX': '.dylib',
# INTERMEDIATE_DIR is a place for targets to build up intermediate products.
# It is specific to each build environment. It is only guaranteed to exist
# and be constant within the context of a project, corresponding to a single
# input file. Some build environments may allow their intermediate directory
# to be shared on a wider scale, but this is not guaranteed.
'INTERMEDIATE_DIR': '$(%s)' % _intermediate_var,
'OS': 'mac',
'PRODUCT_DIR': '$(BUILT_PRODUCTS_DIR)',
'LIB_DIR': '$(BUILT_PRODUCTS_DIR)',
'RULE_INPUT_ROOT': '$(INPUT_FILE_BASE)',
'RULE_INPUT_EXT': '$(INPUT_FILE_SUFFIX)',
'RULE_INPUT_NAME': '$(INPUT_FILE_NAME)',
'RULE_INPUT_PATH': '$(INPUT_FILE_PATH)',
'RULE_INPUT_DIRNAME': '$(INPUT_FILE_DIRNAME)',
'SHARED_INTERMEDIATE_DIR': '$(%s)' % _shared_intermediate_var,
'CONFIGURATION_NAME': '$(CONFIGURATION)',
}
# The Xcode-specific sections that hold paths.
generator_additional_path_sections = [
'mac_bundle_resources',
'mac_framework_headers',
'mac_framework_private_headers',
# 'mac_framework_dirs', input already handles _dirs endings.
]
# The Xcode-specific keys that exist on targets and aren't moved down to
# configurations.
generator_additional_non_configuration_keys = [
'mac_bundle',
'mac_bundle_resources',
'mac_framework_headers',
'mac_framework_private_headers',
'xcode_create_dependents_test_runner',
]
# We want to let any rules apply to files that are resources also.
generator_extra_sources_for_rules = [
'mac_bundle_resources',
'mac_framework_headers',
'mac_framework_private_headers',
]
# Xcode's standard set of library directories, which don't need to be duplicated
# in LIBRARY_SEARCH_PATHS. This list is not exhaustive, but that's okay.
xcode_standard_library_dirs = frozenset([
'$(SDKROOT)/usr/lib',
'$(SDKROOT)/usr/local/lib',
])
def CreateXCConfigurationList(configuration_names):
xccl = gyp.xcodeproj_file.XCConfigurationList({'buildConfigurations': []})
if len(configuration_names) == 0:
configuration_names = ['Default']
for configuration_name in configuration_names:
xcbc = gyp.xcodeproj_file.XCBuildConfiguration({
'name': configuration_name})
xccl.AppendProperty('buildConfigurations', xcbc)
xccl.SetProperty('defaultConfigurationName', configuration_names[0])
return xccl
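# Hedged usage sketch (illustrative; not part of the generator flow):
# build a configuration list for the common Debug/Release pair. The
# first name supplied becomes the default configuration; GetProperty is
# assumed available via the common XCObject accessor used elsewhere here.
def _CreateXCConfigurationListDemo():
  xccl = CreateXCConfigurationList(['Debug', 'Release'])
  assert xccl.GetProperty('defaultConfigurationName') == 'Debug'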
class XcodeProject(object):
def __init__(self, gyp_path, path, build_file_dict):
self.gyp_path = gyp_path
self.path = path
self.project = gyp.xcodeproj_file.PBXProject(path=path)
projectDirPath = gyp.common.RelativePath(
os.path.dirname(os.path.abspath(self.gyp_path)),
os.path.dirname(path) or '.')
self.project.SetProperty('projectDirPath', projectDirPath)
self.project_file = \
gyp.xcodeproj_file.XCProjectFile({'rootObject': self.project})
self.build_file_dict = build_file_dict
# TODO(mark): add destructor that cleans up self.path if created_dir is
# True and things didn't complete successfully. Or do something even
# better with "try"?
self.created_dir = False
try:
os.makedirs(self.path)
self.created_dir = True
except OSError, e:
if e.errno != errno.EEXIST:
raise
def Finalize1(self, xcode_targets, serialize_all_tests):
# Collect a list of all of the build configuration names used by the
    # various targets in the file.  It is strongly advised that every
    # target in a project (even across multiple project files) use the
    # same set of configuration names.
configurations = []
for xct in self.project.GetProperty('targets'):
xccl = xct.GetProperty('buildConfigurationList')
xcbcs = xccl.GetProperty('buildConfigurations')
for xcbc in xcbcs:
name = xcbc.GetProperty('name')
if name not in configurations:
configurations.append(name)
# Replace the XCConfigurationList attached to the PBXProject object with
# a new one specifying all of the configuration names used by the various
# targets.
try:
xccl = CreateXCConfigurationList(configurations)
self.project.SetProperty('buildConfigurationList', xccl)
except:
sys.stderr.write("Problem with gyp file %s\n" % self.gyp_path)
raise
# The need for this setting is explained above where _intermediate_var is
# defined. The comments below about wanting to avoid project-wide build
# settings apply here too, but this needs to be set on a project-wide basis
# so that files relative to the _intermediate_var setting can be displayed
# properly in the Xcode UI.
#
# Note that for configuration-relative files such as anything relative to
# _intermediate_var, for the purposes of UI tree view display, Xcode will
# only resolve the configuration name once, when the project file is
# opened. If the active build configuration is changed, the project file
# must be closed and reopened if it is desired for the tree view to update.
# This is filed as Apple radar 6588391.
xccl.SetBuildSetting(_intermediate_var,
'$(PROJECT_DERIVED_FILE_DIR)/$(CONFIGURATION)')
xccl.SetBuildSetting(_shared_intermediate_var,
'$(SYMROOT)/DerivedSources/$(CONFIGURATION)')
# Set user-specified project-wide build settings and config files. This
# is intended to be used very sparingly. Really, almost everything should
# go into target-specific build settings sections. The project-wide
# settings are only intended to be used in cases where Xcode attempts to
# resolve variable references in a project context as opposed to a target
# context, such as when resolving sourceTree references while building up
    # the tree view for UI display.
# Any values set globally are applied to all configurations, then any
# per-configuration values are applied.
for xck, xcv in self.build_file_dict.get('xcode_settings', {}).iteritems():
xccl.SetBuildSetting(xck, xcv)
if 'xcode_config_file' in self.build_file_dict:
config_ref = self.project.AddOrGetFileInRootGroup(
self.build_file_dict['xcode_config_file'])
xccl.SetBaseConfiguration(config_ref)
build_file_configurations = self.build_file_dict.get('configurations', {})
if build_file_configurations:
for config_name in configurations:
build_file_configuration_named = \
build_file_configurations.get(config_name, {})
if build_file_configuration_named:
xcc = xccl.ConfigurationNamed(config_name)
for xck, xcv in build_file_configuration_named.get('xcode_settings',
{}).iteritems():
xcc.SetBuildSetting(xck, xcv)
if 'xcode_config_file' in build_file_configuration_named:
config_ref = self.project.AddOrGetFileInRootGroup(
build_file_configurations[config_name]['xcode_config_file'])
xcc.SetBaseConfiguration(config_ref)
# Sort the targets based on how they appeared in the input.
# TODO(mark): Like a lot of other things here, this assumes internal
# knowledge of PBXProject - in this case, of its "targets" property.
# ordinary_targets are ordinary targets that are already in the project
# file. run_test_targets are the targets that run unittests and should be
# used for the Run All Tests target. support_targets are the action/rule
# targets used by GYP file targets, just kept for the assert check.
ordinary_targets = []
run_test_targets = []
support_targets = []
# targets is full list of targets in the project.
targets = []
    # does it define its own "all"?
has_custom_all = False
# targets_for_all is the list of ordinary_targets that should be listed
# in this project's "All" target. It includes each non_runtest_target
# that does not have suppress_wildcard set.
targets_for_all = []
for target in self.build_file_dict['targets']:
target_name = target['target_name']
toolset = target['toolset']
qualified_target = gyp.common.QualifiedTarget(self.gyp_path, target_name,
toolset)
xcode_target = xcode_targets[qualified_target]
# Make sure that the target being added to the sorted list is already in
# the unsorted list.
assert xcode_target in self.project._properties['targets']
targets.append(xcode_target)
ordinary_targets.append(xcode_target)
if xcode_target.support_target:
support_targets.append(xcode_target.support_target)
targets.append(xcode_target.support_target)
if not int(target.get('suppress_wildcard', False)):
targets_for_all.append(xcode_target)
if target_name.lower() == 'all':
        has_custom_all = True
# If this target has a 'run_as' attribute, add its target to the
# targets, and add it to the test targets.
if target.get('run_as'):
# Make a target to run something. It should have one
# dependency, the parent xcode target.
xccl = CreateXCConfigurationList(configurations)
run_target = gyp.xcodeproj_file.PBXAggregateTarget({
'name': 'Run ' + target_name,
'productName': xcode_target.GetProperty('productName'),
'buildConfigurationList': xccl,
},
parent=self.project)
run_target.AddDependency(xcode_target)
command = target['run_as']
script = ''
if command.get('working_directory'):
script = script + 'cd "%s"\n' % \
gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
command.get('working_directory'))
if command.get('environment'):
script = script + "\n".join(
['export %s="%s"' %
(key, gyp.xcodeproj_file.ConvertVariablesToShellSyntax(val))
for (key, val) in command.get('environment').iteritems()]) + "\n"
        # Some tests end up using sockets, files on disk, etc. and can get
        # confused if more than one test runs at a time.  The generator
# flag 'xcode_serialize_all_test_runs' controls the forcing of all
# tests serially. It defaults to True. To get serial runs this
# little bit of python does the same as the linux flock utility to
# make sure only one runs at a time.
command_prefix = ''
if serialize_all_tests:
command_prefix = \
"""python -c "import fcntl, subprocess, sys
file = open('$TMPDIR/GYP_serialize_test_runs', 'a')
fcntl.flock(file.fileno(), fcntl.LOCK_EX)
sys.exit(subprocess.call(sys.argv[1:]))" """
# If we were unable to exec for some reason, we want to exit
# with an error, and fixup variable references to be shell
# syntax instead of xcode syntax.
script = script + 'exec ' + command_prefix + '%s\nexit 1\n' % \
gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
gyp.common.EncodePOSIXShellList(command.get('action')))
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'shellScript': script,
'showEnvVarsInLog': 0,
})
run_target.AppendProperty('buildPhases', ssbp)
# Add the run target to the project file.
targets.append(run_target)
run_test_targets.append(run_target)
xcode_target.test_runner = run_target
# Make sure that the list of targets being replaced is the same length as
# the one replacing it, but allow for the added test runner targets.
assert len(self.project._properties['targets']) == \
len(ordinary_targets) + len(support_targets)
self.project._properties['targets'] = targets
# Get rid of unnecessary levels of depth in groups like the Source group.
self.project.RootGroupsTakeOverOnlyChildren(True)
# Sort the groups nicely. Do this after sorting the targets, because the
# Products group is sorted based on the order of the targets.
self.project.SortGroups()
# Create an "All" target if there's more than one target in this project
# file and the project didn't define its own "All" target. Put a generated
# "All" target first so that people opening up the project for the first
# time will build everything by default.
if len(targets_for_all) > 1 and not has_custom_all:
xccl = CreateXCConfigurationList(configurations)
all_target = gyp.xcodeproj_file.PBXAggregateTarget(
{
'buildConfigurationList': xccl,
'name': 'All',
},
parent=self.project)
for target in targets_for_all:
all_target.AddDependency(target)
# TODO(mark): This is evil because it relies on internal knowledge of
# PBXProject._properties. It's important to get the "All" target first,
# though.
self.project._properties['targets'].insert(0, all_target)
# The same, but for run_test_targets.
if len(run_test_targets) > 1:
xccl = CreateXCConfigurationList(configurations)
run_all_tests_target = gyp.xcodeproj_file.PBXAggregateTarget(
{
'buildConfigurationList': xccl,
'name': 'Run All Tests',
},
parent=self.project)
for run_test_target in run_test_targets:
run_all_tests_target.AddDependency(run_test_target)
# Insert after the "All" target, which must exist if there is more than
# one run_test_target.
self.project._properties['targets'].insert(1, run_all_tests_target)
def Finalize2(self, xcode_targets, xcode_target_to_target_dict):
# Finalize2 needs to happen in a separate step because the process of
# updating references to other projects depends on the ordering of targets
# within remote project files. Finalize1 is responsible for sorting duty,
# and once all project files are sorted, Finalize2 can come in and update
# these references.
# To support making a "test runner" target that will run all the tests
# that are direct dependents of any given target, we look for
# xcode_create_dependents_test_runner being set on an Aggregate target,
# and generate a second target that will run the tests runners found under
# the marked target.
for bf_tgt in self.build_file_dict['targets']:
if int(bf_tgt.get('xcode_create_dependents_test_runner', 0)):
tgt_name = bf_tgt['target_name']
toolset = bf_tgt['toolset']
qualified_target = gyp.common.QualifiedTarget(self.gyp_path,
tgt_name, toolset)
xcode_target = xcode_targets[qualified_target]
if isinstance(xcode_target, gyp.xcodeproj_file.PBXAggregateTarget):
# Collect all the run test targets.
all_run_tests = []
pbxtds = xcode_target.GetProperty('dependencies')
for pbxtd in pbxtds:
pbxcip = pbxtd.GetProperty('targetProxy')
dependency_xct = pbxcip.GetProperty('remoteGlobalIDString')
if hasattr(dependency_xct, 'test_runner'):
all_run_tests.append(dependency_xct.test_runner)
# Directly depend on all the runners as they depend on the target
# that builds them.
if len(all_run_tests) > 0:
run_all_target = gyp.xcodeproj_file.PBXAggregateTarget({
'name': 'Run %s Tests' % tgt_name,
'productName': tgt_name,
},
parent=self.project)
for run_test_target in all_run_tests:
run_all_target.AddDependency(run_test_target)
# Insert the test runner after the related target.
idx = self.project._properties['targets'].index(xcode_target)
self.project._properties['targets'].insert(idx + 1, run_all_target)
# Update all references to other projects, to make sure that the lists of
# remote products are complete. Otherwise, Xcode will fill them in when
# it opens the project file, which will result in unnecessary diffs.
# TODO(mark): This is evil because it relies on internal knowledge of
# PBXProject._other_pbxprojects.
for other_pbxproject in self.project._other_pbxprojects.keys():
self.project.AddOrGetProjectReference(other_pbxproject)
self.project.SortRemoteProductReferences()
# Give everything an ID.
self.project_file.ComputeIDs()
# Make sure that no two objects in the project file have the same ID. If
# multiple objects wind up with the same ID, upon loading the file, Xcode
# will only recognize one object (the last one in the file?) and the
# results are unpredictable.
self.project_file.EnsureNoIDCollisions()
def Write(self):
# Write the project file to a temporary location first. Xcode watches for
# changes to the project file and presents a UI sheet offering to reload
# the project when it does change. However, in some cases, especially when
# multiple projects are open or when Xcode is busy, things don't work so
# seamlessly. Sometimes, Xcode is able to detect that a project file has
# changed but can't unload it because something else is referencing it.
# To mitigate this problem, and to avoid even having Xcode present the UI
# sheet when an open project is rewritten for inconsequential changes, the
# project file is written to a temporary file in the xcodeproj directory
# first. The new temporary file is then compared to the existing project
# file, if any. If they differ, the new file replaces the old; otherwise,
# the new project file is simply deleted. Xcode properly detects a file
# being renamed over an open project file as a change and so it remains
# able to present the "project file changed" sheet under this system.
# Writing to a temporary file first also avoids the possible problem of
# Xcode rereading an incomplete project file.
(output_fd, new_pbxproj_path) = \
tempfile.mkstemp(suffix='.tmp', prefix='project.pbxproj.gyp.',
dir=self.path)
try:
output_file = os.fdopen(output_fd, 'wb')
self.project_file.Print(output_file)
output_file.close()
pbxproj_path = os.path.join(self.path, 'project.pbxproj')
same = False
try:
same = filecmp.cmp(pbxproj_path, new_pbxproj_path, False)
except OSError, e:
if e.errno != errno.ENOENT:
raise
if same:
# The new file is identical to the old one, just get rid of the new
# one.
os.unlink(new_pbxproj_path)
else:
# The new file is different from the old one, or there is no old one.
# Rename the new file to the permanent name.
#
# tempfile.mkstemp uses an overly restrictive mode, resulting in a
# file that can only be read by the owner, regardless of the umask.
# There's no reason to not respect the umask here, which means that
# an extra hoop is required to fetch it and reset the new file's mode.
#
# No way to get the umask without setting a new one? Set a safe one
# and then set it back to the old value.
umask = os.umask(077)
os.umask(umask)
os.chmod(new_pbxproj_path, 0666 & ~umask)
os.rename(new_pbxproj_path, pbxproj_path)
except Exception:
# Don't leave turds behind. In fact, if this code was responsible for
# creating the xcodeproj directory, get rid of that too.
os.unlink(new_pbxproj_path)
if self.created_dir:
shutil.rmtree(self.path, True)
raise
cached_xcode_version = None
def InstalledXcodeVersion():
"""Fetches the installed version of Xcode, returns empty string if it is
unable to figure it out."""
global cached_xcode_version
if not cached_xcode_version is None:
return cached_xcode_version
# Default to an empty string
cached_xcode_version = ''
# Collect the xcodebuild's version information.
try:
import subprocess
cmd = ['/usr/bin/xcodebuild', '-version']
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
xcodebuild_version_info = proc.communicate()[0]
# Any error, return empty string
if proc.returncode:
xcodebuild_version_info = ''
except OSError:
# We failed to launch the tool
xcodebuild_version_info = ''
# Pull out the Xcode version itself.
match_line = re.search('^Xcode (.*)$', xcodebuild_version_info, re.MULTILINE)
if match_line:
cached_xcode_version = match_line.group(1)
# Done!
return cached_xcode_version
def AddSourceToTarget(source, type, pbxp, xct):
# TODO(mark): Perhaps source_extensions and library_extensions can be made a
# little bit fancier.
source_extensions = ['c', 'cc', 'cpp', 'cxx', 'm', 'mm', 's']
# .o is conceptually more of a "source" than a "library," but Xcode thinks
# of "sources" as things to compile and "libraries" (or "frameworks") as
# things to link with. Adding an object file to an Xcode target's frameworks
# phase works properly.
library_extensions = ['a', 'dylib', 'framework', 'o']
basename = posixpath.basename(source)
(root, ext) = posixpath.splitext(basename)
if ext:
ext = ext[1:].lower()
if ext in source_extensions and type != 'none':
xct.SourcesPhase().AddFile(source)
elif ext in library_extensions and type != 'none':
xct.FrameworksPhase().AddFile(source)
else:
# Files that aren't added to a sources or frameworks build phase can still
# go into the project file, just not as part of a build phase.
pbxp.AddOrGetFileInRootGroup(source)
def AddResourceToTarget(resource, pbxp, xct):
# TODO(mark): Combine with AddSourceToTarget above? Or just inline this call
# where it's used.
xct.ResourcesPhase().AddFile(resource)
def AddHeaderToTarget(header, pbxp, xct, is_public):
# TODO(mark): Combine with AddSourceToTarget above? Or just inline this call
# where it's used.
settings = '{ATTRIBUTES = (%s, ); }' % ('Private', 'Public')[is_public]
xct.HeadersPhase().AddFile(header, settings)
_xcode_variable_re = re.compile('(\$\((.*?)\))')
def ExpandXcodeVariables(string, expansions):
"""Expands Xcode-style $(VARIABLES) in string per the expansions dict.
In some rare cases, it is appropriate to expand Xcode variables when a
project file is generated. For any substring $(VAR) in string, if VAR is a
key in the expansions dict, $(VAR) will be replaced with expansions[VAR].
Any $(VAR) substring in string for which VAR is not a key in the expansions
dict will remain in the returned string.
"""
matches = _xcode_variable_re.findall(string)
  if matches is None:
return string
matches.reverse()
for match in matches:
(to_replace, variable) = match
if not variable in expansions:
continue
replacement = expansions[variable]
string = re.sub(re.escape(to_replace), replacement, string)
return string
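# Hedged examples (illustrative) of the expansion rules documented above:
# known variables are substituted, unknown ones are left in place.
def _ExpandXcodeVariablesDemo():
  assert ExpandXcodeVariables('$(SRCROOT)/foo', {'SRCROOT': '/tmp'}) == \
      '/tmp/foo'
  assert ExpandXcodeVariables('$(UNKNOWN)/foo', {'SRCROOT': '/tmp'}) == \
      '$(UNKNOWN)/foo'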
def EscapeXCodeArgument(s):
"""We must escape the arguments that we give to XCode so that it knows not to
split on spaces and to respect backslash and quote literals."""
s = s.replace('\\', '\\\\')
s = s.replace('"', '\\"')
return '"' + s + '"'
def PerformBuild(data, configurations, params):
options = params['options']
for build_file, build_file_dict in data.iteritems():
(build_file_root, build_file_ext) = os.path.splitext(build_file)
if build_file_ext != '.gyp':
continue
xcodeproj_path = build_file_root + options.suffix + '.xcodeproj'
if options.generator_output:
xcodeproj_path = os.path.join(options.generator_output, xcodeproj_path)
for config in configurations:
arguments = ['xcodebuild', '-project', xcodeproj_path]
arguments += ['-configuration', config]
print "Building [%s]: %s" % (config, arguments)
subprocess.check_call(arguments)
def GenerateOutput(target_list, target_dicts, data, params):
options = params['options']
generator_flags = params.get('generator_flags', {})
parallel_builds = generator_flags.get('xcode_parallel_builds', True)
serialize_all_tests = \
generator_flags.get('xcode_serialize_all_test_runs', True)
project_version = generator_flags.get('xcode_project_version', None)
skip_excluded_files = \
not generator_flags.get('xcode_list_excluded_files', True)
xcode_projects = {}
for build_file, build_file_dict in data.iteritems():
(build_file_root, build_file_ext) = os.path.splitext(build_file)
if build_file_ext != '.gyp':
continue
xcodeproj_path = build_file_root + options.suffix + '.xcodeproj'
if options.generator_output:
xcodeproj_path = os.path.join(options.generator_output, xcodeproj_path)
xcp = XcodeProject(build_file, xcodeproj_path, build_file_dict)
xcode_projects[build_file] = xcp
pbxp = xcp.project
if parallel_builds:
pbxp.SetProperty('attributes',
{'BuildIndependentTargetsInParallel': 'YES'})
if project_version:
xcp.project_file.SetXcodeVersion(project_version)
# Add gyp/gypi files to project
if not generator_flags.get('standalone'):
main_group = pbxp.GetProperty('mainGroup')
build_group = gyp.xcodeproj_file.PBXGroup({'name': 'Build'})
main_group.AppendChild(build_group)
for included_file in build_file_dict['included_files']:
build_group.AddOrGetFileByPath(included_file, False)
xcode_targets = {}
xcode_target_to_target_dict = {}
for qualified_target in target_list:
[build_file, target_name, toolset] = \
gyp.common.ParseQualifiedTarget(qualified_target)
spec = target_dicts[qualified_target]
if spec['toolset'] != 'target':
raise Exception(
'Multiple toolsets not supported in xcode build (target %s)' %
qualified_target)
configuration_names = [spec['default_configuration']]
for configuration_name in sorted(spec['configurations'].keys()):
if configuration_name not in configuration_names:
configuration_names.append(configuration_name)
xcp = xcode_projects[build_file]
pbxp = xcp.project
# Set up the configurations for the target according to the list of names
# supplied.
xccl = CreateXCConfigurationList(configuration_names)
# Create an XCTarget subclass object for the target. The type with
# "+bundle" appended will be used if the target has "mac_bundle" set.
# loadable_modules not in a mac_bundle are mapped to
# com.googlecode.gyp.xcode.bundle, a pseudo-type that xcode.py interprets
# to create a single-file mh_bundle.
_types = {
'executable': 'com.apple.product-type.tool',
'loadable_module': 'com.googlecode.gyp.xcode.bundle',
'shared_library': 'com.apple.product-type.library.dynamic',
'static_library': 'com.apple.product-type.library.static',
'executable+bundle': 'com.apple.product-type.application',
'loadable_module+bundle': 'com.apple.product-type.bundle',
'shared_library+bundle': 'com.apple.product-type.framework',
}
target_properties = {
'buildConfigurationList': xccl,
'name': target_name,
}
type = spec['type']
is_bundle = int(spec.get('mac_bundle', 0))
if type != 'none':
type_bundle_key = type
if is_bundle:
type_bundle_key += '+bundle'
xctarget_type = gyp.xcodeproj_file.PBXNativeTarget
try:
target_properties['productType'] = _types[type_bundle_key]
except KeyError, e:
gyp.common.ExceptionAppend(e, "-- unknown product type while "
"writing target %s" % target_name)
raise
else:
xctarget_type = gyp.xcodeproj_file.PBXAggregateTarget
assert not is_bundle, (
'mac_bundle targets cannot have type none (target "%s")' %
target_name)
target_product_name = spec.get('product_name')
if target_product_name is not None:
target_properties['productName'] = target_product_name
xct = xctarget_type(target_properties, parent=pbxp,
force_outdir=spec.get('product_dir'),
force_prefix=spec.get('product_prefix'),
force_extension=spec.get('product_extension'))
pbxp.AppendProperty('targets', xct)
xcode_targets[qualified_target] = xct
xcode_target_to_target_dict[xct] = spec
spec_actions = spec.get('actions', [])
spec_rules = spec.get('rules', [])
# Xcode has some "issues" with checking dependencies for the "Compile
# sources" step with any source files/headers generated by actions/rules.
# To work around this, if a target is building anything directly (not
# type "none"), then a second target is used to run the GYP actions/rules
# and is made a dependency of this target. This way the work is done
# before the dependency checks for what should be recompiled.
support_xct = None
if type != 'none' and (spec_actions or spec_rules):
      support_xccl = CreateXCConfigurationList(configuration_names)
support_target_properties = {
'buildConfigurationList': support_xccl,
'name': target_name + ' Support',
}
if target_product_name:
support_target_properties['productName'] = \
target_product_name + ' Support'
support_xct = \
gyp.xcodeproj_file.PBXAggregateTarget(support_target_properties,
parent=pbxp)
pbxp.AppendProperty('targets', support_xct)
xct.AddDependency(support_xct)
# Hang the support target off the main target so it can be tested/found
# by the generator during Finalize.
xct.support_target = support_xct
prebuild_index = 0
# Add custom shell script phases for "actions" sections.
for action in spec_actions:
# There's no need to write anything into the script to ensure that the
# output directories already exist, because Xcode will look at the
# declared outputs and automatically ensure that they exist for us.
# Do we have a message to print when this action runs?
message = action.get('message')
if message:
message = 'echo note: ' + gyp.common.EncodePOSIXShellArgument(message)
else:
message = ''
# Turn the list into a string that can be passed to a shell.
action_string = gyp.common.EncodePOSIXShellList(action['action'])
# Convert Xcode-type variable references to sh-compatible environment
# variable references.
message_sh = gyp.xcodeproj_file.ConvertVariablesToShellSyntax(message)
action_string_sh = gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
action_string)
script = ''
# Include the optional message
if message_sh:
script += message_sh + '\n'
# Be sure the script runs in exec, and that if exec fails, the script
# exits signalling an error.
script += 'exec ' + action_string_sh + '\nexit 1\n'
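# Illustrative result: an action with message "Generating foo" and action
# ['python', 'gen.py'] produces a phase script of:
#   echo note: Generating foo
#   exec python gen.py
#   exit 1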
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'inputPaths': action['inputs'],
'name': 'Action "' + action['action_name'] + '"',
'outputPaths': action['outputs'],
'shellScript': script,
'showEnvVarsInLog': 0,
})
if support_xct:
support_xct.AppendProperty('buildPhases', ssbp)
else:
# TODO(mark): this assumes too much knowledge of the internals of
# xcodeproj_file; some of these smarts should move into xcodeproj_file
# itself.
xct._properties['buildPhases'].insert(prebuild_index, ssbp)
prebuild_index = prebuild_index + 1
# TODO(mark): Should verify that at most one of these is specified.
if int(action.get('process_outputs_as_sources', False)):
for output in action['outputs']:
AddSourceToTarget(output, type, pbxp, xct)
if int(action.get('process_outputs_as_mac_bundle_resources', False)):
for output in action['outputs']:
AddResourceToTarget(output, pbxp, xct)
# tgt_mac_bundle_resources holds the list of bundle resources so
# the rule processing can check against it.
if is_bundle:
tgt_mac_bundle_resources = spec.get('mac_bundle_resources', [])
else:
tgt_mac_bundle_resources = []
# Add custom shell script phases driving "make" for "rules" sections.
#
# Xcode's built-in rule support is almost powerful enough to use directly,
# but there are a few significant deficiencies that render them unusable.
# There are workarounds for some of its inadequacies, but in aggregate,
# the workarounds added complexity to the generator, and some workarounds
# actually require input files to be crafted more carefully than I'd like.
# Consequently, until Xcode rules are made more capable, "rules" input
# sections will be handled in Xcode output by shell script build phases
# performed prior to the compilation phase.
#
# The following problems with Xcode rules were found. The numbers are
# Apple radar IDs. I hope that these shortcomings are addressed; I really
# liked having the rules handled directly in Xcode during the period that
# I was prototyping this.
#
# 6588600 Xcode compiles custom script rule outputs too soon, compilation
# fails. This occurs when rule outputs from distinct inputs are
# interdependent. The only workaround is to put rules and their
# inputs in a separate target from the one that compiles the rule
# outputs. This requires input file cooperation and it means that
# process_outputs_as_sources is unusable.
# 6584932 Need to declare that custom rule outputs should be excluded from
# compilation. A possible workaround is to lie to Xcode about a
# rule's output, giving it a dummy file it doesn't know how to
# compile. The rule action script would need to touch the dummy.
# 6584839 I need a way to declare additional inputs to a custom rule.
# A possible workaround is a shell script phase prior to
# compilation that touches a rule's primary input files if any
# would-be additional inputs are newer than the output. Modifying
# the source tree - even just modification times - feels dirty.
# 6564240 Xcode "custom script" build rules always dump all environment
# variables. This is a low-priority problem and is not a
# show-stopper.
rules_by_ext = {}
for rule in spec_rules:
rules_by_ext[rule['extension']] = rule
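# rules_by_ext maps a bare extension (no leading dot) to its rule; "sources"
# entries are matched against it below via posixpath.splitext()[1][1:].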
# First, some definitions:
#
# A "rule source" is a file that was listed in a target's "sources"
# list and will have a rule applied to it on the basis of matching the
# rule's "extensions" attribute. Rule sources are direct inputs to
# rules.
#
# Rule definitions may specify additional inputs in their "inputs"
# attribute. These additional inputs are used for dependency tracking
# purposes.
#
# A "concrete output" is a rule output with input-dependent variables
# resolved. For example, given a rule with:
# 'extension': 'ext', 'outputs': ['$(INPUT_FILE_BASE).cc'],
# if the target's "sources" list contained "one.ext" and "two.ext",
# the "concrete output" for rule input "two.ext" would be "two.cc". If
# a rule specifies multiple outputs, each input file that the rule is
# applied to will have the same number of concrete outputs.
#
# If any concrete outputs are outdated or missing relative to their
# corresponding rule_source or to any specified additional input, the
# rule action must be performed to generate the concrete outputs.
# concrete_outputs_by_rule_source will have an item at the same index
# as the rule['rule_sources'] that it corresponds to. Each item is a
# list of all of the concrete outputs for the rule_source.
concrete_outputs_by_rule_source = []
# concrete_outputs_all is a flat list of all concrete outputs that this
# rule is able to produce, given the known set of input files
# (rule_sources) that apply to it.
concrete_outputs_all = []
# messages & actions are keyed by the same indices as rule['rule_sources']
# and concrete_outputs_by_rule_source. They contain the message and
# action to perform after resolving input-dependent variables. The
# message is optional, in which case None is stored for each rule source.
messages = []
actions = []
for rule_source in rule.get('rule_sources', []):
rule_source_dirname, rule_source_basename = \
posixpath.split(rule_source)
(rule_source_root, rule_source_ext) = \
posixpath.splitext(rule_source_basename)
# These are the same variable names that Xcode uses for its own native
# rule support. Because Xcode's rule engine is not being used, they
# need to be expanded as they are written to the makefile.
rule_input_dict = {
'INPUT_FILE_BASE': rule_source_root,
'INPUT_FILE_SUFFIX': rule_source_ext,
'INPUT_FILE_NAME': rule_source_basename,
'INPUT_FILE_PATH': rule_source,
'INPUT_FILE_DIRNAME': rule_source_dirname,
}
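# e.g. for rule_source 'dir/one.ext' this yields INPUT_FILE_DIRNAME='dir',
# INPUT_FILE_NAME='one.ext', INPUT_FILE_BASE='one', INPUT_FILE_SUFFIX='.ext'
# and INPUT_FILE_PATH='dir/one.ext'.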
concrete_outputs_for_this_rule_source = []
for output in rule.get('outputs', []):
# Fortunately, Xcode and make both use $(VAR) format for their
# variables, so the expansion is the only transformation necessary.
# Any remaining $(VAR)-type variables in the string can be given
# directly to make, which will pick up the correct settings from
# what Xcode puts into the environment.
concrete_output = ExpandXcodeVariables(output, rule_input_dict)
concrete_outputs_for_this_rule_source.append(concrete_output)
# Add all concrete outputs to the project.
pbxp.AddOrGetFileInRootGroup(concrete_output)
concrete_outputs_by_rule_source.append( \
concrete_outputs_for_this_rule_source)
concrete_outputs_all.extend(concrete_outputs_for_this_rule_source)
# TODO(mark): Should verify that at most one of these is specified.
if int(rule.get('process_outputs_as_sources', False)):
for output in concrete_outputs_for_this_rule_source:
AddSourceToTarget(output, type, pbxp, xct)
# If the file came from the mac_bundle_resources list or if the rule
# is marked to process outputs as bundle resource, do so.
was_mac_bundle_resource = rule_source in tgt_mac_bundle_resources
if was_mac_bundle_resource or \
int(rule.get('process_outputs_as_mac_bundle_resources', False)):
for output in concrete_outputs_for_this_rule_source:
AddResourceToTarget(output, pbxp, xct)
# Do we have a message to print when this rule runs?
message = rule.get('message')
if message:
message = gyp.common.EncodePOSIXShellArgument(message)
message = ExpandXcodeVariables(message, rule_input_dict)
messages.append(message)
# Turn the list into a string that can be passed to a shell.
action_string = gyp.common.EncodePOSIXShellList(rule['action'])
action = ExpandXcodeVariables(action_string, rule_input_dict)
actions.append(action)
if len(concrete_outputs_all) > 0:
# TODO(mark): There's a possibility for collision here. Consider
# target "t" rule "A_r" and target "t_A" rule "r".
makefile_name = '%s.make' % re.sub(
'[^a-zA-Z0-9_]', '_', '%s_%s' % (target_name, rule['rule_name']))
makefile_path = os.path.join(xcode_projects[build_file].path,
makefile_name)
# TODO(mark): try/close? Write to a temporary file and swap it only
# if it's got changes?
makefile = open(makefile_path, 'wb')
# make will build the first target in the makefile by default. By
# convention, it's called "all". List all (or at least one)
# concrete output for each rule source as a prerequisite of the "all"
# target.
makefile.write('all: \\\n')
for concrete_output_index in \
xrange(0, len(concrete_outputs_by_rule_source)):
# Only list the first (index [0]) concrete output of each input
# in the "all" target. Otherwise, a parallel make (-j > 1) would
# attempt to process each input multiple times simultaneously.
# Otherwise, "all" could just contain the entire list of
# concrete_outputs_all.
concrete_output = \
concrete_outputs_by_rule_source[concrete_output_index][0]
if concrete_output_index == len(concrete_outputs_by_rule_source) - 1:
eol = ''
else:
eol = ' \\'
makefile.write(' %s%s\n' % (concrete_output, eol))
for (rule_source, concrete_outputs, message, action) in \
zip(rule['rule_sources'], concrete_outputs_by_rule_source,
messages, actions):
makefile.write('\n')
# Add a rule that declares it can build each concrete output of a
# rule source. Collect the names of the directories that are
# required.
concrete_output_dirs = []
for concrete_output_index in xrange(0, len(concrete_outputs)):
concrete_output = concrete_outputs[concrete_output_index]
if concrete_output_index == 0:
bol = ''
else:
bol = ' '
makefile.write('%s%s \\\n' % (bol, concrete_output))
concrete_output_dir = posixpath.dirname(concrete_output)
if (concrete_output_dir and
concrete_output_dir not in concrete_output_dirs):
concrete_output_dirs.append(concrete_output_dir)
makefile.write(' : \\\n')
# The prerequisites for this rule are the rule source itself and
# the set of additional rule inputs, if any.
prerequisites = [rule_source]
prerequisites.extend(rule.get('inputs', []))
for prerequisite_index in xrange(0, len(prerequisites)):
prerequisite = prerequisites[prerequisite_index]
if prerequisite_index == len(prerequisites) - 1:
eol = ''
else:
eol = ' \\'
makefile.write(' %s%s\n' % (prerequisite, eol))
# Make sure that output directories exist before executing the rule
# action.
if len(concrete_output_dirs) > 0:
makefile.write('\t@mkdir -p "%s"\n' %
'" "'.join(concrete_output_dirs))
# The rule message and action have already had the necessary variable
# substitutions performed.
if message:
# Mark it with note: so Xcode picks it up in build output.
makefile.write('\t@echo note: %s\n' % message)
makefile.write('\t%s\n' % action)
makefile.close()
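# The generated makefile therefore has roughly this shape (illustrative;
# recipe lines are tab-indented):
#   all: \
#       one.cc
#   one.cc \
#     : \
#       one.ext
#       @mkdir -p "gen"
#       python gen.py one.ext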
# It might be nice to ensure that needed output directories exist
# here rather than in each target in the Makefile, but that wouldn't
# work if there ever was a concrete output that had an input-dependent
# variable anywhere other than in the leaf position.
# Don't declare any inputPaths or outputPaths. If they're present,
# Xcode will provide a slight optimization by only running the script
# phase if any output is missing or outdated relative to any input.
# Unfortunately, it will also assume that all outputs are touched by
# the script, and if the outputs serve as files in a compilation
# phase, they will be unconditionally rebuilt. Since make might not
# rebuild everything that could be declared here as an output, this
# extra compilation activity is unnecessary. With inputPaths and
# outputPaths not supplied, make will always be called, but it knows
# enough to not do anything when everything is up-to-date.
# To help speed things up, pass -j COUNT to make so it does some work
# in parallel. Don't use ncpus because Xcode will build ncpus targets
# in parallel and if each target happens to have a rules step, there
# would be ncpus^2 things going. With a machine that has 2 quad-core
# Xeons, a build can quickly run out of processes based on
# scheduling/other tasks, and randomly failing builds are no good.
script = \
"""JOB_COUNT="$(/usr/sbin/sysctl -n hw.ncpu)"
if [ "${JOB_COUNT}" -gt 4 ]; then
JOB_COUNT=4
fi
exec "${DEVELOPER_BIN_DIR}/make" -f "${PROJECT_FILE_PATH}/%s" -j "${JOB_COUNT}"
exit 1
""" % makefile_name
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'name': 'Rule "' + rule['rule_name'] + '"',
'shellScript': script,
'showEnvVarsInLog': 0,
})
if support_xct:
support_xct.AppendProperty('buildPhases', ssbp)
else:
# TODO(mark): this assumes too much knowledge of the internals of
# xcodeproj_file; some of these smarts should move into xcodeproj_file
# itself.
xct._properties['buildPhases'].insert(prebuild_index, ssbp)
prebuild_index = prebuild_index + 1
# Extra rule inputs also go into the project file. Concrete outputs were
# already added when they were computed.
groups = ['inputs', 'inputs_excluded']
if skip_excluded_files:
groups = [x for x in groups if not x.endswith('_excluded')]
for group in groups:
for item in rule.get(group, []):
pbxp.AddOrGetFileInRootGroup(item)
# Add "sources".
for source in spec.get('sources', []):
(source_root, source_extension) = posixpath.splitext(source)
if source_extension[1:] not in rules_by_ext:
# AddSourceToTarget will add the file to a root group if it's not
# already there.
AddSourceToTarget(source, type, pbxp, xct)
else:
pbxp.AddOrGetFileInRootGroup(source)
# Add "mac_bundle_resources" and "mac_framework_private_headers" if
# it's a bundle of any type.
if is_bundle:
for resource in tgt_mac_bundle_resources:
(resource_root, resource_extension) = posixpath.splitext(resource)
if resource_extension[1:] not in rules_by_ext:
AddResourceToTarget(resource, pbxp, xct)
else:
pbxp.AddOrGetFileInRootGroup(resource)
for header in spec.get('mac_framework_private_headers', []):
AddHeaderToTarget(header, pbxp, xct, False)
# Add "mac_framework_headers". These can be valid for both frameworks
# and static libraries.
if is_bundle or type == 'static_library':
for header in spec.get('mac_framework_headers', []):
AddHeaderToTarget(header, pbxp, xct, True)
# Add "copies".
for copy_group in spec.get('copies', []):
pbxcp = gyp.xcodeproj_file.PBXCopyFilesBuildPhase({
'name': 'Copy to ' + copy_group['destination']
},
parent=xct)
dest = copy_group['destination']
if dest[0] not in ('/', '$'):
# Relative paths are relative to $(SRCROOT).
dest = '$(SRCROOT)/' + dest
pbxcp.SetDestination(dest)
# TODO(mark): The usual comment about this knowing too much about
# gyp.xcodeproj_file internals applies.
xct._properties['buildPhases'].insert(prebuild_index, pbxcp)
for file in copy_group['files']:
pbxcp.AddFile(file)
# Excluded files can also go into the project file.
if not skip_excluded_files:
for key in ['sources', 'mac_bundle_resources', 'mac_framework_headers',
'mac_framework_private_headers']:
excluded_key = key + '_excluded'
for item in spec.get(excluded_key, []):
pbxp.AddOrGetFileInRootGroup(item)
# So can "inputs" and "outputs" sections of "actions" groups.
groups = ['inputs', 'inputs_excluded', 'outputs', 'outputs_excluded']
if skip_excluded_files:
groups = [x for x in groups if not x.endswith('_excluded')]
for action in spec.get('actions', []):
for group in groups:
for item in action.get(group, []):
# Exclude anything in BUILT_PRODUCTS_DIR. They're products, not
# sources.
if not item.startswith('$(BUILT_PRODUCTS_DIR)/'):
pbxp.AddOrGetFileInRootGroup(item)
for postbuild in spec.get('postbuilds', []):
action_string_sh = gyp.common.EncodePOSIXShellList(postbuild['action'])
script = 'exec ' + action_string_sh + '\nexit 1\n'
# Make the postbuild step depend on the output of ld or ar from this
# target. Apparently putting the script step after the link step isn't
# sufficient to ensure proper ordering in all cases. With an input
# declared but no outputs, the script step should run every time, as
# desired.
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'inputPaths': ['$(BUILT_PRODUCTS_DIR)/$(EXECUTABLE_PATH)'],
'name': 'Postbuild "' + postbuild['postbuild_name'] + '"',
'shellScript': script,
'showEnvVarsInLog': 0,
})
xct.AppendProperty('buildPhases', ssbp)
# Add dependencies before libraries, because adding a dependency may imply
# adding a library. It's preferable to keep dependencies listed first
# during a link phase so that they can override symbols that would
# otherwise be provided by libraries, which will usually include system
# libraries. On some systems, ld is finicky and even requires the
# libraries to be ordered in such a way that unresolved symbols in
# earlier-listed libraries may only be resolved by later-listed libraries.
# The Mac linker doesn't work that way, but other platforms do, and so
# their linker invocations need to be constructed in this way. There's
# no compelling reason for Xcode's linker invocations to differ.
if 'dependencies' in spec:
for dependency in spec['dependencies']:
xct.AddDependency(xcode_targets[dependency])
# The support project also gets the dependencies (in case they are
# needed for the actions/rules to work).
if support_xct:
support_xct.AddDependency(xcode_targets[dependency])
if 'libraries' in spec:
for library in spec['libraries']:
xct.FrameworksPhase().AddFile(library)
# Add the library's directory to LIBRARY_SEARCH_PATHS if necessary.
# I wish Xcode handled this automatically.
library_dir = posixpath.dirname(library)
if library_dir not in xcode_standard_library_dirs and (
not xct.HasBuildSetting(_library_search_paths_var) or
library_dir not in xct.GetBuildSetting(_library_search_paths_var)):
xct.AppendBuildSetting(_library_search_paths_var, library_dir)
for configuration_name in configuration_names:
configuration = spec['configurations'][configuration_name]
xcbc = xct.ConfigurationNamed(configuration_name)
for include_dir in configuration.get('mac_framework_dirs', []):
xcbc.AppendBuildSetting('FRAMEWORK_SEARCH_PATHS', include_dir)
for include_dir in configuration.get('include_dirs', []):
xcbc.AppendBuildSetting('HEADER_SEARCH_PATHS', include_dir)
if 'defines' in configuration:
for define in configuration['defines']:
set_define = EscapeXCodeArgument(define)
xcbc.AppendBuildSetting('GCC_PREPROCESSOR_DEFINITIONS', set_define)
if 'xcode_settings' in configuration:
for xck, xcv in configuration['xcode_settings'].iteritems():
xcbc.SetBuildSetting(xck, xcv)
if 'xcode_config_file' in configuration:
config_ref = pbxp.AddOrGetFileInRootGroup(
configuration['xcode_config_file'])
xcbc.SetBaseConfiguration(config_ref)
build_files = []
for build_file, build_file_dict in data.iteritems():
if build_file.endswith('.gyp'):
build_files.append(build_file)
for build_file in build_files:
xcode_projects[build_file].Finalize1(xcode_targets, serialize_all_tests)
for build_file in build_files:
xcode_projects[build_file].Finalize2(xcode_targets,
xcode_target_to_target_dict)
for build_file in build_files:
xcode_projects[build_file].Write()
| bsd-3-clause |
fowode/pychess | lib/pychess/Utils/lutils/ldraw.py | 22 | 2455 | from __future__ import absolute_import
from .ldata import BLACK_SQUARES
from pychess.Utils.const import *
def testFifty (board):
if board.fifty >= 100:
return True
return False
drawSet = set((
(0, 0, 0, 0, 0, 0, 0, 0), #KK
(0, 1, 0, 0, 0, 0, 0, 0), #KBK
(1, 0, 0, 0, 0, 0, 0, 0), #KNK
(0, 0, 0, 0, 0, 1, 0, 0), #KKB
(0, 0, 0, 0, 1, 0, 0, 0), #KKN
(1, 0, 0, 0, 0, 1, 0, 0), #KNKB
(0, 1, 0, 0, 1, 0, 0, 0), #KBKN
))
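# Each tuple is a piece-count signature (wN, wB, wR, 0, bN, bB, bR, 0), as
# built in testMaterial below; the zero slots stay 0 because queens (and
# pawns) are ruled out before the lookup.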
# Contains positions that are not 100% certain draws
drawSet2 = set((
(2, 0, 0, 0, 0, 0, 0, 0), #KNNK
(0, 0, 0, 0, 2, 0, 0, 0), #KKNN
(2, 0, 0, 0, 1, 0, 0, 0), #KNNKN
(1, 0, 0, 0, 2, 0, 0, 0), #KNKNN
(2, 0, 0, 0, 0, 1, 0, 0), #KNNKB
(0, 1, 0, 0, 2, 0, 0, 0), #KBKNN
(2, 0, 0, 0, 0, 0, 1, 0), #KNNKR
(0, 0, 1, 0, 2, 0, 0, 0) #KRKNN
))
def testMaterial (board):
""" Tests if no players are able to win the game from the current
position """
whitePieceCount = board.pieceCount[WHITE]
blackPieceCount = board.pieceCount[BLACK]
if whitePieceCount[PAWN] or blackPieceCount[PAWN]:
return False
if whitePieceCount[QUEEN] or blackPieceCount[QUEEN]:
return False
wn = whitePieceCount[KNIGHT]
wb = whitePieceCount[BISHOP]
wr = whitePieceCount[ROOK]
bn = blackPieceCount[KNIGHT]
bb = blackPieceCount[BISHOP]
br = blackPieceCount[ROOK]
if (wn, wb, wr, 0, bn, bb, br, 0) in drawSet:
return True
# Tests KBKB. Draw if bishops are of same color
if not wn + wr + bn + br and wb == 1 and bb == 1:
if bool(board.boards[WHITE][BISHOP] & BLACK_SQUARES) == \
bool(board.boards[BLACK][BISHOP] & BLACK_SQUARES):
return True
def testPlayerMatingMaterial (board, color):
""" Tests if given color has enough material to mate on board """
pieceCount = board.pieceCount[color]
if pieceCount[PAWN] or pieceCount[QUEEN] or pieceCount[ROOK] \
or (pieceCount[KNIGHT] + pieceCount[BISHOP] > 1):
return True
return False
# This could be expanded by the fruit kpk draw function, which can test if a
# certain king versus king and pawn position is winnable.
def test (board):
""" Test if the position is drawn. Two-fold repetitions are counted. """
return board.repetitionCount (drawThreshold=2) > 1 or \
testFifty (board) or \
testMaterial (board)
| gpl-3.0 |
glovebx/odoo | addons/base_report_designer/plugin/openerp_report_designer/bin/script/About.py | 293 | 3815 | #########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer d29583@groovegarden.com
# Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
#############################################################################
import uno
from com.sun.star.task import XJobExecutor
if __name__ != 'package':
from lib.gui import *
class About(unohelper.Base, XJobExecutor):
def __init__(self, ctx):
self.ctx = ctx
self.module = "openerp_report"
self.version = "0.1"
self.win = DBModalDialog(60, 50, 175, 115, "About Odoo Report Designer")
fdBigFont = createUnoStruct("com.sun.star.awt.FontDescriptor")
fdBigFont.Width = 20
fdBigFont.Height = 25
fdBigFont.Weight = 120
fdBigFont.Family = 3
oLabelTitle1 = self.win.addFixedText("lblTitle1", 1, 1, 35, 30)
oLabelTitle1.Model.TextColor = 16056320
oLabelTitle1.Model.FontDescriptor = fdBigFont
oLabelTitle1.Model.FontRelief = 1
oLabelTitle1.Text = "Open"
oLabelTitle2 = self.win.addFixedText("lblTitle2", 35, 1, 30, 30)
oLabelTitle2.Model.TextColor = 1
oLabelTitle2.Model.FontDescriptor = fdBigFont
oLabelTitle2.Model.FontRelief = 1
oLabelTitle2.Text = "ERP"
oLabelProdDesc = self.win.addFixedText("lblProdDesc", 1, 30, 173, 75)
oLabelProdDesc.Model.TextColor = 1
fdBigFont.Width = 10
fdBigFont.Height = 11
fdBigFont.Weight = 76
oLabelProdDesc.Model.FontDescriptor = fdBigFont
oLabelProdDesc.Model.Align = 1
oLabelProdDesc.Model.FontRelief = 1
oLabelProdDesc.Model.MultiLine = True
oLabelProdDesc.Text = "This package helps you to create or modify\nreports in Odoo. Once connected to the\nserver, you can design your template of reports\nusing fields and expressions and browsing the\ncomplete structure of Odoo object database."
oLabelFooter = self.win.addFixedText("lblFooter", -1, -1, 173, 25)
oLabelFooter.Model.TextColor = 255
#oLabelFooter.Model.BackgroundColor = 1
oLabelFooter.Model.Border = 2
oLabelFooter.Model.BorderColor = 255
fdBigFont.Width = 8
fdBigFont.Height = 9
fdBigFont.Weight = 100
oLabelFooter.Model.FontDescriptor = fdBigFont
oLabelFooter.Model.Align = 1
oLabelFooter.Model.FontRelief = 1
oLabelFooter.Model.MultiLine = True
sMessage = "Odoo Report Designer v1.0 \nCopyright 2007-TODAY Tiny sprl \nThis product is free software, under the GNU Affero General Public License."
oLabelFooter.Text = sMessage
self.win.doModalDialog("",None)
if __name__ != "package" and __name__ == "__main__":
About(None)
elif __name__=="package":
g_ImplementationHelper.addImplementation( About, "org.openoffice.openerp.report.about", ("com.sun.star.task.Job",),)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
andeplane/lammps | python/examples/matplotlib_plot.py | 22 | 2270 | #!/usr/bin/env python -i
# preceding line should have path for Python on your machine
# matplotlib_plot.py
# Purpose: plot Temp of running LAMMPS simulation via matplotlib
# Syntax: plot.py in.lammps Nfreq Nsteps compute-ID
# in.lammps = LAMMPS input script
# Nfreq = plot data point every this many steps
# Nsteps = run for this many steps
# compute-ID = ID of compute that calculates temperature
# (or any other scalar quantity)
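# Example invocation (hypothetical input script and compute ID):
#   python matplotlib_plot.py in.melt 100 1000 thermo_temp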
from __future__ import print_function
import sys
sys.path.append("./pizza")
import matplotlib
matplotlib.use('tkagg')
import matplotlib.pyplot as plt
# parse command line
argv = sys.argv
if len(argv) != 5:
print("Syntax: plot.py in.lammps Nfreq Nsteps compute-ID")
sys.exit()
infile = sys.argv[1]
nfreq = int(sys.argv[2])
nsteps = int(sys.argv[3])
compute = sys.argv[4]
me = 0
# uncomment if running in parallel via Pypar
#import pypar
#me = pypar.rank()
#nprocs = pypar.size()
from lammps import lammps
lmp = lammps()
# run infile all at once
# assumed to have no run command in it
lmp.file(infile)
lmp.command("thermo %d" % nfreq)
# initial 0-step run to generate initial 1-point plot
lmp.command("run 0 pre yes post no")
value = lmp.extract_compute(compute,0,0)
ntimestep = 0
xaxis = [ntimestep]
yaxis = [value]
# create matplotlib plot
# just proc 0 handles plotting
if me == 0:
fig = plt.figure()
line, = plt.plot(xaxis, yaxis)
plt.xlim([0, nsteps])
plt.title(compute)
plt.xlabel("Timestep")
plt.ylabel("Temperature")
plt.show(block=False)
# run nfreq steps at a time w/out pre/post, query compute, refresh plot
import time
while ntimestep < nsteps:
lmp.command("run %d pre no post no" % nfreq)
ntimestep += nfreq
value = lmp.extract_compute(compute,0,0)
xaxis.append(ntimestep)
yaxis.append(value)
if me == 0:
line.set_xdata(xaxis)
line.set_ydata(yaxis)
ax = plt.gca()
ax.relim()
ax.autoscale_view(True, True, True)
fig.canvas.draw()
lmp.command("run 0 pre no post yes")
# uncomment if running in parallel via Pypar
#print("Proc %d out of %d procs has" % (me,nprocs), lmp)
#pypar.finalize()
if sys.version_info[0] == 3:
input("Press Enter to exit...")
else:
raw_input("Press Enter to exit...")
| gpl-2.0 |
felipesanches/linux-media | tools/perf/scripts/python/check-perf-trace.py | 1997 | 2539 | # perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
def trace_begin():
print "trace_begin"
pass
def trace_end():
print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, vec):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "vec=%s\n" % \
(symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, call_site, ptr, bytes_req, bytes_alloc,
gfp_flags):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "call_site=%u, ptr=%u, bytes_req=%u, " \
"bytes_alloc=%u, gfp_flags=%s\n" % \
(call_site, ptr, bytes_req, bytes_alloc,
flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
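# unhandled is a Core.autodict: a missing key auto-vivifies to an empty
# autodict, so the first "+= 1" raises TypeError and the count is seeded at 1.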
try:
unhandled[event_name] += 1
except TypeError:
unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
print "%-20s %5u %05u.%09u %8u %-20s " % \
(event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
% (common_pc(context), trace_flag_str(common_flags(context)), \
common_lock_depth(context))
def print_unhandled():
keys = unhandled.keys()
if not keys:
return
print "\nunhandled events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for event_name in keys:
print "%-40s %10d\n" % (event_name, unhandled[event_name])
| gpl-2.0 |
dmitry-sobolev/ansible | lib/ansible/modules/network/nxos/_nxos_template.py | 67 | 5984 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: nxos_template
version_added: "2.1"
author: "Peter Sprygada (@privateip)"
short_description: Manage Cisco NXOS device configurations
description:
- Manages network device configurations over SSH or NXAPI. This module
allows implementers to work with the device running-config. It
provides a way to push a set of commands onto a network device
by evaluating the current running-config and only pushing configuration
commands that are not already configured. The config source can
be a set of commands or a template.
deprecated: Deprecated in 2.2. Use M(nxos_config) instead.
options:
src:
description:
- The path to the config source. The source can be either a
file with config or a template that will be merged during
runtime. By default the task will search for the source
file in role or playbook root folder in templates directory.
required: false
default: null
force:
description:
- The force argument instructs the module to not consider the
current devices running-config. When set to true, this will
cause the module to push the contents of I(src) into the device
without first checking if already configured.
required: false
default: false
choices: [ "true", "false" ]
include_defaults:
description:
- The module, by default, will collect the current device
running-config to use as a base for comparisons to the commands
in I(src). Setting this value to true will cause the module
to issue the command C(show running-config all) to include all
device settings.
required: false
default: false
choices: [ "true", "false" ]
backup:
description:
- When this argument is configured true, the module will backup
the running-config from the node prior to making any changes.
The backup file will be written to backup_{{ hostname }} in
the root of the playbook directory.
required: false
default: false
choices: [ "true", "false" ]
config:
description:
- The module, by default, will connect to the remote device and
retrieve the current running-config to use as a base for comparing
against the contents of source. There are times when it is not
desirable to have the task get the current running-config for
every task in a playbook. The I(config) argument allows the
implementer to pass in the configuration to use as the base
config for comparison.
required: false
default: null
"""
EXAMPLES = """
- name: push a configuration onto the device
nxos_template:
src: config.j2
- name: forcibly push a configuration onto the device
nxos_template:
src: config.j2
force: yes
- name: provide the base configuration for comparison
nxos_template:
src: candidate_config.txt
config: current_config.txt
"""
RETURN = """
updates:
description: The set of commands that will be pushed to the remote device
returned: always
type: list
sample: ['...', '...']
responses:
description: The set of responses from issuing the commands on the device
returned: when not check_mode
type: list
sample: ['...', '...']
"""
from ansible.module_utils.nxos import load_config, get_config
from ansible.module_utils.nxos import nxos_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netcfg import NetworkConfig, dumps
def get_current_config(module):
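# Return the user-supplied base config if one was given; otherwise fetch the
# device running-config (with 'all' defaults when include_defaults is set),
# unless 'force' is set, in which case no comparison base is needed and None
# is returned.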
config = module.params.get('config')
if not config and not module.params['force']:
flags = []
if module.params['include_defaults']:
flags.append('all')
config = get_config(module, flags)
return config
def main():
""" main entry point for module execution
"""
argument_spec = dict(
src=dict(),
force=dict(default=False, type='bool'),
include_defaults=dict(default=True, type='bool'),
backup=dict(default=False, type='bool'),
config=dict(),
)
argument_spec.update(nxos_argument_spec)
mutually_exclusive = [('config', 'backup'), ('config', 'force')]
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
result = dict(changed=False)
candidate = NetworkConfig(contents=module.params['src'], indent=2)
contents = get_current_config(module)
if contents:
config = NetworkConfig(contents=contents, indent=2)
result['__backup__'] = str(contents)
if not module.params['force']:
commands = candidate.difference(config)
commands = dumps(commands, 'commands').split('\n')
commands = [str(c) for c in commands if c]
else:
commands = str(candidate).split('\n')
if commands:
if not module.check_mode:
load_config(module, commands)
result['changed'] = True
result['updates'] = commands
result['commands'] = commands
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
diogoosorio/blog | src/blog_app/blog_app.py | 1 | 2792 | import uuid
import re
from flask import Flask, redirect, render_template, g, abort, request, make_response
from flask_ink.ink import Ink
from flask_caching import Cache
from .settings import SETTINGS, CACHE_SETTINGS
from .repository import LocalRepository
from .parsers import BlogParser
from .pagination import BlogPagination
def build_app():
_app = Flask(__name__)
_app.config.update(SETTINGS)
_cache = Cache(_app, config=CACHE_SETTINGS)
Ink(_app)
return [_app, _cache]
app, cache = build_app() # pylint: disable=invalid-name
@app.before_request
def before_request():
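# Runs before every request: wires up the content repository on flask.g and
# normalizes the ?page= query parameter (defaulting to 1).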
content_dir = app.config['REPO_DIRECTORY']
parser = BlogParser()
g.repository = LocalRepository(content_dir, parser, cache, app.config['PAGESIZE'])
# pagination
page = request.args.get('page')
page = int(page) if page is not None and page.isdigit() else 1
g.page = page
@app.route('/')
def index():
return redirect('/blog', 301)
@app.route('/blog/')
@cache.cached(timeout=1200)
def blog():
template_variables = g.repository.getfiles('entries', g.page)
template_variables['pagination'] = BlogPagination(
page=g.page,
total=template_variables['total'],
per_page=app.config['PAGESIZE']
)
if not template_variables['entries']:
abort(404)
return render_template('blog.html', **template_variables)
@app.route('/blog/rss/')
@cache.cached(timeout=1200)
def rss():
template_variables = g.repository.getfiles('entries', g.page)
g.repository.pagesize = 1
last_entry = g.repository.getfiles('entries', 1)
last_entry = last_entry['entries'][0] if last_entry['entries'] else None
template_variables['uuid'] = uuid
template_variables['last_entry'] = last_entry
response = make_response(render_template('atom.xml', **template_variables))
response.headers['Content-Type'] = 'application/atom+xml'
return response
@app.errorhandler(404)
def page_not_found(_e):
path = request.path
legacy_match = re.match(r'^/blog/entry/([\w-]+)/?$', path, re.I)
if legacy_match:
slug = legacy_match.group(1)
entry = g.repository.getfile('entries', slug)
if entry:
return redirect("/blog/{0}".format(slug), 301)
return render_template('404.html', path=path), 404
@app.route(u'/blog/<post_name>')
@cache.memoize(timeout=3600)
def blog_detail(post_name):
entry = g.repository.getfile('entries', post_name)
if not entry:
abort(404)
template_variables = {
'entry': entry,
'title': entry['meta'].get('title'),
'description': entry['meta'].get('description')
}
return render_template('detail.html', **template_variables)
if __name__ == '__main__':
app.run(host=app.config['HOST'])
| mit |
Belgabor/django | tests/regressiontests/generic_inline_admin/tests.py | 14 | 13276 | # coding: utf-8
from django.test import TestCase
from django.conf import settings
from django.contrib.contenttypes.generic import generic_inlineformset_factory
# local test models
from models import Episode, EpisodeExtra, EpisodeMaxNum, EpisodeExclude, \
Media, EpisodePermanent, MediaPermanentInline
class GenericAdminViewTest(TestCase):
fixtures = ['users.xml']
def setUp(self):
# set TEMPLATE_DEBUG to True to ensure {% include %} will raise
# exceptions since that is how inlines are rendered and #9498 will
# bubble up if it is an issue.
self.original_template_debug = settings.TEMPLATE_DEBUG
settings.TEMPLATE_DEBUG = True
self.client.login(username='super', password='secret')
# Can't load content via a fixture (since the GenericForeignKey
# relies on content type IDs, which will vary depending on what
# other tests have been run), thus we do it here.
e = Episode.objects.create(name='This Week in Django')
self.episode_pk = e.pk
m = Media(content_object=e, url='http://example.com/podcast.mp3')
m.save()
self.mp3_media_pk = m.pk
m = Media(content_object=e, url='http://example.com/logo.png')
m.save()
self.png_media_pk = m.pk
def tearDown(self):
self.client.logout()
settings.TEMPLATE_DEBUG = self.original_template_debug
def testBasicAddGet(self):
"""
A smoke test to ensure GET on the add_view works.
"""
response = self.client.get('/generic_inline_admin/admin/generic_inline_admin/episode/add/')
self.failUnlessEqual(response.status_code, 200)
def testBasicEditGet(self):
"""
A smoke test to ensure GET on the change_view works.
"""
response = self.client.get('/generic_inline_admin/admin/generic_inline_admin/episode/%d/' % self.episode_pk)
self.failUnlessEqual(response.status_code, 200)
def testBasicAddPost(self):
"""
A smoke test to ensure POST on add_view works.
"""
post_data = {
"name": u"This Week in Django",
# inline data
"generic_inline_admin-media-content_type-object_id-TOTAL_FORMS": u"1",
"generic_inline_admin-media-content_type-object_id-INITIAL_FORMS": u"0",
"generic_inline_admin-media-content_type-object_id-MAX_NUM_FORMS": u"0",
}
response = self.client.post('/generic_inline_admin/admin/generic_inline_admin/episode/add/', post_data)
self.failUnlessEqual(response.status_code, 302) # redirect somewhere
def testBasicEditPost(self):
"""
A smoke test to ensure POST on edit_view works.
"""
post_data = {
"name": u"This Week in Django",
# inline data
"generic_inline_admin-media-content_type-object_id-TOTAL_FORMS": u"3",
"generic_inline_admin-media-content_type-object_id-INITIAL_FORMS": u"2",
"generic_inline_admin-media-content_type-object_id-MAX_NUM_FORMS": u"0",
"generic_inline_admin-media-content_type-object_id-0-id": u"%d" % self.mp3_media_pk,
"generic_inline_admin-media-content_type-object_id-0-url": u"http://example.com/podcast.mp3",
"generic_inline_admin-media-content_type-object_id-1-id": u"%d" % self.png_media_pk,
"generic_inline_admin-media-content_type-object_id-1-url": u"http://example.com/logo.png",
"generic_inline_admin-media-content_type-object_id-2-id": u"",
"generic_inline_admin-media-content_type-object_id-2-url": u"",
}
url = '/generic_inline_admin/admin/generic_inline_admin/episode/%d/' % self.episode_pk
response = self.client.post(url, post_data)
self.failUnlessEqual(response.status_code, 302) # redirect somewhere
def testGenericInlineFormset(self):
EpisodeMediaFormSet = generic_inlineformset_factory(Media, can_delete=False, extra=3)
e = Episode.objects.get(name='This Week in Django')
# Works with no queryset
formset = EpisodeMediaFormSet(instance=e)
self.assertEquals(len(formset.forms), 5)
self.assertEquals(formset.forms[0].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-0-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-0-url" type="text" name="generic_inline_admin-media-content_type-object_id-0-url" value="http://example.com/podcast.mp3" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-0-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-0-id" /></p>' % self.mp3_media_pk)
self.assertEquals(formset.forms[1].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-1-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-1-url" type="text" name="generic_inline_admin-media-content_type-object_id-1-url" value="http://example.com/logo.png" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-1-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-1-id" /></p>' % self.png_media_pk)
self.assertEquals(formset.forms[2].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-2-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-2-url" type="text" name="generic_inline_admin-media-content_type-object_id-2-url" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-2-id" id="id_generic_inline_admin-media-content_type-object_id-2-id" /></p>')
# A queryset can be used to alter display ordering
formset = EpisodeMediaFormSet(instance=e, queryset=Media.objects.order_by('url'))
self.assertEquals(len(formset.forms), 5)
self.assertEquals(formset.forms[0].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-0-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-0-url" type="text" name="generic_inline_admin-media-content_type-object_id-0-url" value="http://example.com/logo.png" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-0-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-0-id" /></p>' % self.png_media_pk)
self.assertEquals(formset.forms[1].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-1-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-1-url" type="text" name="generic_inline_admin-media-content_type-object_id-1-url" value="http://example.com/podcast.mp3" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-1-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-1-id" /></p>' % self.mp3_media_pk)
self.assertEquals(formset.forms[2].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-2-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-2-url" type="text" name="generic_inline_admin-media-content_type-object_id-2-url" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-2-id" id="id_generic_inline_admin-media-content_type-object_id-2-id" /></p>')
# Works with a queryset that omits items
formset = EpisodeMediaFormSet(instance=e, queryset=Media.objects.filter(url__endswith=".png"))
self.assertEquals(len(formset.forms), 4)
self.assertEquals(formset.forms[0].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-0-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-0-url" type="text" name="generic_inline_admin-media-content_type-object_id-0-url" value="http://example.com/logo.png" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-0-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-0-id" /></p>' % self.png_media_pk)
self.assertEquals(formset.forms[1].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-1-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-1-url" type="text" name="generic_inline_admin-media-content_type-object_id-1-url" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-1-id" id="id_generic_inline_admin-media-content_type-object_id-1-id" /></p>')
def testGenericInlineFormsetFactory(self):
# Regression test for #10522.
inline_formset = generic_inlineformset_factory(Media,
exclude=('url',))
# Regression test for #12340.
e = Episode.objects.get(name='This Week in Django')
formset = inline_formset(instance=e)
self.failUnless(formset.get_queryset().ordered)
class GenericInlineAdminParametersTest(TestCase):
fixtures = ['users.xml']
def setUp(self):
self.client.login(username='super', password='secret')
def tearDown(self):
self.client.logout()
def _create_object(self, model):
"""
Create a model with an attached Media object via GFK. We can't
load content via a fixture (since the GenericForeignKey relies on
content type IDs, which will vary depending on what other tests
have been run), thus we do it here.
"""
e = model.objects.create(name='This Week in Django')
Media.objects.create(content_object=e, url='http://example.com/podcast.mp3')
return e
def testNoParam(self):
"""
With one initial form, extra (default) at 3, there should be 4 forms.
"""
e = self._create_object(Episode)
response = self.client.get('/generic_inline_admin/admin/generic_inline_admin/episode/%s/' % e.pk)
formset = response.context['inline_admin_formsets'][0].formset
self.assertEqual(formset.total_form_count(), 4)
self.assertEqual(formset.initial_form_count(), 1)
def testExtraParam(self):
"""
With extra=0, there should be one form.
"""
e = self._create_object(EpisodeExtra)
response = self.client.get('/generic_inline_admin/admin/generic_inline_admin/episodeextra/%s/' % e.pk)
formset = response.context['inline_admin_formsets'][0].formset
self.assertEqual(formset.total_form_count(), 1)
self.assertEqual(formset.initial_form_count(), 1)
def testMaxNumParam(self):
"""
With extra=5 and max_num=2, there should be only 2 forms.
"""
e = self._create_object(EpisodeMaxNum)
inline_form_data = '<input type="hidden" name="generic_inline_admin-media-content_type-object_id-TOTAL_FORMS" value="2" id="id_generic_inline_admin-media-content_type-object_id-TOTAL_FORMS" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-INITIAL_FORMS" value="1" id="id_generic_inline_admin-media-content_type-object_id-INITIAL_FORMS" />'
response = self.client.get('/generic_inline_admin/admin/generic_inline_admin/episodemaxnum/%s/' % e.pk)
formset = response.context['inline_admin_formsets'][0].formset
self.assertEqual(formset.total_form_count(), 2)
self.assertEqual(formset.initial_form_count(), 1)
def testExcludeParam(self):
"""
Generic inline formsets should respect exclude.
"""
e = self._create_object(EpisodeExclude)
response = self.client.get('/generic_inline_admin/admin/generic_inline_admin/episodeexclude/%s/' % e.pk)
formset = response.context['inline_admin_formsets'][0].formset
self.failIf('url' in formset.forms[0], 'The formset has excluded "url" field.')
class GenericInlineAdminWithUniqueTogetherTest(TestCase):
fixtures = ['users.xml']
def setUp(self):
self.client.login(username='super', password='secret')
def tearDown(self):
self.client.logout()
def testAdd(self):
post_data = {
"name": u"John Doe",
# inline data
"generic_inline_admin-phonenumber-content_type-object_id-TOTAL_FORMS": u"1",
"generic_inline_admin-phonenumber-content_type-object_id-INITIAL_FORMS": u"0",
"generic_inline_admin-phonenumber-content_type-object_id-MAX_NUM_FORMS": u"0",
"generic_inline_admin-phonenumber-content_type-object_id-0-id": "",
"generic_inline_admin-phonenumber-content_type-object_id-0-phone_number": "555-555-5555",
}
response = self.client.get('/generic_inline_admin/admin/generic_inline_admin/contact/add/')
response = self.client.post('/generic_inline_admin/admin/generic_inline_admin/contact/add/', post_data)
self.failUnlessEqual(response.status_code, 302) # redirect somewhere
class NoInlineDeletionTest(TestCase):
def test_no_deletion(self):
fake_site = object()
inline = MediaPermanentInline(EpisodePermanent, fake_site)
fake_request = object()
formset = inline.get_formset(fake_request)
self.assertFalse(formset.can_delete)
| bsd-3-clause |
anurag03/integration_tests | cfme/configure/access_control/__init__.py | 1 | 58461 | import attr
import six
from navmazing import NavigateToSibling, NavigateToAttribute
from widgetastic.widget import Checkbox, View, Text, ConditionalSwitchableView
from widgetastic_patternfly import (
BootstrapSelect, Button, Input, Tab, CheckableBootstrapTreeview as CbTree,
BootstrapSwitch, CandidateNotFound, Dropdown)
from widgetastic_manageiq import (
UpDownSelect, PaginationPane, SummaryFormItem, Table, SummaryForm)
from widgetastic_manageiq.expression_editor import GroupTagExpressionEditor
from cfme.base.credential import Credential
from cfme.base.ui import ConfigurationView
from cfme.common import Taggable
from cfme.exceptions import CFMEException, RBACOperationBlocked
from cfme.modeling.base import BaseCollection, BaseEntity
from cfme.utils.appliance.implementations.ui import navigator, CFMENavigateStep, navigate_to
from cfme.utils.blockers import BZ
from cfme.utils.log import logger
from cfme.utils.pretty import Pretty
from cfme.utils.update import Updateable
from cfme.utils.wait import wait_for
EVM_DEFAULT_GROUPS = [
'evmgroup-super_administrator',
'evmgroup-administrator',
'evmgroup-approver',
'evmgroup-auditor',
'evmgroup-desktop',
'evmgroup-operator',
'evmgroup-security',
'evmgroup-support',
'evmgroup-user',
'evmgroup-vm_user'
]
class AccessControlToolbar(View):
""" Toolbar on the Access Control page """
configuration = Dropdown('Configuration')
policy = Dropdown('Policy')
####################################################################################################
# RBAC USER METHODS
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
class UserForm(ConfigurationView):
""" User Form View."""
name_txt = Input(name='name')
userid_txt = Input(name='userid')
password_txt = Input(id='password')
password_verify_txt = Input(id='verify')
email_txt = Input(name='email')
user_group_select = BootstrapSelect(id='chosen_group')
cancel_button = Button('Cancel')
class UsersEntities(View):
table = Table("//div[@id='records_div' or @id='main_div']//table")
class AllUserView(ConfigurationView):
""" All Users View."""
toolbar = View.nested(AccessControlToolbar)
entities = View.nested(UsersEntities)
paginator = PaginationPane()
@property
def is_displayed(self):
return (
self.accordions.accesscontrol.is_opened and
self.title.text == 'Access Control EVM Users'
)
class AddUserView(UserForm):
""" Add User View."""
add_button = Button('Add')
@property
def is_displayed(self):
return self.accordions.accesscontrol.is_opened and self.title.text == "Adding a new User"
class DetailsUserEntities(View):
smart_management = SummaryForm('Smart Management')
class DetailsUserView(ConfigurationView):
""" User Details view."""
toolbar = View.nested(AccessControlToolbar)
entities = View.nested(DetailsUserEntities)
@property
def is_displayed(self):
return (
self.title.text == 'EVM User "{}"'.format(self.context['object'].name) and
self.accordions.accesscontrol.is_opened
)
class EditUserView(UserForm):
""" User Edit View."""
save_button = Button('Save')
reset_button = Button('Reset')
change_stored_password = Text('#change_stored_password')
cancel_password_change = Text('#cancel_password_change')
@property
def is_displayed(self):
return (
self.title.text == 'Editing User "{}"'.format(self.context['object'].name) and
self.accordions.accesscontrol.is_opened
)
@attr.s
class User(Updateable, Pretty, BaseEntity, Taggable):
""" Class represents an user in CFME UI
Args:
name: Name of the user
credential: User's credentials
email: User's email
groups: Add User to multiple groups in Versions >= 5.9.
cost_center: User's cost center
value_assign: user's value to assign
appliance: appliance under test
"""
pretty_attrs = ['name', 'group']
name = attr.ib(default=None)
credential = attr.ib(default=None)
email = attr.ib(default=None)
groups = attr.ib(default=None)
cost_center = attr.ib(default=None)
value_assign = attr.ib(default=None)
_restore_user = attr.ib(default=None, init=False)
def __enter__(self):
if self._restore_user != self.appliance.user:
logger.info('Switching to new user: %s', self.credential.principal)
self._restore_user = self.appliance.user
self.appliance.server.logout()
self.appliance.user = self
def __exit__(self, *args, **kwargs):
if self._restore_user != self.appliance.user:
logger.info('Restoring to old user: %s', self._restore_user.credential.principal)
self.appliance.server.logout()
self.appliance.user = self._restore_user
self._restore_user = None
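# Usage sketch (hypothetical): the context manager temporarily switches the
# appliance session to this user and restores the previous one on exit:
# with user:
#     ...  # UI actions here run as `user`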
def update(self, updates):
""" Update user method
Args:
updates: user data that should be changed
Note: In case updates is the same as original user data, update will be canceled,
as 'Save' button will not be active
"""
view = navigate_to(self, 'Edit')
self.change_stored_password()
new_updates = {}
if 'credential' in updates:
new_updates.update({
'userid_txt': updates.get('credential').principal,
'password_txt': updates.get('credential').secret,
'password_verify_txt': updates.get('credential').verify_secret
})
new_updates.update({
'name_txt': updates.get('name'),
'email_txt': updates.get('email'),
'user_group_select': getattr(
updates.get('group'),
'description', None)
})
changed = view.fill({
'name_txt': new_updates.get('name_txt'),
'userid_txt': new_updates.get('userid_txt'),
'password_txt': new_updates.get('password_txt'),
'password_verify_txt': new_updates.get('password_verify_txt'),
'email_txt': new_updates.get('email_txt'),
'user_group_select': new_updates.get('user_group_select')
})
if changed:
view.save_button.click()
flash_message = 'User "{}" was saved'.format(updates.get('name', self.name))
else:
view.cancel_button.click()
flash_message = 'Edit of User was cancelled by the user'
view = self.create_view(DetailsUserView, override=updates)
view.flash.assert_message(flash_message)
assert view.is_displayed
def copy(self):
""" Creates copy of existing user
return: User object of copied user
"""
view = navigate_to(self, 'Details')
view.toolbar.configuration.item_select('Copy this User to a new User')
view = self.create_view(AddUserView)
new_user = self.parent.instantiate(
name="{}copy".format(self.name),
credential=Credential(principal='redhat', secret='redhat')
)
view.fill({
'name_txt': new_user.name,
'userid_txt': new_user.credential.principal,
'password_txt': new_user.credential.secret,
'password_verify_txt': new_user.credential.verify_secret
})
view.add_button.click()
view = self.create_view(AllUserView)
view.flash.assert_success_message('User "{}" was saved'.format(new_user.name))
assert view.is_displayed
return new_user
def delete(self, cancel=True):
"""Delete existing user
Args:
cancel: Default value 'True', user will be deleted
'False' - deletion of user will be canceled
Throws:
RBACOperationBlocked: If operation is blocked due to current user
not having appropriate permissions OR delete is not allowed
for currently selected user
"""
flash_success_msg = 'EVM User "{}": Delete successful'.format(self.name)
flash_blocked_msg = "Default EVM User \"{}\" cannot be deleted".format(self.name)
delete_user_txt = 'Delete this User'
view = navigate_to(self, 'Details')
if not view.toolbar.configuration.item_enabled(delete_user_txt):
raise RBACOperationBlocked("Configuration action '{}' is not enabled".format(
delete_user_txt))
view.toolbar.configuration.item_select(delete_user_txt, handle_alert=cancel)
try:
view.flash.assert_message(flash_blocked_msg)
raise RBACOperationBlocked(flash_blocked_msg)
except AssertionError:
pass
view.flash.assert_message(flash_success_msg)
if cancel:
view = self.create_view(AllUserView)
view.flash.assert_success_message(flash_success_msg)
else:
view = self.create_view(DetailsUserView)
assert view.is_displayed
# TODO update elements, after 1469035 fix
def change_stored_password(self, changes=None, cancel=False):
""" Changes user password
Args:
changes: dict with fields to be changed;
if None, the password fields will only be enabled
cancel: True, if you want to cancel the password change
"""
view = navigate_to(self, 'Edit')
self.browser.execute_script(
self.browser.get_attribute(
'onClick', self.browser.element(view.change_stored_password)))
if changes:
view.fill(changes)
if cancel:
self.browser.execute_script(
self.browser.get_attribute(
'onClick', self.browser.element(view.cancel_password_change)))
@property
def exists(self):
try:
navigate_to(self, 'Details')
return True
except CandidateNotFound:
return False
@property
def description(self):
return self.credential.principal
@property
def my_settings(self):
from cfme.configure.settings import MySettings
my_settings = MySettings(appliance=self.appliance)
return my_settings
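# A minimal usage sketch (not part of the original module; the fixture and
# credential values below are hypothetical). User is a context manager:
# __enter__ logs the appliance in as this user and __exit__ restores the
# previous user. Never called at import time.
def _example_switch_user(appliance):
    user = appliance.collections.users.simple_user('jdoe', 'secret')
    with user:
        pass  # UI actions here run while logged in as 'jdoe'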
@attr.s
class UserCollection(BaseCollection):
ENTITY = User
def simple_user(self, userid, password, fullname=None):
"""If a fullname is not supplied, userid is used for credential principal and user name"""
creds = Credential(principal=userid, secret=password)
return self.instantiate(name=fullname or userid, credential=creds)
def create(self, name=None, credential=None, email=None, groups=None, cost_center=None,
value_assign=None, cancel=False):
""" User creation method
Args:
name: Name of the user
credential: User's credentials, credential.principal is used as username
email: User's email
groups: Add User to multiple groups in Versions >= 5.9.
cost_center: User's cost center
value_assign: user's value to assign
cancel: True - if you want to cancel user creation,
by default the user will be created
Throws:
RBACOperationBlocked: If operation is blocked due to current user
not having appropriate permissions OR update is not allowed
for currently selected role
"""
if self.appliance.version < "5.8":
user_blocked_msg = "Userid has already been taken"
else:
user_blocked_msg = ("Userid is not unique within region {}".format(
self.appliance.server.zone.region.number))
if type(groups) is not list:
groups = [groups]
if self.appliance.version < "5.9" and len(groups) > 1:
raise CFMEException(
"Assigning a user to multiple groups is only supported in CFME versions > 5.8")
user = self.instantiate(
name=name, credential=credential, email=email, groups=groups, cost_center=cost_center,
value_assign=value_assign
)
# view.fill supports iteration over a list when selecting pulldown list items but
# will throw an exception when the item doesn't appear in the list so filter out
# null items since they "shouldn't" exist
user_group_names = [getattr(ug, 'description', None) for ug in user.groups if ug]
fill_values = {
'name_txt': user.name,
'userid_txt': user.credential.principal,
'email_txt': user.email,
'user_group_select': user_group_names
}
# only fill password if auth_mode is set to Database
if self.appliance.server.authentication.auth_mode.lower() == 'database':
fill_values.update({
'password_txt': user.credential.secret,
'password_verify_txt': user.credential.verify_secret}
)
view = navigate_to(self, 'Add')
view.fill(fill_values)
if cancel:
view.cancel_button.click()
flash_message = 'Add of new User was cancelled by the user'
else:
view.add_button.click()
flash_message = 'User "{}" was saved'.format(user.name)
try:
view.flash.assert_message(user_blocked_msg)
raise RBACOperationBlocked(user_blocked_msg)
except AssertionError:
pass
view = self.create_view(AllUserView)
view.flash.assert_success_message(flash_message)
assert view.is_displayed
# To ensure tree update
view.browser.refresh()
return user
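# A minimal usage sketch of the create() call above (names are hypothetical;
# assumes a 'database' auth_mode so the password fields get filled).
def _example_create_user(appliance):
    creds = Credential(principal='jdoe', secret='redhat')
    group = appliance.collections.groups.instantiate(description='EvmGroup-vm_user')
    return appliance.collections.users.create(
        name='John Doe',
        credential=creds,
        email='jdoe@example.com',
        groups=[group])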
@navigator.register(UserCollection, 'All')
class UserAll(CFMENavigateStep):
VIEW = AllUserView
prerequisite = NavigateToAttribute('appliance.server', 'Configuration')
def step(self):
self.prerequisite_view.accordions.accesscontrol.tree.click_path(
self.obj.appliance.server_region_string(), 'Users')
@navigator.register(UserCollection, 'Add')
class UserAdd(CFMENavigateStep):
VIEW = AddUserView
def prerequisite(self):
navigate_to(self.obj.appliance.server, 'Configuration')
return navigate_to(self.obj, 'All')
def step(self):
self.prerequisite_view.toolbar.configuration.item_select("Add a new User")
@navigator.register(User, 'Details')
class UserDetails(CFMENavigateStep):
VIEW = DetailsUserView
prerequisite = NavigateToAttribute('parent', 'All')
def step(self):
try:
self.prerequisite_view.accordions.accesscontrol.tree.click_path(
self.obj.appliance.server_region_string(), 'Users', self.obj.name)
except CandidateNotFound:
self.obj.appliance.browser.widgetastic.refresh()
self.prerequisite_view.accordions.accesscontrol.tree.click_path(
self.obj.appliance.server_region_string(), 'Users', self.obj.name)
@navigator.register(User, 'Edit')
class UserEdit(CFMENavigateStep):
VIEW = EditUserView
prerequisite = NavigateToSibling('Details')
def step(self):
self.prerequisite_view.toolbar.configuration.item_select('Edit this User')
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# RBAC USER METHODS
####################################################################################################
####################################################################################################
# RBAC GROUP METHODS
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
class MyCompanyTagsTree(View):
tree_locator = 'tags_treebox'
tree = CbTree(tree_locator)
class MyCompanyTagsExpressionView(View):
tag_expression = GroupTagExpressionEditor()
class MyCompanyTagsWithExpression(Tab):
""" Represents 'My company tags' tab in Group Form """
TAB_NAME = "My Company Tags"
tag_mode = BootstrapSelect(id='use_filter_expression')
tag_settings = ConditionalSwitchableView(reference='tag_mode')
tag_settings.register('Specific Tags', default=True, widget=MyCompanyTagsTree)
tag_settings.register('Tags Based On Expression', widget=MyCompanyTagsExpressionView)
class Hosts_And_Clusters(Tab): # noqa
""" Represents 'Hosts and Clusters' tab in Group Form """
TAB_NAME = "Hosts & Clusters"
tree = CbTree('hac_treebox')
class Vms_And_Templates(Tab): # noqa
""" Represents 'VM's and Templates' tab in Group Form """
TAB_NAME = "VMs & Templates"
tree = CbTree('vat_treebox')
class GroupForm(ConfigurationView):
""" Group Form in CFME UI."""
ldap_groups_for_user = BootstrapSelect(id='ldap_groups_user')
description_txt = Input(name='description')
lookup_ldap_groups_chk = Checkbox(name='lookup')
role_select = BootstrapSelect(id='group_role')
group_tenant = BootstrapSelect(id='group_tenant')
user_to_look_up = Input(name='user')
username = Input(name='user_id')
password = Input(name='password')
tag = SummaryFormItem('Smart Management', 'My Company Tags')
cancel_button = Button('Cancel')
retrieve_button = Button('Retrieve')
my_company_tags = View.nested(MyCompanyTagsWithExpression)
hosts_and_clusters = View.nested(Hosts_And_Clusters)
vms_and_templates = View.nested(Vms_And_Templates)
class AddGroupView(GroupForm):
""" Add Group View in CFME UI """
add_button = Button("Add")
@property
def is_displayed(self):
return (
self.accordions.accesscontrol.is_opened and
self.title.text == "Adding a new Group"
)
class DetailsGroupEntities(View):
smart_management = SummaryForm('Smart Management')
my_company_tags = View.nested(MyCompanyTagsWithExpression)
hosts_and_clusters = View.nested(Hosts_And_Clusters)
vms_and_templates = View.nested(Vms_And_Templates)
class DetailsGroupView(ConfigurationView):
""" Details Group View in CFME UI """
toolbar = View.nested(AccessControlToolbar)
entities = View.nested(DetailsGroupEntities)
@property
def is_displayed(self):
return (
self.accordions.accesscontrol.is_opened and
self.title.text == 'EVM Group "{}"'.format(self.context['object'].description)
)
class EditGroupView(GroupForm):
""" Edit Group View in CFME UI """
save_button = Button("Save")
reset_button = Button('Reset')
@property
def is_displayed(self):
return (
self.accordions.accesscontrol.is_opened and
self.title.text == 'Editing Group "{}"'.format(self.context['object'].description)
)
class AllGroupView(ConfigurationView):
""" All Groups View in CFME UI """
toolbar = View.nested(AccessControlToolbar)
table = Table("//div[@id='main_div']//table")
paginator = PaginationPane()
@property
def is_displayed(self):
return (
self.accordions.accesscontrol.is_opened and
self.title.text == 'Access Control EVM Groups'
)
class EditGroupSequenceView(ConfigurationView):
""" Edit Groups Sequence View in CFME UI """
group_order_selector = UpDownSelect(
'#seq_fields',
'//button[@title="Move selected fields up"]/i',
'//button[@title="Move selected fields down"]/i')
save_button = Button('Save')
reset_button = Button('Reset')
cancel_button = Button('Cancel')
@property
def is_displayed(self):
return (
self.accordions.accesscontrol.is_opened and
self.title.text == "Editing Sequence of User Groups"
)
@attr.s
class Group(BaseEntity, Taggable):
"""Represents a group in CFME UI
Properties:
description: group description
role: group role
tenant: group tenant
user_to_lookup: ldap user to lookup
ldap_credentials: ldap user credentials
tag: tag for group restriction
host_cluster: host/cluster for group restriction
vm_template: vm/template for group restriction
appliance: appliance under test
"""
pretty_attrs = ['description', 'role']
description = attr.ib(default=None)
role = attr.ib(default=None)
tenant = attr.ib(default="My Company")
ldap_credentials = attr.ib(default=None)
user_to_lookup = attr.ib(default=None)
tag = attr.ib(default=None)
host_cluster = attr.ib(default=None)
vm_template = attr.ib(default=None)
def _retrieve_ldap_user_groups(self):
""" Retrive ldap user groups
return: AddGroupView
"""
view = navigate_to(self.parent, 'Add')
view.fill({'lookup_ldap_groups_chk': True,
'user_to_look_up': self.user_to_lookup,
'username': self.ldap_credentials.principal,
'password': self.ldap_credentials.secret})
view.retrieve_button.click()
return view
def _retrieve_ext_auth_user_groups(self):
""" Retrive external authorization user groups
return: AddGroupView
"""
view = navigate_to(self.parent, 'Add')
view.fill({'lookup_ldap_groups_chk': True,
'user_to_look_up': self.user_to_lookup})
view.retrieve_button.click()
return view
def _fill_ldap_group_lookup(self, view):
""" Fills ldap info for group lookup
Args: view: view for group creation(AddGroupView)
"""
view.fill({'ldap_groups_for_user': self.description,
'description_txt': self.description,
'role_select': self.role,
'group_tenant': self.tenant})
view.add_button.click()
view = self.create_view(AllGroupView)
view.flash.assert_success_message('Group "{}" was saved'.format(self.description))
assert view.is_displayed
def add_group_from_ldap_lookup(self):
"""Adds a group from ldap lookup"""
view = self._retrieve_ldap_user_groups()
self._fill_ldap_group_lookup(view)
def add_group_from_ext_auth_lookup(self):
"""Adds a group from external authorization lookup"""
view = self._retrieve_ext_auth_user_groups()
self._fill_ldap_group_lookup(view)
def update(self, updates):
""" Update group method
Args:
updates: group data that should be changed
Note: If updates matches the original group data, the update is canceled,
as the 'Save' button will not become active
"""
edit_group_txt = 'Edit this Group'
view = navigate_to(self, 'Details')
if not view.toolbar.configuration.item_enabled(edit_group_txt):
raise RBACOperationBlocked("Configuration action '{}' is not enabled".format(
edit_group_txt))
view = navigate_to(self, 'Edit')
changed = view.fill({
'description_txt': updates.get('description'),
'role_select': updates.get('role'),
'group_tenant': updates.get('tenant')
})
changed_tag = self._set_group_restriction(view.my_company_tags, updates.get('tag'))
changed_host_cluster = self._set_group_restriction(
view.hosts_and_clusters, updates.get('host_cluster'))
changed_vm_template = self._set_group_restriction(
view.vms_and_templates, updates.get('vm_template'))
if changed or changed_tag or changed_host_cluster or changed_vm_template:
view.save_button.click()
flash_message = 'Group "{}" was saved'.format(
updates.get('description', self.description))
else:
view.cancel_button.click()
flash_message = 'Edit of Group was cancelled by the user'
view = self.create_view(DetailsGroupView, override=updates)
view.flash.assert_message(flash_message)
assert view.is_displayed
def delete(self, cancel=True):
"""
Delete existing group
Args:
cancel: Default value 'True', group will be deleted
'False' - deletion of group will be canceled
Throws:
RBACOperationBlocked: If operation is blocked due to current user
not having appropriate permissions OR delete is not allowed
for currently selected group
"""
flash_success_msg = 'EVM Group "{}": Delete successful'.format(self.description)
flash_blocked_msg_list = [
('EVM Group "{}": '
'Error during delete: A read only group cannot be deleted.'.format(self.description)),
('EVM Group "{}": Error during delete: '
'The group has users assigned that do not '
'belong to any other group'.format(self.description))]
delete_group_txt = 'Delete this Group'
view = navigate_to(self, 'Details')
if not view.toolbar.configuration.item_enabled(delete_group_txt):
raise RBACOperationBlocked("Configuration action '{}' is not enabled".format(
delete_group_txt))
view.toolbar.configuration.item_select(delete_group_txt, handle_alert=cancel)
for flash_blocked_msg in flash_blocked_msg_list:
try:
view.flash.assert_message(flash_blocked_msg)
raise RBACOperationBlocked(flash_blocked_msg)
except AssertionError:
pass
view.flash.assert_no_error()
view.flash.assert_message(flash_success_msg)
if cancel:
view = self.create_view(AllGroupView)
view.flash.assert_success_message(flash_success_msg)
else:
view = self.create_view(DetailsGroupView)
assert view.is_displayed, (
"Access Control Group {} Detail View is not displayed".format(self.description))
def set_group_order(self, updated_order):
""" Sets group order for group lookup
Args:
updated_order: group order list
"""
if self.appliance.version < "5.9.2":
name_column = "Name"
else:
name_column = "Description"
find_row_kwargs = {name_column: self.description}
view = navigate_to(self.parent, 'All')
row = view.paginator.find_row_on_pages(view.table, **find_row_kwargs)
original_sequence = row.sequence.text
original_order = self.group_order[:len(updated_order)]
view = self.create_view(EditGroupSequenceView)
assert view.is_displayed
# We pick only the same amount of items for comparing
if updated_order == original_order:
return # Ignore that, would cause error on Save click
view.group_order_selector.fill(updated_order)
view.save_button.click()
view = self.create_view(AllGroupView)
assert view.is_displayed
row = view.paginator.find_row_on_pages(view.table, **find_row_kwargs)
changed_sequence = row.sequence.text
assert original_sequence != changed_sequence, "{} Group Edit Sequence Failed".format(
self.description)
def _set_group_restriction(self, tab_view, item, update=True):
""" Sets tag/host/template restriction for the group
Args:
tab_view: tab view
item: path to check box that should be selected/deselected
ex. _set_group_restriction(tab_view, (['parent', 'child'], True))
or tags expression(string) to be set in My company tags in expression editor
ex. _set_group_restriction('fill_tag(My Company Tags : Auto Approve - Max CPU, 1)'),
_set_group_restriction('delete_whole_expression')
update: If True - checkbox state will be updated
Returns: True - if update is successful
"""
updated_result = False
if item is not None:
if update:
if isinstance(item, six.string_types):
updated_result = tab_view.fill({
'tag_mode': 'Tags Based On Expression',
'tag_settings': {'tag_expression': item}})
else:
path, action_type = item
if isinstance(path, list):
tab_form = getattr(tab_view, 'form', tab_view)
tree_view = getattr(tab_form, 'tag_settings', tab_form)
node = (tree_view.tree.CheckNode(path) if action_type else
tree_view.tree.UncheckNode(path))
updated_result = tree_view.tree.fill(node)
return updated_result
@property
def group_order(self):
view = navigate_to(self, 'EditGroupSequence')
return view.group_order_selector.items
@property
def exists(self):
try:
navigate_to(self, 'Details')
return True
except CandidateNotFound:
return False
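# Illustrative constants (not part of the original module) showing the two
# item shapes _set_group_restriction accepts: a (tree_path, check) tuple for
# the checkbox trees, or a plain string for the tag expression editor.
_EXAMPLE_TREE_RESTRICTION = (['My Company Tags', 'Department', 'Engineering'], True)
_EXAMPLE_TAG_EXPRESSION = 'fill_tag(My Company Tags : Auto Approve - Max CPU, 1)'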
@attr.s
class GroupCollection(BaseCollection):
""" Collection object for the :py:class: `cfme.configure.access_control.Group`. """
ENTITY = Group
def create(self, description=None, role=None, tenant="My Company", ldap_credentials=None,
user_to_lookup=None, tag=None, host_cluster=None, vm_template=None, cancel=False):
""" Create group method
Args:
description: group description
role: group role
tenant: group tenant
user_to_lookup: ldap user to lookup
ldap_credentials: ldap user credentials
tag: tag for group restriction
host_cluster: host/cluster for group restriction
vm_template: vm/template for group restriction
appliance: appliance under test
cancel: True - if you want to cancel group creation,
by default group will be created
Throws:
RBACOperationBlocked: If operation is blocked due to current user
not having appropriate permissions OR delete is not allowed
for currently selected user
"""
if self.appliance.version < "5.8":
flash_blocked_msg = ("Description has already been taken")
else:
flash_blocked_msg = "Description is not unique within region {}".format(
self.appliance.server.zone.region.number)
view = navigate_to(self, 'Add')
group = self.instantiate(
description=description, role=role, tenant=tenant, ldap_credentials=ldap_credentials,
user_to_lookup=user_to_lookup, tag=tag, host_cluster=host_cluster,
vm_template=vm_template)
view.fill({
'description_txt': group.description,
'role_select': group.role,
'group_tenant': group.tenant
})
group._set_group_restriction(view.my_company_tags, group.tag)
group._set_group_restriction(view.hosts_and_clusters, group.host_cluster)
group._set_group_restriction(view.vms_and_templates, group.vm_template)
if cancel:
view.cancel_button.click()
flash_message = 'Add of new Group was cancelled by the user'
else:
view.add_button.click()
flash_message = 'Group "{}" was saved'.format(group.description)
view = self.create_view(AllGroupView)
try:
view.flash.assert_message(flash_blocked_msg)
raise RBACOperationBlocked(flash_blocked_msg)
except AssertionError:
pass
view.flash.assert_success_message(flash_message)
assert view.is_displayed
# To ensure that the group list is updated
view.browser.refresh()
return group
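# A minimal usage sketch of GroupCollection.create (hypothetical names),
# restricting the new group to one "My Company Tags" subtree.
def _example_create_group(appliance):
    return appliance.collections.groups.create(
        description='test-group',
        role='EvmRole-vm_user',
        tag=(['My Company Tags', 'Department', 'Engineering'], True))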
@navigator.register(GroupCollection, 'All')
class GroupAll(CFMENavigateStep):
VIEW = AllGroupView
prerequisite = NavigateToAttribute('appliance.server', 'Configuration')
def step(self):
self.prerequisite_view.accordions.accesscontrol.tree.click_path(
self.obj.appliance.server_region_string(), 'Groups')
def resetter(self, *args, **kwargs):
self.obj.appliance.browser.widgetastic.browser.refresh()
@navigator.register(GroupCollection, 'Add')
class GroupAdd(CFMENavigateStep):
VIEW = AddGroupView
prerequisite = NavigateToSibling('All')
def step(self):
self.prerequisite_view.toolbar.configuration.item_select("Add a new Group")
@navigator.register(Group, 'EditGroupSequence')
class EditGroupSequence(CFMENavigateStep):
VIEW = EditGroupSequenceView
prerequisite = NavigateToAttribute('parent', 'All')
def step(self):
self.prerequisite_view.toolbar.configuration.item_select(
'Edit Sequence of User Groups for LDAP Look Up')
@navigator.register(Group, 'Details')
class GroupDetails(CFMENavigateStep):
VIEW = DetailsGroupView
prerequisite = NavigateToAttribute('parent', 'All')
def step(self):
self.prerequisite_view.accordions.accesscontrol.tree.click_path(
self.obj.appliance.server_region_string(), 'Groups', self.obj.description)
@navigator.register(Group, 'Edit')
class GroupEdit(CFMENavigateStep):
VIEW = EditGroupView
prerequisite = NavigateToSibling('Details')
def step(self):
self.prerequisite_view.toolbar.configuration.item_select('Edit this Group')
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# END RBAC GROUP METHODS
####################################################################################################
####################################################################################################
# RBAC ROLE METHODS
####################################################################################################
class RoleForm(ConfigurationView):
""" Role Form for CFME UI """
name_txt = Input(name='name')
vm_restriction_select = BootstrapSelect(id='vm_restriction')
features_tree = CbTree("features_treebox")
cancel_button = Button('Cancel')
class AddRoleView(RoleForm):
""" Add Role View """
add_button = Button('Add')
@property
def is_displayed(self):
return (
self.accordions.accesscontrol.is_opened and
self.title.text == 'Adding a new Role'
)
class EditRoleView(RoleForm):
""" Edit Role View """
save_button = Button('Save')
reset_button = Button('Reset')
@property
def is_displayed(self):
return (
self.accordions.accesscontrol.is_opened and
self.title.text == 'Editing Role "{}"'.format(self.context['object'].name)
)
class DetailsRoleView(RoleForm):
""" Details Role View """
toolbar = View.nested(AccessControlToolbar)
@property
def is_displayed(self):
return (
self.accordions.accesscontrol.is_opened and
self.title.text == 'Role "{}"'.format(self.context['object'].name)
)
class AllRolesView(ConfigurationView):
""" All Roles View """
toolbar = View.nested(AccessControlToolbar)
table = Table("//div[@id='main_div']//table")
@property
def is_displayed(self):
return (
self.accordions.accesscontrol.is_opened and
self.title.text == 'Access Control Roles'
)
@attr.s
class Role(Updateable, Pretty, BaseEntity):
""" Represents a role in CFME UI
Args:
name: role name
vm_restriction: restriction used for role
product_features: product features to select
appliance: appliance under test
"""
pretty_attrs = ['name', 'product_features']
name = attr.ib(default=None)
vm_restriction = attr.ib(default=None)
product_features = attr.ib(default=None)
def __attrs_post_init__(self):
if not self.product_features:
self.product_features = []
def update(self, updates):
""" Update role method
Args:
updates: role data that should be changed
Note: If updates matches the original role data, the update is canceled,
as the 'Save' button will not become active
"""
flash_blocked_msg = "Read Only Role \"{}\" can not be edited".format(self.name)
edit_role_txt = 'Edit this Role'
view = navigate_to(self, 'Details')
# TODO: Remove following code when toolbar disappear issue (BZ1630012) get patched
if not view.toolbar.configuration.is_displayed:
view.browser.refresh()
if not view.toolbar.configuration.item_enabled(edit_role_txt):
raise RBACOperationBlocked("Configuration action '{}' is not enabled".format(
edit_role_txt))
view = navigate_to(self, 'Edit')
try:
view.flash.assert_message(flash_blocked_msg)
raise RBACOperationBlocked(flash_blocked_msg)
except AssertionError:
pass
changed = view.fill({
'name_txt': updates.get('name'),
'vm_restriction_select': updates.get('vm_restriction')
})
feature_changed = self.set_role_product_features(view, updates.get('product_features'))
if changed or feature_changed:
view.save_button.click()
flash_message = 'Role "{}" was saved'.format(updates.get('name', self.name))
else:
view.cancel_button.click()
flash_message = 'Edit of Role was cancelled by the user'
view = self.create_view(DetailsRoleView, override=updates)
view.flash.assert_message(flash_message)
# Typically this would be a safe check but BZ 1561698 will sometimes cause the accordion
# to fail to update the role name w/o a manual refresh causing is_displayed to fail
# Instead of inserting a blind refresh, just disable this until the bug is resolved since
# it's a good check for accordion UI failures
# See BZ https://bugzilla.redhat.com/show_bug.cgi?id=1561698
if not BZ(1561698, forced_streams=['5.9']).blocks:
assert view.is_displayed
def delete(self, cancel=True):
""" Delete existing role
Args:
cancel: Default value 'True', role will be deleted
'False' - deletion of role will be canceled
Throws:
RBACOperationBlocked: If operation is blocked due to current user
not having appropriate permissions OR delete is not allowed
for currently selected role
"""
flash_blocked_msg = ("Role \"{}\": Error during delete: Cannot delete record "
"because of dependent entitlements".format(self.name))
flash_success_msg = 'Role "{}": Delete successful'.format(self.name)
delete_role_txt = 'Delete this Role'
view = navigate_to(self, 'Details')
if not view.toolbar.configuration.item_enabled(delete_role_txt):
raise RBACOperationBlocked("Configuration action '{}' is not enabled".format(
delete_role_txt))
view.toolbar.configuration.item_select(delete_role_txt, handle_alert=cancel)
try:
view.flash.assert_message(flash_blocked_msg)
raise RBACOperationBlocked(flash_blocked_msg)
except AssertionError:
pass
view.flash.assert_message(flash_success_msg)
if cancel:
view = self.create_view(AllRolesView)
view.flash.assert_success_message(flash_success_msg)
else:
view = self.create_view(DetailsRoleView)
assert view.is_displayed
def copy(self, name=None):
""" Creates copy of existing role
Returns: Role object of copied role
"""
if name is None:
name = "{}_copy".format(self.name)
view = navigate_to(self, 'Details')
view.toolbar.configuration.item_select('Copy this Role to a new Role')
view = self.create_view(AddRoleView)
new_role = self.parent.instantiate(name=name)
view.fill({'name_txt': new_role.name})
view.add_button.click()
view = self.create_view(AllRolesView)
view.flash.assert_success_message('Role "{}" was saved'.format(new_role.name))
assert view.is_displayed
return new_role
def set_role_product_features(self, view, product_features):
""" Sets product features for role restriction
Args:
view: AddRoleView or EditRoleView
product_features: list of product features with options to select
"""
if product_features is not None and isinstance(product_features, (list, tuple, set)):
changes = [
view.fill({
'features_tree': CbTree.CheckNode(path) if option else CbTree.UncheckNode(path)
})
for path, option in product_features
]
return True in changes
else:
return False
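# Illustrative value (not part of the original module): product_features is
# an iterable of (tree_path, enabled) pairs consumed by
# set_role_product_features; the path below is hypothetical.
_EXAMPLE_PRODUCT_FEATURES = [
    (['Everything', 'Access Rules for all Virtual Machines'], False)]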
@attr.s
class RoleCollection(BaseCollection):
ENTITY = Role
def create(self, name=None, vm_restriction=None, product_features=None, cancel=False):
""" Create role method
Args:
cancel: True - if you want to cancel role creation,
by default, role will be created
Raises:
RBACOperationBlocked: If operation is blocked due to current user
not having appropriate permissions OR update is not allowed
for currently selected role
"""
flash_blocked_msg = "Name has already been taken"
role = self.instantiate(
name=name, vm_restriction=vm_restriction, product_features=product_features
)
view = navigate_to(self, 'Add')
view.fill({'name_txt': role.name,
'vm_restriction_select': role.vm_restriction})
role.set_role_product_features(view, role.product_features)
if cancel:
view.cancel_button.click()
flash_message = 'Add of new Role was cancelled by the user'
else:
view.add_button.click()
flash_message = 'Role "{}" was saved'.format(role.name)
view = self.create_view(AllRolesView)
try:
view.flash.assert_message(flash_blocked_msg)
raise RBACOperationBlocked(flash_blocked_msg)
except AssertionError:
pass
view.flash.assert_success_message(flash_message)
assert view.is_displayed
return role
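# A minimal usage sketch of RoleCollection.create (hypothetical names and
# restriction value); never called at import time.
def _example_create_role(appliance):
    return appliance.collections.roles.create(
        name='test-role',
        vm_restriction='Only User Owned',
        product_features=[(['Everything'], True)])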
@navigator.register(RoleCollection, 'All')
class RoleAll(CFMENavigateStep):
VIEW = AllRolesView
prerequisite = NavigateToAttribute('appliance.server', 'Configuration')
def step(self):
self.prerequisite_view.accordions.accesscontrol.tree.click_path(
self.obj.appliance.server_region_string(), 'Roles')
@navigator.register(RoleCollection, 'Add')
class RoleAdd(CFMENavigateStep):
VIEW = AddRoleView
prerequisite = NavigateToSibling('All')
def step(self):
self.prerequisite_view.toolbar.configuration.item_select("Add a new Role")
@navigator.register(Role, 'Details')
class RoleDetails(CFMENavigateStep):
VIEW = DetailsRoleView
prerequisite = NavigateToAttribute('parent', 'All')
def step(self):
self.prerequisite_view.browser.refresh() # workaround for 5.9 issue of role not shown
self.prerequisite_view.accordions.accesscontrol.tree.click_path(
self.obj.appliance.server_region_string(), 'Roles', self.obj.name)
@navigator.register(Role, 'Edit')
class RoleEdit(CFMENavigateStep):
VIEW = EditRoleView
prerequisite = NavigateToSibling('Details')
def step(self):
self.prerequisite_view.toolbar.configuration.item_select('Edit this Role')
####################################################################################################
# RBAC TENANT METHODS
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
class TenantForm(ConfigurationView):
""" Tenant Form """
name = Input(name='name')
description = Input(name='description')
add_button = Button('Add')
cancel_button = Button('Cancel')
class TenantQuotaForm(View):
cpu_cb = BootstrapSwitch(id='cpu_allocated')
memory_cb = BootstrapSwitch(id='mem_allocated')
storage_cb = BootstrapSwitch(id='storage_allocated')
vm_cb = BootstrapSwitch(id='vms_allocated')
template_cb = BootstrapSwitch(id='templates_allocated')
cpu_txt = Input(id='id_cpu_allocated')
memory_txt = Input(id='id_mem_allocated')
storage_txt = Input(id='id_storage_allocated')
vm_txt = Input(id='id_vms_allocated')
template_txt = Input(id='id_templates_allocated')
class TenantQuotaView(ConfigurationView):
""" Tenant Quota View """
form = View.nested(TenantQuotaForm)
save_button = Button('Save')
reset_button = Button('Reset')
cancel_button = Button('Cancel')
@property
def is_displayed(self):
return (
self.form.template_cb.is_displayed and
self.title.text == 'Manage quotas for {} "{}"'.format(self.context['object'].obj_type,
self.context['object'].name))
class AllTenantView(ConfigurationView):
""" All Tenants View """
toolbar = View.nested(AccessControlToolbar)
table = Table('//*[@id="miq-gtl-view"]/miq-data-table/div/table')
paginator = PaginationPane()
@property
def is_displayed(self):
return (
self.accordions.accesscontrol.is_opened and
self.title.text == 'Access Control Tenants'
)
class AddTenantView(ConfigurationView):
""" Add Tenant View """
form = View.nested(TenantForm)
@property
def is_displayed(self):
return (
self.accordions.accesscontrol.is_opened and
self.form.description.is_displayed and
self.title.text in ('Adding a new Project', 'Adding a new Tenant')
)
class DetailsTenantEntities(View):
smart_management = SummaryForm('Smart Management')
class DetailsTenantView(ConfigurationView):
""" Details Tenant View """
entities = View.nested(DetailsTenantEntities)
# Todo move to entities
toolbar = View.nested(AccessControlToolbar)
name = Text('Name')
description = Text('Description')
parent = Text('Parent')
table = Table('//*[self::fieldset or @id="fieldset"]/table')
@property
def is_displayed(self):
return (
self.accordions.accesscontrol.is_opened and
self.title.text == '{} "{}"'.format(self.context['object'].obj_type,
self.context['object'].name)
)
class ParentDetailsTenantView(DetailsTenantView):
""" Parent Tenant Details View """
@property
def is_displayed(self):
return (
self.accordions.accesscontrol.is_opened and
self.title.text == '{} "{}"'.format(self.context['object'].parent_tenant.obj_type,
self.context['object'].parent_tenant.name)
)
class EditTenantView(View):
""" Edit Tenant View """
form = View.nested(TenantForm)
save_button = Button('Save')
reset_button = Button('Reset')
@property
def is_displayed(self):
return (
self.form.accordions.accesscontrol.is_opened and
self.form.description.is_displayed and
self.form.title.text == 'Editing {} "{}"'.format(self.context['object'].obj_type,
self.context['object'].name)
)
@attr.s
class Tenant(Updateable, BaseEntity, Taggable):
""" Class representing CFME tenants in the UI.
* Kudos to mfalesni *
The behaviour is shared with Project, which is the same except it cannot create more nested
tenants/projects.
Args:
name: Name of the tenant
description: Description of the tenant
parent_tenant: Parent tenant, can be None, can be passed as string or object
"""
obj_type = 'Tenant'
name = attr.ib()
description = attr.ib(default="")
parent_tenant = attr.ib(default=None)
_default = attr.ib(default=False)
def update(self, updates):
""" Update tenant/project method
Args:
updates: tenant/project data that should be changed
Note: If updates matches the original tenant/project data, the update is
canceled, as the 'Save' button will not become active
"""
view = navigate_to(self, 'Edit')
changed = view.form.fill(updates)
if changed:
view.save_button.click()
if self.appliance.version < '5.9':
flash_message = 'Project "{}" was saved'.format(updates.get('name', self.name))
else:
flash_message = '{} "{}" has been successfully saved.'.format(
self.obj_type, updates.get('name', self.name))
else:
view.cancel_button.click()
if self.appliance.version < '5.9':
flash_message = 'Edit of Project "{}" was cancelled by the user'.format(
updates.get('name', self.name))
else:
flash_message = 'Edit of {} "{}" was canceled by the user.'.format(
self.obj_type, updates.get('name', self.name))
view = self.create_view(DetailsTenantView, override=updates)
view.flash.assert_message(flash_message)
def delete(self, cancel=True):
""" Delete existing role
Args:
cancel: Default value 'True', role will be deleted
'False' - deletion of role will be canceled
"""
view = navigate_to(self, 'Details')
view.toolbar.configuration.item_select(
'Delete this item', handle_alert=cancel)
if cancel:
view = self.create_view(ParentDetailsTenantView)
view.flash.assert_success_message(
'Tenant "{}": Delete successful'.format(self.description))
else:
view = self.create_view(DetailsTenantView)
assert view.is_displayed
def set_quota(self, **kwargs):
""" Sets tenant quotas """
view = navigate_to(self, 'ManageQuotas')
changed = view.form.fill({'cpu_cb': kwargs.get('cpu_cb'),
'cpu_txt': kwargs.get('cpu'),
'memory_cb': kwargs.get('memory_cb'),
'memory_txt': kwargs.get('memory'),
'storage_cb': kwargs.get('storage_cb'),
'storage_txt': kwargs.get('storage'),
'vm_cb': kwargs.get('vm_cb'),
'vm_txt': kwargs.get('vm'),
'template_cb': kwargs.get('template_cb'),
'template_txt': kwargs.get('template')})
if changed:
view.save_button.click()
expected_msg = 'Quotas for {} "{}" were saved'.format(self.obj_type, self.name)
else:
view.cancel_button.click()
expected_msg = 'Manage quotas for {} "{}" was cancelled by the user'\
.format(self.obj_type, self.name)
view = self.create_view(DetailsTenantView)
view.flash.assert_success_message(expected_msg)
assert view.is_displayed
@property
def quota(self):
view = navigate_to(self, 'Details')
quotas = {
'cpu': 'Allocated Virtual CPUs',
'memory': 'Allocated Memory in GB',
'storage': 'Allocated Storage in GB',
'num_vms': 'Allocated Number of Virtual Machines',
'templates': 'Allocated Number of Templates'
}
for field in quotas:
item = view.table.row(name=quotas[field])
quotas[field] = {
'total': item.total_quota.text,
'in_use': item.in_use.text,
'allocated': item.allocated.text,
'available': item.available.text
}
return quotas
def __eq__(self, other):
if not isinstance(other, type(self)):
return False
else:
return self.tree_path == other.tree_path
@property
def exists(self):
try:
navigate_to(self, 'Details')
return True
except CandidateNotFound:
return False
@property
def tree_path(self):
if self._default:
return [self.name]
else:
return self.parent_tenant.tree_path + [self.name]
@property
def parent_path(self):
return self.parent_tenant.tree_path
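# A minimal usage sketch (hypothetical values): set one quota, then read it
# back. The quota property returns a dict keyed by 'cpu', 'memory',
# 'storage', 'num_vms' and 'templates', each mapping to a dict with
# 'total', 'in_use', 'allocated' and 'available' strings.
def _example_tenant_quota(tenant):
    tenant.set_quota(cpu_cb=True, cpu='2')
    return tenant.quota['cpu']['total']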
@attr.s
class TenantCollection(BaseCollection):
"""Collection class for Tenant"""
ENTITY = Tenant
def get_root_tenant(self):
return self.instantiate(str(self.appliance.rest_api.collections.tenants[0].name),
default=True)
def create(self, name, description, parent):
if self.appliance.version > '5.9':
tenant_success_flash_msg = 'Tenant "{}" has been successfully added.'
else:
tenant_success_flash_msg = 'Tenant "{}" was saved'
tenant = self.instantiate(name, description, parent)
view = navigate_to(tenant.parent_tenant, 'Details')
view.toolbar.configuration.item_select('Add child Tenant to this Tenant')
view = self.create_view(AddTenantView)
wait_for(lambda: view.is_displayed, timeout=5)
changed = view.form.fill({'name': name,
'description': description})
if changed:
view.form.add_button.click()
else:
view.form.cancel_button.click()
view = self.create_view(ParentDetailsTenantView)
view.flash.assert_success_message(tenant_success_flash_msg.format(name))
return tenant
def delete(self, *tenants):
view = navigate_to(self, 'All')
for tenant in tenants:
try:
row = view.table.row(name=tenant.name)
row[0].check()
except Exception:
logger.exception('Failed to check element "%s"', tenant.name)
else:
view.toolbar.configuration.item_select('Delete selected items', handle_alert=True)
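# A minimal usage sketch (hypothetical names): tenants nest under the root
# tenant, and tree_path reflects the full ancestry.
def _example_nested_tenant(appliance):
    col = appliance.collections.tenants
    root = col.get_root_tenant()
    child = col.create(name='child', description='child tenant', parent=root)
    return child.tree_path  # e.g. ['My Company', 'child']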
@navigator.register(TenantCollection, 'All')
class TenantAll(CFMENavigateStep):
VIEW = AllTenantView
prerequisite = NavigateToAttribute('appliance.server', 'Configuration')
def step(self):
self.prerequisite_view.accordions.accesscontrol.tree.click_path(
self.obj.appliance.server_region_string(), 'Tenants')
@navigator.register(Tenant, 'Details')
class TenantDetails(CFMENavigateStep):
VIEW = DetailsTenantView
prerequisite = NavigateToAttribute('parent', 'All')
def step(self):
self.prerequisite_view.accordions.accesscontrol.tree.click_path(
self.obj.appliance.server_region_string(), 'Tenants', *self.obj.tree_path)
@navigator.register(Tenant, 'Edit')
class TenantEdit(CFMENavigateStep):
VIEW = EditTenantView
prerequisite = NavigateToSibling('Details')
def step(self):
self.prerequisite_view.toolbar.configuration.item_select('Edit this item')
@navigator.register(Tenant, 'ManageQuotas')
class TenantManageQuotas(CFMENavigateStep):
VIEW = TenantQuotaView
prerequisite = NavigateToSibling('Details')
def step(self):
self.prerequisite_view.toolbar.configuration.item_select('Manage Quotas')
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# END TENANT METHODS
####################################################################################################
####################################################################################################
# RBAC PROJECT METHODS
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
class Project(Tenant):
""" Class representing CFME projects in the UI.
Project cannot create more child tenants/projects.
Args:
name: Name of the project
description: Description of the project
parent_tenant: Parent project, can be None, can be passed as string or object
"""
obj_type = 'Project'
class ProjectCollection(TenantCollection):
"""Collection class for Projects under Tenants"""
ENTITY = Project
def get_root_tenant(self):
# return a Tenant directly, because the root 'My Company' tenant must be
# treated as a Tenant object so child tenants/projects can be created under it
return self.appliance.collections.tenants.instantiate(
name=str(self.appliance.rest_api.collections.tenants[0].name), default=True)
def create(self, name, description, parent):
if self.appliance.version > '5.9':
project_success_flash_msg = 'Project "{}" has been successfully added.'
else:
project_success_flash_msg = 'Project "{}" was saved'
project = self.instantiate(name, description, parent)
view = navigate_to(project.parent_tenant, 'Details')
view.toolbar.configuration.item_select('Add Project to this Tenant')
view = self.create_view(AddTenantView)
wait_for(lambda: view.is_displayed, timeout=5)
changed = view.form.fill({'name': name,
'description': description})
if changed:
view.form.add_button.click()
else:
view.form.cancel_button.click()
view = self.create_view(ParentDetailsTenantView)
view.flash.assert_success_message(project_success_flash_msg.format(name))
return project
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# END PROJECT METHODS
####################################################################################################
| gpl-2.0 |
korrosivesec/crits | crits/core/management/commands/create_locations.py | 26 | 2050 | import json
import os
from django.conf import settings
from django.core.management.base import BaseCommand
from crits.locations.location import Location
class Command(BaseCommand):
"""
Script Class.
"""
help = 'Creates location objects in MongoDB.'
def handle(self, *args, **options):
"""
Script Execution.
"""
add_location_objects(True)
def add_location_objects(drop=False):
"""
Add location objects to the system.
:param drop: Drop collection before adding.
:type drop: boolean
"""
f = os.path.join(settings.SITE_ROOT,
'..',
'extras',
'countries.json')
locations = open(f, 'r')
cdata = locations.read()
data = json.loads(cdata)
if not drop:
print "Drop protection does not apply to location objects"
Location.drop_collection()
count = 0
for location in data:
l = Location()
l.name = location['name']['official']
l.calling_code = get_value(location['callingCode'])
l.cca2 = location['cca2']
l.cca3 = location['cca3']
l.ccn3 = location['ccn3']
l.cioc = location['cioc']
l.region = location['region']
l.sub_region = location['subregion']
l.latitude = get_lat(location['latlng'])
l.longitude = get_long(location['latlng'])
l.save()
count += 1
print "Added %s Location Objects." % count
def get_value(value):
v = None
if isinstance(value, list):
if len(value) < 1:
return v
else:
v = value[0]
else:
v = value
return v
def get_lat(value):
if isinstance(value, list):
if len(value) < 2:
return None
else:
return value[0]
def get_long(value):
if isinstance(value, list):
if len(value) < 2:
return None
else:
return value[1]
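def _example_helper_usage():
    # A minimal sketch (not part of the original command) of how the helpers
    # above normalize fields from countries.json; never called at runtime.
    assert get_value(['+1']) == '+1'    # lists collapse to their first element
    assert get_value([]) is None        # empty lists are tolerated
    assert get_value('+44') == '+44'    # scalars pass through unchanged
    assert get_lat([48.8, 2.3]) == 48.8
    assert get_long([48.8, 2.3]) == 2.3
    assert get_lat([]) is None          # missing coordinates yield None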
| mit |
YinongLong/scikit-learn | examples/missing_values.py | 71 | 3055 | """
======================================================
Imputing missing values before building an estimator
======================================================
This example shows that imputing the missing values can give better results
than discarding the samples containing any missing value.
Imputing does not always improve the predictions, so please check via cross-validation.
Sometimes dropping rows or using marker values is more effective.
Missing values can be replaced by the mean, the median or the most frequent
value using the ``strategy`` hyper-parameter.
The median is a more robust estimator for data with high magnitude variables
which could dominate results (otherwise known as a 'long tail').
Script output::
Score with the entire dataset = 0.56
Score without the samples containing missing values = 0.48
Score after imputation of the missing values = 0.55
In this case, imputing helps the classifier get close to the original score.
"""
import numpy as np
from sklearn.datasets import load_boston
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.model_selection import cross_val_score
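# Illustrative sketch (not part of the original example; never called): the
# ``strategy`` hyper-parameter controls the fill value, e.g. with "median"
# each marker value is replaced by its column's median.
def _imputer_strategy_demo():
    X = np.array([[1., 0., 3.],
                  [7., 8., 0.],
                  [4., 5., 6.]])
    imp = Imputer(missing_values=0, strategy="median", axis=0)
    return imp.fit_transform(X)  # the zeros become 6.5 and 4.5 respectively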
rng = np.random.RandomState(0)
dataset = load_boston()
X_full, y_full = dataset.data, dataset.target
n_samples = X_full.shape[0]
n_features = X_full.shape[1]
# Estimate the score on the entire dataset, with no missing values
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_full, y_full).mean()
print("Score with the entire dataset = %.2f" % score)
# Add missing values in 75% of the lines
missing_rate = 0.75
n_missing_samples = int(np.floor(n_samples * missing_rate))
missing_samples = np.hstack((np.zeros(n_samples - n_missing_samples,
dtype=np.bool),
np.ones(n_missing_samples,
dtype=np.bool)))
rng.shuffle(missing_samples)
missing_features = rng.randint(0, n_features, n_missing_samples)
# Estimate the score without the lines containing missing values
X_filtered = X_full[~missing_samples, :]
y_filtered = y_full[~missing_samples]
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_filtered, y_filtered).mean()
print("Score without the samples containing missing values = %.2f" % score)
# Estimate the score after imputation of the missing values
X_missing = X_full.copy()
X_missing[np.where(missing_samples)[0], missing_features] = 0
y_missing = y_full.copy()
estimator = Pipeline([("imputer", Imputer(missing_values=0,
strategy="mean",
axis=0)),
("forest", RandomForestRegressor(random_state=0,
n_estimators=100))])
score = cross_val_score(estimator, X_missing, y_missing).mean()
print("Score after imputation of the missing values = %.2f" % score)
| bsd-3-clause |
shastah/spacewalk | backend/server/action/utils.py | 10 | 11613 | #
# Copyright (c) 2008--2016 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
from spacewalk.server import rhnSQL, rhnAction
from spacewalk.server.rhnDependency import find_package_with_arch
from spacewalk.server.rhnChannel import channels_for_server
class PackageNotFound(Exception):
pass
class NoActionInfo(Exception):
pass
class SubscribedChannel:
"""
SubscribedChannel represents a channel to which the server is subscribed.
"""
def __init__(self, server_id, channel_lookup_string):
"""
Constructor.
server_id is a string containing the unique number that the
database has assigned to the server.
channel_lookup_string is a string that the _get_channel_info function
uses to look up the correct channel by channel label. It does NOT have
to be the entire channel label, but it does have to occur at the beginning
of the channel label. For instance "rhn-tools" would match any of the
rhn-tools channels because they all begin with "rhn-tools". It can also be
the entire channel label, of course.
"""
self.server_id = server_id
self.found_channel = None
self.channel_id = None
self.channel_lookup_string = channel_lookup_string
self.channel_label = None
def _get_channel_info(self):
"""
Looks up the correct channel based on channel_lookup_string.
Populates the id, label, and a boolean that tells whether the
channel is found.
"""
subscribed_channels = channels_for_server(self.server_id)
# Our tools channels all start with "rhn-tools", which seems
# to be the only way to reliably tell one channel from the other
# automagically.
self.found_channel = False
for channel_info in subscribed_channels:
label_position = channel_info['label'].find(self.channel_lookup_string)
if label_position > -1 and label_position == 0:
self.found_channel = True
self.channel_id = channel_info['id']
self.channel_label = channel_info['label']
def is_subscribed_to_channel(self):
"""
Returns True if server_id is subscribed to the
channel, False otherwise
"""
if not self.found_channel:
self._get_channel_info()
return self.found_channel
def get_channel_id(self):
"""
Returns the channel's unique id.
"""
if not self.channel_id:
self._get_channel_info()
return self.channel_id
def get_channel_label(self):
"""
Returns the channel's label.
"""
if not self.channel_label:
self._get_channel_info()
return self.channel_label
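def _example_channel_lookup(server_id):
    """
    A minimal usage sketch (server_id and the label prefix are hypothetical):
    the lookup string is a channel-label prefix, so "rhn-tools" matches any
    rhn-tools-* channel the server is subscribed to. Never called at runtime.
    """
    channel = SubscribedChannel(server_id, "rhn-tools")
    if channel.is_subscribed_to_channel():
        return channel.get_channel_label()
    return None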
class ChannelPackage:
"""
Represents a package contained in a channel that the server is
subscribed to.
"""
def __init__(self, server_id, package_name):
"""
Constructor.
server_id is the unique value assigned to the server by the db.
package_name is a string containing the name of the package
to be looked up.
"""
self.server_id = server_id
self.package_name = package_name
self.package_info = None
self.id = None
self.version = None
self.release = None
self.epoch = None
self.arch = None
self.name_id = None
self.evr_id = None
self.arch_id = None
self.id_index = 0
self.name_index = 1
self.version_index = 2
self.release_index = 3
self.epoch_index = 4
self.arch_index = 5
def _get_package_info(self):
"""
"Private" function that retrieves info about the package.
Populates self.package_info, self.id, self.version, self.release, and self.epoch.
"""
# Get info on the package we want to install.
possible_packages = find_package_with_arch(self.server_id, [self.package_name])
# There's a possibility, however slight, that more than one package
# may be returned by find_by_packages. If that's the case, we only
# want the info about package_name.
package_info = None
if self.package_name in possible_packages:
for package in possible_packages[self.package_name]:
if package[self.name_index] == self.package_name:
self.package_info = package
self.id = package[self.id_index]
self.version = package[self.version_index]
self.release = package[self.release_index]
self.epoch = package[self.epoch_index]
self.arch = package[self.arch_index]
def _get_package_field_ids(self):
"""
"Private" function that retrieves the database id's for the name, EVR, and
package architecture and sets self.name_id, self.evr_id, and self.arch_id to
their values.
"""
package_id = self.get_id()
if not package_id:
raise PackageNotFound("ID for package %s was not found." % self.get_name())
_package_info_query = rhnSQL.Statement("""
select
p.name_id name_id,
p.evr_id evr_id,
p.package_arch_id arch_id
from
rhnPackage p
where
p.id = :package_id
""")
prepared_query = rhnSQL.prepare(_package_info_query)
prepared_query.execute(package_id=package_id)
package_info_results = prepared_query.fetchone_dict()
if not package_info_results:
raise PackageNotFound("Name, EVR, and Arch info not found for %s" % self.get_name())
self.name_id = package_info_results['name_id']
self.evr_id = package_info_results['evr_id']
self.arch_id = package_info_results['arch_id']
def exists(self):
"""
Returns True if the package is available for the server according to the db,
False otherwise.
"""
if not self.package_info:
self._get_package_info()
if not self.package_info:
return False
else:
return True
def get_name_id(self):
"""
Returns the name_id of the package.
"""
if not self.name_id:
self._get_package_field_ids()
return self.name_id
def get_evr_id(self):
"""
Returns the evr_id of the package.
"""
if not self.evr_id:
self._get_package_field_ids()
return self.evr_id
def get_arch_id(self):
"""
Returns the arch_id of the package.
"""
if not self.arch_id:
self._get_package_field_ids()
return self.arch_id
def get_id(self):
"""
Returns the id of the package.
"""
if not self.id:
self._get_package_field_ids()
return self.id
def get_name(self):
"""
Returns the name of the package.
"""
return self.package_name
def get_version(self):
"""
Returns the version of the package.
"""
if not self.version:
self._get_package_info()
return self.version
def get_release(self):
"""
Returns the release of the package.
"""
if not self.release:
self._get_package_info()
return self.release
def get_epoch(self):
"""
Returns the epoch of the package.
"""
if not self.epoch:
self._get_package_info()
return self.epoch
def get_arch(self):
"""
Returns the arch of the package.
"""
if not self.arch:
self._get_package_info()
return self.arch
class PackageInstallScheduler:
"""
Class responsible for scheduling package installs. Can
only be used inside actions during a kickstart.
"""
def __init__(self, server_id, this_action_id, package):
"""
Constructor.
server_id is the unique number assigned to the server by the database.
this_action_id is the unique number assigned to the current action.
package is an instance of ChannelPackage.
"""
self.server_id = server_id
self.package = package
self.this_action_id = this_action_id
self.new_action_id = None
def _get_action_info(self, action_id):
"""
Private function that returns the org_id and scheduler for action_id.
"""
h = rhnSQL.prepare("""
select org_id, scheduler
from rhnAction
where id = :id
""")
h.execute(id=action_id)
row = h.fetchone_dict()
if not row:
raise NoActionInfo("Couldn't find org_id or scheduler for action %s." % str(action_id))
return (row['org_id'], row['scheduler'])
def schedule_package_install(self):
"""
Public function that schedules self.package for installation during the next rhn_check.
"""
org_id, scheduler = self._get_action_info(self.this_action_id)
self.new_action_id = rhnAction.schedule_server_action(
self.server_id,
action_type="packages.update",
action_name="Scheduling install of RHN's virtualization host packages.",
delta_time=0,
scheduler=scheduler,
org_id=org_id
)
self._add_package_to_install_action(self.new_action_id)
def _add_package_to_install_action(self, action_id):
"""
Private function that adds self.package to the rhnActionPackage table.
"""
name_id = self.package.get_name_id()
package_arch_id = self.package.get_arch_id()
evr_id = self.package.get_evr_id()
insert_package_query = rhnSQL.Statement("""
insert into rhnActionPackage(id,
action_id,
parameter,
name_id,
evr_id,
package_arch_id)
values (sequence_nextval('rhn_act_p_id_seq'),
:action_id,
'install',
:name_id,
:evr_id,
:package_arch_id)
""")
prepared_query = rhnSQL.prepare(insert_package_query)
prepared_query.execute(action_id=str(action_id),
name_id=str(name_id),
evr_id=str(evr_id),
package_arch_id=str(package_arch_id))
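def _example_schedule_install(server_id, action_id):
    """
    A minimal usage sketch (the ids and package name are hypothetical):
    look up a package in the server's subscribed channels and queue it for
    installation at the next rhn_check. Never called at runtime.
    """
    package = ChannelPackage(server_id, 'rhn-virtualization-host')
    if not package.exists():
        raise PackageNotFound("package not available to this server")
    PackageInstallScheduler(server_id, action_id, package).schedule_package_install()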
| gpl-2.0 |
albertjan/pypyjs | website/js/pypy.js-0.2.0/lib/modules/rexec.py | 228 | 20148 | """Restricted execution facilities.
The class RExec exports methods r_exec(), r_eval(), r_execfile(), and
r_import(), which correspond roughly to the built-in operations
exec, eval(), execfile() and import, but executing the code in an
environment that only exposes those built-in operations that are
deemed safe. To this end, a modest collection of 'fake' modules is
created which mimics the standard modules by the same names. It is a
policy decision which built-in modules and operations are made
available; this module provides a reasonable default, but derived
classes can change the policies e.g. by overriding or extending class
variables like ok_builtin_modules or methods like make_sys().
XXX To do:
- r_open should allow writing tmp dir
- r_exec etc. with explicit globals/locals? (Use rexec("exec ... in ...")?)
"""
from warnings import warnpy3k
warnpy3k("the rexec module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
import sys
import __builtin__
import os
import ihooks
import imp
__all__ = ["RExec"]
class FileBase:
ok_file_methods = ('fileno', 'flush', 'isatty', 'read', 'readline',
'readlines', 'seek', 'tell', 'write', 'writelines', 'xreadlines',
'__iter__')
class FileWrapper(FileBase):
# XXX This is just like a Bastion -- should use that!
def __init__(self, f):
for m in self.ok_file_methods:
if not hasattr(self, m) and hasattr(f, m):
setattr(self, m, getattr(f, m))
def close(self):
self.flush()
TEMPLATE = """
def %s(self, *args):
return getattr(self.mod, self.name).%s(*args)
"""
class FileDelegate(FileBase):
def __init__(self, mod, name):
self.mod = mod
self.name = name
for m in FileBase.ok_file_methods + ('close',):
exec TEMPLATE % (m, m)
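# A minimal sketch (not part of the original module) of what the
# TEMPLATE/exec loop above generates for one method name such as 'read':
#
#     def read(self, *args):
#         return getattr(self.mod, self.name).read(*args)
#
# i.e. each allowed file method on FileDelegate forwards to the real file
# object stored under attribute self.name on module self.mod.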
class RHooks(ihooks.Hooks):
def __init__(self, *args):
# Hacks to support both old and new interfaces:
# old interface was RHooks(rexec[, verbose])
# new interface is RHooks([verbose])
verbose = 0
rexec = None
if args and type(args[-1]) == type(0):
verbose = args[-1]
args = args[:-1]
if args and hasattr(args[0], '__class__'):
rexec = args[0]
args = args[1:]
if args:
raise TypeError, "too many arguments"
ihooks.Hooks.__init__(self, verbose)
self.rexec = rexec
def set_rexec(self, rexec):
# Called by RExec instance to complete initialization
self.rexec = rexec
def get_suffixes(self):
return self.rexec.get_suffixes()
def is_builtin(self, name):
return self.rexec.is_builtin(name)
def init_builtin(self, name):
m = __import__(name)
return self.rexec.copy_except(m, ())
def init_frozen(self, name): raise SystemError, "don't use this"
def load_source(self, *args): raise SystemError, "don't use this"
def load_compiled(self, *args): raise SystemError, "don't use this"
def load_package(self, *args): raise SystemError, "don't use this"
def load_dynamic(self, name, filename, file):
return self.rexec.load_dynamic(name, filename, file)
def add_module(self, name):
return self.rexec.add_module(name)
def modules_dict(self):
return self.rexec.modules
def default_path(self):
return self.rexec.modules['sys'].path
# XXX Backwards compatibility
RModuleLoader = ihooks.FancyModuleLoader
RModuleImporter = ihooks.ModuleImporter
class RExec(ihooks._Verbose):
"""Basic restricted execution framework.
Code executed in this restricted environment will only have access to
modules and functions that are deemed safe; you can subclass RExec to
add or remove capabilities as desired.
The RExec class can prevent code from performing unsafe operations like
reading or writing disk files, or using TCP/IP sockets. However, it does
not protect against code using extremely large amounts of memory or
processor time.
"""
ok_path = tuple(sys.path) # That's a policy decision
ok_builtin_modules = ('audioop', 'array', 'binascii',
'cmath', 'errno', 'imageop',
'marshal', 'math', 'md5', 'operator',
'parser', 'select',
'sha', '_sre', 'strop', 'struct', 'time',
'_weakref')
ok_posix_names = ('error', 'fstat', 'listdir', 'lstat', 'readlink',
'stat', 'times', 'uname', 'getpid', 'getppid',
'getcwd', 'getuid', 'getgid', 'geteuid', 'getegid')
ok_sys_names = ('byteorder', 'copyright', 'exit', 'getdefaultencoding',
'getrefcount', 'hexversion', 'maxint', 'maxunicode',
'platform', 'ps1', 'ps2', 'version', 'version_info')
nok_builtin_names = ('open', 'file', 'reload', '__import__')
ok_file_types = (imp.C_EXTENSION, imp.PY_SOURCE)
def __init__(self, hooks = None, verbose = 0):
"""Returns an instance of the RExec class.
The hooks parameter is an instance of the RHooks class or a subclass
of it. If it is omitted or None, the default RHooks class is
instantiated.
Whenever the RExec module searches for a module (even a built-in one)
or reads a module's code, it doesn't actually go out to the file
system itself. Rather, it calls methods of an RHooks instance that
was passed to or created by its constructor. (Actually, the RExec
object doesn't make these calls --- they are made by a module loader
object that's part of the RExec object. This allows another level of
flexibility, which can be useful when changing the mechanics of
import within the restricted environment.)
By providing an alternate RHooks object, we can control the file
system accesses made to import a module, without changing the
actual algorithm that controls the order in which those accesses are
made. For instance, we could substitute an RHooks object that
passes all filesystem requests to a file server elsewhere, via some
RPC mechanism such as ILU. Grail's applet loader uses this to support
importing applets from a URL for a directory.
If the verbose parameter is true, additional debugging output may be
sent to standard output.
"""
raise RuntimeError, "This code is not secure in Python 2.2 and later"
ihooks._Verbose.__init__(self, verbose)
# XXX There's a circular reference here:
self.hooks = hooks or RHooks(verbose)
self.hooks.set_rexec(self)
self.modules = {}
self.ok_dynamic_modules = self.ok_builtin_modules
list = []
for mname in self.ok_builtin_modules:
if mname in sys.builtin_module_names:
list.append(mname)
self.ok_builtin_modules = tuple(list)
self.set_trusted_path()
self.make_builtin()
self.make_initial_modules()
# make_sys must be last because it adds the already created
# modules to its builtin_module_names
self.make_sys()
self.loader = RModuleLoader(self.hooks, verbose)
self.importer = RModuleImporter(self.loader, verbose)
def set_trusted_path(self):
# Set the path from which dynamic modules may be loaded.
# Those dynamic modules must also occur in ok_builtin_modules
self.trusted_path = filter(os.path.isabs, sys.path)
def load_dynamic(self, name, filename, file):
if name not in self.ok_dynamic_modules:
raise ImportError, "untrusted dynamic module: %s" % name
if name in sys.modules:
src = sys.modules[name]
else:
src = imp.load_dynamic(name, filename, file)
dst = self.copy_except(src, [])
return dst
def make_initial_modules(self):
self.make_main()
self.make_osname()
# Helpers for RHooks
def get_suffixes(self):
return [item # (suff, mode, type)
for item in imp.get_suffixes()
if item[2] in self.ok_file_types]
def is_builtin(self, mname):
return mname in self.ok_builtin_modules
# The make_* methods create specific built-in modules
def make_builtin(self):
m = self.copy_except(__builtin__, self.nok_builtin_names)
m.__import__ = self.r_import
m.reload = self.r_reload
m.open = m.file = self.r_open
def make_main(self):
self.add_module('__main__')
def make_osname(self):
osname = os.name
src = __import__(osname)
dst = self.copy_only(src, self.ok_posix_names)
dst.environ = e = {}
for key, value in os.environ.items():
e[key] = value
def make_sys(self):
m = self.copy_only(sys, self.ok_sys_names)
m.modules = self.modules
m.argv = ['RESTRICTED']
m.path = map(None, self.ok_path)
m.exc_info = self.r_exc_info
m = self.modules['sys']
l = self.modules.keys() + list(self.ok_builtin_modules)
l.sort()
m.builtin_module_names = tuple(l)
# The copy_* methods copy existing modules with some changes
def copy_except(self, src, exceptions):
dst = self.copy_none(src)
for name in dir(src):
setattr(dst, name, getattr(src, name))
for name in exceptions:
try:
delattr(dst, name)
except AttributeError:
pass
return dst
def copy_only(self, src, names):
dst = self.copy_none(src)
for name in names:
try:
value = getattr(src, name)
except AttributeError:
continue
setattr(dst, name, value)
return dst
def copy_none(self, src):
m = self.add_module(src.__name__)
m.__doc__ = src.__doc__
return m
# Add a module -- return an existing module or create one
def add_module(self, mname):
m = self.modules.get(mname)
if m is None:
self.modules[mname] = m = self.hooks.new_module(mname)
m.__builtins__ = self.modules['__builtin__']
return m
# The r* methods are public interfaces
def r_exec(self, code):
"""Execute code within a restricted environment.
The code parameter must either be a string containing one or more
lines of Python code, or a compiled code object, which will be
executed in the restricted environment's __main__ module.
"""
m = self.add_module('__main__')
exec code in m.__dict__
def r_eval(self, code):
"""Evaluate code within a restricted environment.
The code parameter must either be a string containing a Python
expression, or a compiled code object, which will be evaluated in
the restricted environment's __main__ module. The value of the
expression or code object will be returned.
"""
m = self.add_module('__main__')
return eval(code, m.__dict__)
def r_execfile(self, file):
"""Execute the Python code in the file in the restricted
environment's __main__ module.
"""
m = self.add_module('__main__')
execfile(file, m.__dict__)
def r_import(self, mname, globals={}, locals={}, fromlist=[]):
"""Import a module, raising an ImportError exception if the module
is considered unsafe.
This method is implicitly called by code executing in the
restricted environment. Overriding this method in a subclass is
used to change the policies enforced by a restricted environment.
"""
return self.importer.import_module(mname, globals, locals, fromlist)
def r_reload(self, m):
"""Reload the module object, re-parsing and re-initializing it.
This method is implicitly called by code executing in the
restricted environment. Overriding this method in a subclass is
used to change the policies enforced by a restricted environment.
"""
return self.importer.reload(m)
def r_unload(self, m):
"""Unload the module.
Removes it from the restricted environment's sys.modules dictionary.
This method is implicitly called by code executing in the
restricted environment. Overriding this method in a subclass is
used to change the policies enforced by a restricted environment.
"""
return self.importer.unload(m)
# The s_* methods are similar but also swap std{in,out,err}
def make_delegate_files(self):
s = self.modules['sys']
self.delegate_stdin = FileDelegate(s, 'stdin')
self.delegate_stdout = FileDelegate(s, 'stdout')
self.delegate_stderr = FileDelegate(s, 'stderr')
self.restricted_stdin = FileWrapper(sys.stdin)
self.restricted_stdout = FileWrapper(sys.stdout)
self.restricted_stderr = FileWrapper(sys.stderr)
def set_files(self):
if not hasattr(self, 'save_stdin'):
self.save_files()
if not hasattr(self, 'delegate_stdin'):
self.make_delegate_files()
s = self.modules['sys']
s.stdin = self.restricted_stdin
s.stdout = self.restricted_stdout
s.stderr = self.restricted_stderr
sys.stdin = self.delegate_stdin
sys.stdout = self.delegate_stdout
sys.stderr = self.delegate_stderr
def reset_files(self):
self.restore_files()
s = self.modules['sys']
self.restricted_stdin = s.stdin
self.restricted_stdout = s.stdout
self.restricted_stderr = s.stderr
def save_files(self):
self.save_stdin = sys.stdin
self.save_stdout = sys.stdout
self.save_stderr = sys.stderr
def restore_files(self):
sys.stdin = self.save_stdin
sys.stdout = self.save_stdout
sys.stderr = self.save_stderr
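    # s_apply wraps any r_* call in a save_files / set_files ...
    # restore_files sandwich: the restricted code sees the FileWrapper
    # std streams, and the host interpreter's real streams are put back
    # afterwards even if the call raises.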
def s_apply(self, func, args=(), kw={}):
self.save_files()
try:
self.set_files()
r = func(*args, **kw)
finally:
self.restore_files()
return r
def s_exec(self, *args):
"""Execute code within a restricted environment.
Similar to the r_exec() method, but the code will be granted access
to restricted versions of the standard I/O streams sys.stdin,
sys.stderr, and sys.stdout.
The code parameter must either be a string containing one or more
lines of Python code, or a compiled code object, which will be
executed in the restricted environment's __main__ module.
"""
return self.s_apply(self.r_exec, args)
def s_eval(self, *args):
"""Evaluate code within a restricted environment.
Similar to the r_eval() method, but the code will be granted access
to restricted versions of the standard I/O streams sys.stdin,
sys.stderr, and sys.stdout.
The code parameter must either be a string containing a Python
expression, or a compiled code object, which will be evaluated in
the restricted environment's __main__ module. The value of the
expression or code object will be returned.
"""
return self.s_apply(self.r_eval, args)
def s_execfile(self, *args):
"""Execute the Python code in the file in the restricted
environment's __main__ module.
Similar to the r_execfile() method, but the code will be granted
access to restricted versions of the standard I/O streams sys.stdin,
sys.stderr, and sys.stdout.
"""
return self.s_apply(self.r_execfile, args)
def s_import(self, *args):
"""Import a module, raising an ImportError exception if the module
is considered unsafe.
This method is implicitly called by code executing in the
restricted environment. Overriding this method in a subclass is
used to change the policies enforced by a restricted environment.
Similar to the r_import() method, but has access to restricted
versions of the standard I/O streams sys.stdin, sys.stderr, and
sys.stdout.
"""
return self.s_apply(self.r_import, args)
def s_reload(self, *args):
"""Reload the module object, re-parsing and re-initializing it.
This method is implicitly called by code executing in the
restricted environment. Overriding this method in a subclass is
used to change the policies enforced by a restricted environment.
Similar to the r_reload() method, but has access to restricted
versions of the standard I/O streams sys.stdin, sys.stderr, and
sys.stdout.
"""
return self.s_apply(self.r_reload, args)
def s_unload(self, *args):
"""Unload the module.
Removes it from the restricted environment's sys.modules dictionary.
This method is implicitly called by code executing in the
restricted environment. Overriding this method in a subclass is
used to change the policies enforced by a restricted environment.
Similar to the r_unload() method, but has access to restricted
versions of the standard I/O streams sys.stdin, sys.stderr, and
sys.stdout.
"""
return self.s_apply(self.r_unload, args)
# Restricted open(...)
def r_open(self, file, mode='r', buf=-1):
"""Method called when open() is called in the restricted environment.
The arguments are identical to those of the open() function, and a
file object (or a class instance compatible with file objects)
        should be returned. RExec's default behaviour is to allow opening
        any file for reading, but to forbid any attempt to write a file.
This method is implicitly called by code executing in the
restricted environment. Overriding this method in a subclass is
used to change the policies enforced by a restricted environment.
"""
mode = str(mode)
if mode not in ('r', 'rb'):
raise IOError, "can't open files for writing in restricted mode"
return open(file, mode, buf)
# Restricted version of sys.exc_info()
def r_exc_info(self):
ty, va, tr = sys.exc_info()
tr = None
return ty, va, tr
def test():
import getopt, traceback
opts, args = getopt.getopt(sys.argv[1:], 'vt:')
verbose = 0
trusted = []
for o, a in opts:
if o == '-v':
verbose = verbose+1
if o == '-t':
trusted.append(a)
r = RExec(verbose=verbose)
if trusted:
r.ok_builtin_modules = r.ok_builtin_modules + tuple(trusted)
if args:
r.modules['sys'].argv = args
r.modules['sys'].path.insert(0, os.path.dirname(args[0]))
else:
r.modules['sys'].path.insert(0, "")
fp = sys.stdin
if args and args[0] != '-':
try:
fp = open(args[0])
except IOError, msg:
print "%s: can't open file %r" % (sys.argv[0], args[0])
return 1
if fp.isatty():
try:
import readline
except ImportError:
pass
import code
class RestrictedConsole(code.InteractiveConsole):
def runcode(self, co):
self.locals['__builtins__'] = r.modules['__builtin__']
r.s_apply(code.InteractiveConsole.runcode, (self, co))
try:
RestrictedConsole(r.modules['__main__'].__dict__).interact()
except SystemExit, n:
return n
else:
text = fp.read()
fp.close()
c = compile(text, fp.name, 'exec')
try:
r.s_exec(c)
except SystemExit, n:
return n
except:
traceback.print_exc()
return 1
if __name__ == '__main__':
sys.exit(test())
| mit |
uksf/modpack | tools/build.py | 1 | 3612 | #!/usr/bin/env python3
import os
import sys
import subprocess
######## GLOBALS #########
MAINPREFIX = "u"
PREFIX = "uksf_"
##########################
def tryHemttBuild(projectpath):
hemttExe = os.path.join(projectpath, "hemtt.exe")
if os.path.isfile(hemttExe):
os.chdir(projectpath)
ret = subprocess.call([hemttExe, "pack"], stderr=subprocess.STDOUT)
return True
else:
print("hemtt not installed");
return False
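# mod_time returns the newest modification time found anywhere under `path`
# (a plain getmtime for regular files); check_for_changes uses it to decide
# whether a module directory is newer than its built .pbo.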
def mod_time(path):
if not os.path.isdir(path):
return os.path.getmtime(path)
maxi = os.path.getmtime(path)
for p in os.listdir(path):
maxi = max(mod_time(os.path.join(path, p)), maxi)
return maxi
def check_for_changes(addonspath, module):
if not os.path.exists(os.path.join(addonspath, "{}{}.pbo".format(PREFIX,module))):
return True
return mod_time(os.path.join(addonspath, module)) > mod_time(os.path.join(addonspath, "{}{}.pbo".format(PREFIX,module)))
def check_for_obsolete_pbos(addonspath, file):
module = file[len(PREFIX):-4]
if not os.path.exists(os.path.join(addonspath, module)):
return True
return False
def main(argv):
print("""
#####################
# UKSF Debug Build #
#####################
""")
compile_extensions = False
if "compile" in argv:
argv.remove("compile")
compile_extensions = True
scriptpath = os.path.realpath(__file__)
projectpath = os.path.dirname(os.path.dirname(scriptpath))
addonspath = os.path.join(projectpath, "addons")
extensionspath = os.path.join(projectpath, "extensions")
if (not tryHemttBuild(projectpath)):
os.chdir(addonspath)
made = 0
failed = 0
skipped = 0
removed = 0
for file in os.listdir(addonspath):
if os.path.isfile(file):
if check_for_obsolete_pbos(addonspath, file):
removed += 1
print(" Removing obsolete file => " + file)
os.remove(file)
print("")
for p in os.listdir(addonspath):
path = os.path.join(addonspath, p)
if not os.path.isdir(path):
continue
if p[0] == ".":
continue
if not check_for_changes(addonspath, p):
skipped += 1
print(" Skipping {}.".format(p))
continue
print("# Making {} ...".format(p))
try:
subprocess.check_output([
"makepbo",
"-NUP",
"-@={}\\{}\\addons\\{}".format(MAINPREFIX,PREFIX.rstrip("_"),p),
p,
"{}{}.pbo".format(PREFIX,p)
], stderr=subprocess.STDOUT)
except:
failed += 1
print(" Failed to make {}.".format(p))
else:
made += 1
print(" Successfully made {}.".format(p))
print("\n# Done.")
print(" Made {}, skipped {}, removed {}, failed to make {}.".format(made, skipped, removed, failed))
if (compile_extensions):
try:
print("\nCompiling extensions in {}".format(extensionspath))
os.chdir(extensionspath)
# Prepare 64bit build dirs
ret = subprocess.call(["msbuild", "uksf.sln", "/m", "/p:Configuration=Release", "/p:Platform=x64"])
if ret == 1:
return 1
except:
print("Failed to compile extension")
raise
if __name__ == "__main__":
sys.exit(main(sys.argv))
| gpl-3.0 |
yeldartoktasynov/app-landing-page | vendor/bundle/ruby/2.2.0/gems/pygments.rb-0.6.3/vendor/pygments-main/tests/test_lexers_other.py | 29 | 2305 | # -*- coding: utf-8 -*-
"""
Tests for other lexers
~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import glob
import os
import unittest
from pygments.lexers import guess_lexer
from pygments.lexers.other import RexxLexer
def _exampleFilePath(filename):
return os.path.join(os.path.dirname(__file__), 'examplefiles', filename)
class AnalyseTextTest(unittest.TestCase):
def _testCanRecognizeAndGuessExampleFiles(self, lexer):
assert lexer is not None
for pattern in lexer.filenames:
exampleFilesPattern = _exampleFilePath(pattern)
for exampleFilePath in glob.glob(exampleFilesPattern):
exampleFile = open(exampleFilePath, 'rb')
try:
text = exampleFile.read().decode('utf-8')
probability = lexer.analyse_text(text)
self.assertTrue(probability > 0,
'%s must recognize %r' % (
lexer.name, exampleFilePath))
guessedLexer = guess_lexer(text)
self.assertEqual(guessedLexer.name, lexer.name)
finally:
exampleFile.close()
def testCanRecognizeAndGuessExampleFiles(self):
self._testCanRecognizeAndGuessExampleFiles(RexxLexer)
class RexxLexerTest(unittest.TestCase):
def testCanGuessFromText(self):
self.assertAlmostEqual(0.01,
RexxLexer.analyse_text('/* */'))
self.assertAlmostEqual(1.0,
RexxLexer.analyse_text('''/* Rexx */
say "hello world"'''))
val = RexxLexer.analyse_text('/* */\n'
'hello:pRoceduRe\n'
' say "hello world"')
self.assertTrue(val > 0.5, val)
val = RexxLexer.analyse_text('''/* */
if 1 > 0 then do
say "ok"
end
else do
say "huh?"
end''')
self.assertTrue(val > 0.2, val)
val = RexxLexer.analyse_text('''/* */
greeting = "hello world!"
parse value greeting "hello" name "!"
say name''')
self.assertTrue(val > 0.2, val)
| mit |
jacobgilroy/FinalYearProject | MainView.py | 1 | 2934 | from PyQt5.QtWidgets import QWidget, QSplitter, QVBoxLayout, QFrame, QFileDialog, QScrollArea, QMenuBar, QAction, QToolBar
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QIcon
from JamSpace.Views.LaneSpaceView import LaneSpaceView
from JamSpace.Views.ControlBar import ControlBar
class MainView(QWidget):
def __init__(self):
super().__init__()
# declare member variables:
self.laneSpace = LaneSpaceView(parent=self)
self.controlBar = ControlBar(parent=self)
self.menuBar = QMenuBar(self)
self.toolBar = QToolBar(self)
self.toolBar.show()
self.laneScrollArea = QScrollArea()
self.laneScrollArea.setWidgetResizable(True)
self.WIDTH = 900
self.HEIGHT = 700
# Initialise the UI:
self.initUI()
def initUI(self):
self.setGeometry(20, 30, self.WIDTH, self.HEIGHT)
self.setWindowTitle('JamSpace')
# configure the menu bar:
# create menus:
fileMenu = self.menuBar.addMenu('&File')
editMenu = self.menuBar.addMenu('&Edit')
# create actions:
self.exitAction = QAction('Exit', self)
self.exitAction.setStatusTip('Close the application')
self.addLaneAction = QAction(QIcon('addLaneIcon.png'), 'Add Lane', self)
self.playAction = QAction(QIcon('playIcon.png'), 'Play', self)
self.stopAction = QAction(QIcon('stopIcon.ico'), 'Stop', self)
self.addLaneAction.setStatusTip('Add a new lane')
self.playAction.setStatusTip('Start playback')
self.stopAction.setStatusTip('Stop playback')
# add the actions to the menus/toolbar:
fileMenu.addAction(self.exitAction)
self.toolBar.addAction(self.playAction)
self.toolBar.addAction(self.stopAction)
self.toolBar.addAction(self.addLaneAction)
self.laneScrollArea.setWidget(self.laneSpace)
# Instantiate UI components:
laneEditSpace = QFrame(self)
laneEditSpace.setFrameShape(QFrame.StyledPanel)
clipEditSpace = QFrame(self)
clipEditSpace.setFrameShape(QFrame.StyledPanel)
# Apply layout:
vSplitter = QSplitter(Qt.Vertical)
hSplitter = QSplitter(Qt.Horizontal)
hSplitter.addWidget(laneEditSpace)
hSplitter.addWidget(clipEditSpace)
vSplitter.addWidget(self.controlBar)
vSplitter.addWidget(self.laneScrollArea)
vSplitter.addWidget(hSplitter)
vbox = QVBoxLayout(self)
vbox.addWidget(vSplitter)
#vbox.setAlignment(Qt.AlignTop)
self.setLayout(vbox)
self.show()
def showDirectoryDialog(self):
dirSelectionDialog = QFileDialog(self)
projectDir = QFileDialog.getExistingDirectory(dirSelectionDialog, 'Select Project Folder')
return projectDir | gpl-3.0 |
blckshrk/Weboob | weboob/capabilities/cinema.py | 4 | 5307 | # -*- coding: utf-8 -*-
# Copyright(C) 2013 Julien Veyssier
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from .base import IBaseCap, CapBaseObject, DateField, StringField, IntField, Field
__all__ = ['Movie', 'Person', 'ICapCinema']
class Movie(CapBaseObject):
"""
Movie object.
"""
original_title = StringField('Original title of the movie')
other_titles = Field('Titles in other countries', list)
release_date = DateField('Release date of the movie')
    all_release_dates = StringField('Release dates list of the movie')
duration = IntField('Duration of the movie in minutes')
    short_description = StringField('Short description of the movie')
genres = Field('Genres of the movie', list)
pitch = StringField('Short story description of the movie')
country = StringField('Origin country of the movie')
note = StringField('Notation of the movie')
roles = Field('Lists of Persons related to the movie indexed by roles', dict)
thumbnail_url = StringField('Url of movie thumbnail')
def __init__(self, id, original_title):
CapBaseObject.__init__(self, id)
self.original_title = original_title
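# e.g. constructing a Movie (illustrative values only):
#
#     m = Movie('tt0133093', u'The Matrix')
#     m.duration = 136
#     m.genres = [u'action', u'sci-fi']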
class Person(CapBaseObject):
"""
Person object.
"""
name = StringField('Star name of a person')
real_name = StringField('Real name of a person')
birth_date = DateField('Birth date of a person')
death_date = DateField('Death date of a person')
birth_place = StringField('City and country of birth of a person')
gender = StringField('Gender of a person')
nationality = StringField('Nationality of a person')
short_biography = StringField('Short biography of a person')
biography = StringField('Full biography of a person')
    short_description = StringField('Short description of a person')
    roles             = Field('Lists of movies related to the person indexed by roles', dict)
thumbnail_url = StringField('Url of person thumbnail')
def __init__(self, id, name):
CapBaseObject.__init__(self, id)
self.name = name
class ICapCinema(IBaseCap):
"""
Cinema databases.
"""
def iter_movies(self, pattern):
"""
Search movies and iterate on results.
:param pattern: pattern to search
:type pattern: str
        :rtype: iter[:class:`Movie`]
"""
raise NotImplementedError()
def get_movie(self, _id):
"""
Get a movie object from an ID.
:param _id: ID of movie
:type _id: str
:rtype: :class:`Movie`
"""
raise NotImplementedError()
def get_movie_releases(self, _id, country=None):
"""
Get a list of a movie releases from an ID.
:param _id: ID of movie
:type _id: str
:rtype: :class:`String`
"""
raise NotImplementedError()
def iter_movie_persons(self, _id, role=None):
"""
Get the list of persons who are related to a movie.
:param _id: ID of movie
:type _id: str
:rtype: iter[:class:`Person`]
"""
raise NotImplementedError()
def iter_persons(self, pattern):
"""
Search persons and iterate on results.
:param pattern: pattern to search
:type pattern: str
        :rtype: iter[:class:`Person`]
"""
raise NotImplementedError()
def get_person(self, _id):
"""
Get a person object from an ID.
:param _id: ID of person
:type _id: str
:rtype: :class:`Person`
"""
raise NotImplementedError()
def iter_person_movies(self, _id, role=None):
"""
Get the list of movies related to a person.
:param _id: ID of person
:type _id: str
:rtype: iter[:class:`Movie`]
"""
raise NotImplementedError()
def iter_person_movies_ids(self, _id):
"""
Get the list of movie ids related to a person.
:param _id: ID of person
:type _id: str
:rtype: iter[str]
"""
raise NotImplementedError()
def iter_movie_persons_ids(self, _id):
"""
Get the list of person ids related to a movie.
:param _id: ID of movie
:type _id: str
:rtype: iter[str]
"""
raise NotImplementedError()
    def get_person_biography(self, _id):
"""
Get the person full biography.
:param _id: ID of person
:type _id: str
:rtype: str
"""
raise NotImplementedError()
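# A minimal backend sketch (hypothetical ExampleBackend, for illustration
# only; a real weboob backend also inherits BaseBackend and implements the
# remaining ICapCinema methods):
#
#     class ExampleBackend(ICapCinema):
#         MOVIES = [('tt0133093', u'The Matrix')]
#
#         def iter_movies(self, pattern):
#             for _id, title in self.MOVIES:
#                 if pattern.lower() in title.lower():
#                     yield Movie(_id, title)
#
#         def get_movie(self, _id):
#             for mid, title in self.MOVIES:
#                 if mid == _id:
#                     return Movie(mid, title)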
| agpl-3.0 |
BigEgg/LeetCode | Python/LeetCode.Test/_051_100/Test_068_TextJustification.py | 1 | 1357 | import unittest
import sys
sys.path.append('LeetCode/_051_100')
sys.path.append('LeetCode.Test')
from _068_TextJustification import Solution
import AssertHelper
class Test_068_TextJustification(unittest.TestCase):
def test_fullJustify_1(self):
solution = Solution()
result = solution.fullJustify(["This", "is", "an", "example", "of", "text", "justification."], 16)
AssertHelper.assertArray([
"This is an",
"example of text",
"justification. "
], result)
def test_fullJustify_2(self):
solution = Solution()
result = solution.fullJustify(["What","must","be","acknowledgment","shall","be"], 16)
AssertHelper.assertArray([
"What must be",
"acknowledgment ",
"shall be "
], result)
def test_fullJustify_3(self):
solution = Solution()
result = solution.fullJustify(["Science","is","what","we","understand","well","enough","to","explain","to","a","computer.","Art","is","everything","else","we","do"], 20)
AssertHelper.assertArray([
"Science is what we",
"understand well",
"enough to explain to",
"a computer. Art is",
"everything else we",
"do "
], result)
| mit |
foss-transportationmodeling/rettina-server | flask/local/lib/python2.7/encodings/big5hkscs.py | 816 | 1039 | #
# big5hkscs.py: Python Unicode Codec for BIG5HKSCS
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_hk, codecs
import _multibytecodec as mbc
codec = _codecs_hk.getcodec('big5hkscs')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(
name='big5hkscs',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
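# Usage sketch (Python 2, matching this module): the codec registers under
# the name 'big5hkscs', so conversion goes through the normal str/unicode
# machinery; ASCII text round-trips unchanged:
#
#     data = u'hello'.encode('big5hkscs')
#     assert data.decode('big5hkscs') == u'hello'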
| apache-2.0 |
kevingu1003/python-pptx | tests/test_shared.py | 5 | 2584 | # encoding: utf-8
"""
Test suite for the pptx.shared module
"""
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
import pytest
from pptx.opc.package import XmlPart
from pptx.shared import ElementProxy, ParentedElementProxy
from .unitutil.cxml import element
from .unitutil.mock import instance_mock
class DescribeElementProxy(object):
def it_raises_on_assign_to_undefined_attr(self):
element_proxy = ElementProxy(None)
with pytest.raises(AttributeError):
element_proxy.foobar = 42
def it_knows_when_its_equal_to_another_proxy_object(self, eq_fixture):
proxy, proxy_2, proxy_3, not_a_proxy = eq_fixture
assert (proxy == proxy_2) is True
assert (proxy == proxy_3) is False
assert (proxy == not_a_proxy) is False
assert (proxy != proxy_2) is False
assert (proxy != proxy_3) is True
assert (proxy != not_a_proxy) is True
def it_knows_its_element(self, element_fixture):
proxy, element = element_fixture
assert proxy.element is element
# fixture --------------------------------------------------------
@pytest.fixture
def element_fixture(self):
p = element('w:p')
proxy = ElementProxy(p)
return proxy, p
@pytest.fixture
def eq_fixture(self):
p, q = element('w:p'), element('w:p')
proxy = ElementProxy(p)
proxy_2 = ElementProxy(p)
proxy_3 = ElementProxy(q)
not_a_proxy = 'Foobar'
return proxy, proxy_2, proxy_3, not_a_proxy
class DescribeParentedElementProxy(object):
def it_knows_its_parent(self, parent_fixture):
proxy, parent = parent_fixture
assert proxy.parent is parent
def it_knows_its_part(self, part_fixture):
proxy, part_ = part_fixture
assert proxy.part is part_
# fixture --------------------------------------------------------
@pytest.fixture
def parent_fixture(self):
parent = 42
proxy = ParentedElementProxy(element('w:p'), parent)
return proxy, parent
@pytest.fixture
def part_fixture(self, other_proxy_, part_):
other_proxy_.part = part_
proxy = ParentedElementProxy(None, other_proxy_)
return proxy, part_
# fixture components ---------------------------------------------
@pytest.fixture
def other_proxy_(self, request):
return instance_mock(request, ParentedElementProxy)
@pytest.fixture
def part_(self, request):
return instance_mock(request, XmlPart)
| mit |
heromod/migrid | mig/cgi-bin/find.py | 1 | 1096 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# find - CGI wrapper for the find functionality
# Copyright (C) 2003-2009 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
import cgi
import cgitb
cgitb.enable()
from shared.functionality.find import main
from shared.cgiscriptstub import run_cgi_script
run_cgi_script(main)
| gpl-2.0 |
quanvm009/codev7 | openerp/addons/auth_ldap/__openerp__.py | 108 | 4839 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name' : 'Authentication via LDAP',
'version' : '1.0',
'depends' : ['base'],
'images' : ['images/ldap_configuration.jpeg'],
'author' : 'OpenERP SA',
'description': """
Adds support for authentication by LDAP server.
===============================================
This module allows users to login with their LDAP username and password, and
will automatically create OpenERP users for them on the fly.
**Note:** This module only works on servers that have Python's ``ldap`` module installed.
Configuration:
--------------
After installing this module, you need to configure the LDAP parameters in the
Configuration tab of the Company details. Different companies may have different
LDAP servers, as long as they have unique usernames (usernames need to be unique
in OpenERP, even across multiple companies).
Anonymous LDAP binding is also supported (for LDAP servers that allow it), by
simply keeping the LDAP user and password empty in the LDAP configuration.
This does not allow anonymous authentication for users, it is only for the master
LDAP account that is used to verify if a user exists before attempting to
authenticate it.
Securing the connection with STARTTLS is available for LDAP servers supporting
it, by enabling the TLS option in the LDAP configuration.
For further options configuring the LDAP settings, refer to the ldap.conf
manpage: manpage:`ldap.conf(5)`.
Security Considerations:
------------------------
Users' LDAP passwords are never stored in the OpenERP database, the LDAP server
is queried whenever a user needs to be authenticated. No duplication of the
password occurs, and passwords are managed in one place only.
OpenERP does not manage password changes in the LDAP, so any change of password
should be conducted by other means in the LDAP directory directly (for LDAP users).
It is also possible to have local OpenERP users in the database along with
LDAP-authenticated users (the Administrator account is one obvious example).
Here is how it works:
---------------------
* The system first attempts to authenticate users against the local OpenERP
database;
* if this authentication fails (for example because the user has no local
password), the system then attempts to authenticate against LDAP;
As LDAP users have blank passwords by default in the local OpenERP database
(which means no access), the first step always fails and the LDAP server is
queried to do the authentication.
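A sketch of the two-step check (illustrative pseudocode only, not the
actual implementation)::

    if local_password_matches(login, password):     # step 1: local DB
        authenticated = True
    else:                                           # step 2: LDAP bind
        authenticated = ldap_bind_succeeds(login, password)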
Enabling STARTTLS ensures that the authentication query to the LDAP server is
encrypted.
User Template:
--------------
In the LDAP configuration on the Company form, it is possible to select a *User
Template*. If set, this user will be used as template to create the local users
whenever someone authenticates for the first time via LDAP authentication. This
allows pre-setting the default groups and menus of the first-time users.
**Warning:** if you set a password for the user template, this password will be
assigned as local password for each new LDAP user, effectively setting
a *master password* for these users (until manually changed). You
usually do not want this. One easy way to setup a template user is to
login once with a valid LDAP user, let OpenERP create a blank local
user with the same login (and a blank password), then rename this new
user to a username that does not exist in LDAP, and setup its groups
the way you want.
""",
'website' : 'http://www.openerp.com',
'category' : 'Authentication',
'data' : [
'users_ldap_view.xml',
'user_ldap_installer.xml',
'security/ir.model.access.csv',
],
'auto_install': False,
'installable': True,
'external_dependencies' : {
'python' : ['ldap'],
}
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
evanbiederstedt/CMBintheLikeHoodz | source_code/CAMB_vary_OmegaB_lmax1100_Feb2016.py | 1 | 137613 |
# coding: utf-8
# In[1]:
#
#
# hundred_samples = np.linspace(0.05, 0.5, num=100)
#
# Planck found \Omega_CDM
# GAVO simulated map set at \Omega_CDM = 0.122
# CAMB default below at omch2=0.122
#
# In[2]:
#
# First output 200 CAMB scalar outputs
#
# 0.005 to 0.05
#
# In[3]:
from matplotlib import pyplot as plt
import numpy as np
import camb
from camb import model, initialpower
# In[4]:
"""
#Set up a new set of parameters for CAMB
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.022, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(2000, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
for name in powers:
print(name)
# In[5]:
# plot the total lensed CMB power spectra versus unlensed, and fractional difference
totCL=powers['total']
unlensedCL=powers['unlensed_scalar']
print(totCL.shape)
# Python CL arrays are all zero based (starting at L=0), Note L=0,1 entries will be zero by default.
# The different CL are always in the order TT, EE, BB, TE (with BB=0 for unlensed scalar results).
ls = np.arange(totCL.shape[0])
print(ls)
#print(totCL[:30]) # print first 30 totCL
fig, ax = plt.subplots(2,2, figsize = (12,12))
ax[0,0].plot(ls,totCL[:,0], color='k')
ax[0,0].plot(ls,unlensedCL[:,0], color='r')
ax[0,0].set_title('TT')
ax[0,1].plot(ls[2:], 1-unlensedCL[2:,0]/totCL[2:,0]);
ax[0,1].set_title(r'$\Delta TT$')
ax[1,0].plot(ls,totCL[:,1], color='k')
ax[1,0].plot(ls,unlensedCL[:,1], color='r')
ax[1,0].set_title(r'$EE$')
ax[1,1].plot(ls,totCL[:,3], color='k')
ax[1,1].plot(ls,unlensedCL[:,3], color='r')
ax[1,1].set_title(r'$TE$');
for ax in ax.reshape(-1): ax.set_xlim([2,2500])
"""
# In[6]:
twohundred_samples = np.linspace(0.005, 0.05, num=200)
#print(twohundred_samples)
#Set up a new set of parameters for CAMB
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.022, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
pars.set_for_lmax(2500, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
for name in powers:
print(name)
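# Each entry of `powers` is an (lmax+1, 4) array with columns ordered
# TT, EE, BB, TE; e.g. powers['unlensed_scalar'][:, 0] is the unlensed
# TT spectrum, indexed from l = 0.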
"""
array([ 0.005 , 0.00522613, 0.00545226, 0.00567839, 0.00590452,
0.00613065, 0.00635678, 0.00658291, 0.00680905, 0.00703518,
0.00726131, 0.00748744, 0.00771357, 0.0079397 , 0.00816583,
0.00839196, 0.00861809, 0.00884422, 0.00907035, 0.00929648,
0.00952261, 0.00974874, 0.00997487, 0.01020101, 0.01042714,
0.01065327, 0.0108794 , 0.01110553, 0.01133166, 0.01155779,
0.01178392, 0.01201005, 0.01223618, 0.01246231, 0.01268844,
0.01291457, 0.0131407 , 0.01336683, 0.01359296, 0.0138191 ,
0.01404523, 0.01427136, 0.01449749, 0.01472362, 0.01494975,
0.01517588, 0.01540201, 0.01562814, 0.01585427, 0.0160804 ,
0.01630653, 0.01653266, 0.01675879, 0.01698492, 0.01721106,
0.01743719, 0.01766332, 0.01788945, 0.01811558, 0.01834171,
0.01856784, 0.01879397, 0.0190201 , 0.01924623, 0.01947236,
0.01969849, 0.01992462, 0.02015075, 0.02037688, 0.02060302,
0.02082915, 0.02105528, 0.02128141, 0.02150754, 0.02173367,
0.0219598 , 0.02218593, 0.02241206, 0.02263819, 0.02286432,
0.02309045, 0.02331658, 0.02354271, 0.02376884, 0.02399497,
0.02422111, 0.02444724, 0.02467337, 0.0248995 , 0.02512563,
0.02535176, 0.02557789, 0.02580402, 0.02603015, 0.02625628,
0.02648241, 0.02670854, 0.02693467, 0.0271608 , 0.02738693,
0.02761307, 0.0278392 , 0.02806533, 0.02829146, 0.02851759,
0.02874372, 0.02896985, 0.02919598, 0.02942211, 0.02964824,
0.02987437, 0.0301005 , 0.03032663, 0.03055276, 0.03077889,
0.03100503, 0.03123116, 0.03145729, 0.03168342, 0.03190955,
0.03213568, 0.03236181, 0.03258794, 0.03281407, 0.0330402 ,
0.03326633, 0.03349246, 0.03371859, 0.03394472, 0.03417085,
0.03439698, 0.03462312, 0.03484925, 0.03507538, 0.03530151,
0.03552764, 0.03575377, 0.0359799 , 0.03620603, 0.03643216,
0.03665829, 0.03688442, 0.03711055, 0.03733668, 0.03756281,
0.03778894, 0.03801508, 0.03824121, 0.03846734, 0.03869347,
0.0389196 , 0.03914573, 0.03937186, 0.03959799, 0.03982412,
0.04005025, 0.04027638, 0.04050251, 0.04072864, 0.04095477,
0.0411809 , 0.04140704, 0.04163317, 0.0418593 , 0.04208543,
0.04231156, 0.04253769, 0.04276382, 0.04298995, 0.04321608,
0.04344221, 0.04366834, 0.04389447, 0.0441206 , 0.04434673,
0.04457286, 0.04479899, 0.04502513, 0.04525126, 0.04547739,
0.04570352, 0.04592965, 0.04615578, 0.04638191, 0.04660804,
0.04683417, 0.0470603 , 0.04728643, 0.04751256, 0.04773869,
0.04796482, 0.04819095, 0.04841709, 0.04864322, 0.04886935,
0.04909548, 0.04932161, 0.04954774, 0.04977387, 0.05 ])
"""
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls0 = unlencl[:,0][2:1101]
print(len(cls0))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls1 = unlencl[:,0][2:1101]
print(len(cls1))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls2 = unlencl[:,0][2:1101]
print(len(cls2))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls3 = unlencl[:,0][2:1101]
print(len(cls3))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls4 = unlencl[:,0][2:1101]
print(len(cls4))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls5 = unlencl[:,0][2:1101]
print(len(cls5))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls6 = unlencl[:,0][2:1101]
print(len(cls6))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls7 = unlencl[:,0][2:1101]
print(len(cls7))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls8 = unlencl[:,0][2:1101]
print(len(cls8))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls9 = unlencl[:,0][2:1101]
print(len(cls9))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls10 = unlencl[:,0][2:1101]
print(len(cls10))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls11 = unlencl[:,0][2:1101]
print(len(cls11))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls12 = unlencl[:,0][2:1101]
print(len(cls12))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls13 = unlencl[:,0][2:1101]
print(len(cls13))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls14 = unlencl[:,0][2:1101]
print(len(cls14))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls15 = unlencl[:,0][2:1101]
print(len(cls15))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls16 = unlencl[:,0][2:1101]
print(len(cls16))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls17 = unlencl[:,0][2:1101]
print(len(cls17))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls18 = unlencl[:,0][2:1101]
print(len(cls18))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls19 = unlencl[:,0][2:1101]
print(len(cls19))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls20 = unlencl[:,0][2:1101]
print(len(cls20))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls21 = unlencl[:,0][2:1101]
print(len(cls21))
# Compute the spectra for indices 22 .. 122. Each iteration repeats the
# identical CAMB call; globals() keeps the per-index names (cls22, cls23,
# ...) used by the rest of the script.
for i in range(22, 123):
    pars = camb.CAMBparams()
    # CosmoMC-like settings: one massive neutrino, helium set via BBN consistency
    pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
    pars.InitPower.set_params(ns=0.965, r=0)
    #pars.set_for_lmax(514, lens_potential_accuracy=0)
    # calculate results for these parameters
    results = camb.get_results(pars)
    # get dictionary of CAMB power spectra
    powers = results.get_cmb_power_spectra(pars)
    unlencl = powers['unlensed_scalar']
    ls = np.arange(unlencl.shape[0])
    print(ls)
    print(len(ls))
    # spectrum: TT column, multipoles l = 2 .. 1100
    globals()['cls%d' % i] = unlencl[:, 0][2:1101]
    print(len(globals()['cls%d' % i]))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls123 = unlencl[:,0][2:1101]
print(len(cls123))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls124 = unlencl[:,0][2:1101]
print(len(cls124))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls125 = unlencl[:,0][2:1101]
print(len(cls125))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls126 = unlencl[:,0][2:1101]
print(len(cls126))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls127 = unlencl[:,0][2:1101]
print(len(cls127))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls128 = unlencl[:,0][2:1101]
print(len(cls128))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls129 = unlencl[:,0][2:1101]
print(len(cls129))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls130 = unlencl[:,0][2:1101]
print(len(cls130))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls131 = unlencl[:,0][2:1101]
print(len(cls131))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls132 = unlencl[:,0][2:1101]
print(len(cls132))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls133 = unlencl[:,0][2:1101]
print(len(cls133))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls134 = unlencl[:,0][2:1101]
print(len(cls134))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls135 = unlencl[:,0][2:1101]
print(len(cls135))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls136 = unlencl[:,0][2:1101]
print(len(cls136))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls137 = unlencl[:,0][2:1101]
print(len(cls137))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls138 = unlencl[:,0][2:1101]
print(len(cls138))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls139 = unlencl[:,0][2:1101]
print(len(cls139))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls140 = unlencl[:,0][2:1101]
print(len(cls140))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls141 = unlencl[:,0][2:1101]
print(len(cls141))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls142 = unlencl[:,0][2:1101]
print(len(cls142))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls143 = unlencl[:,0][2:1101]
print(len(cls143))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls144 = unlencl[:,0][2:1101]
print(len(cls144))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls145 = unlencl[:,0][2:1101]
print(len(cls145))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls146 = unlencl[:,0][2:1101]
print(len(cls146))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls147 = unlencl[:,0][2:1101]
print(len(cls147))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls148 = unlencl[:,0][2:1101]
print(len(cls148))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls149 = unlencl[:,0][2:1101]
print(len(cls149))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls150 = unlencl[:,0][2:1101]
print(len(cls150))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls151 = unlencl[:,0][2:1101]
print(len(cls151))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls152 = unlencl[:,0][2:1101]
print(len(cls152))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls153 = unlencl[:,0][2:1101]
print(len(cls153))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls154 = unlencl[:,0][2:1101]
print(len(cls154))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls155 = unlencl[:,0][2:1101]
print(len(cls155))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls156 = unlencl[:,0][2:1101]
print(len(cls156))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls157 = unlencl[:,0][2:1101]
print(len(cls157))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls158 = unlencl[:,0][2:1101]
print(len(cls158))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls159 = unlencl[:,0][2:1101]
print(len(cls159))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls160 = unlencl[:,0][2:1101]
print(len(cls160))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls161 = unlencl[:,0][2:1101]
print(len(cls161))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls162 = unlencl[:,0][2:1101]
print(len(cls162))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls163 = unlencl[:,0][2:1101]
print(len(cls163))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls164 = unlencl[:,0][2:1101]
print(len(cls164))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls165 = unlencl[:,0][2:1101]
print(len(cls165))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls166 = unlencl[:,0][2:1101]
print(len(cls166))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls167 = unlencl[:,0][2:1101]
print(len(cls167))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls168 = unlencl[:,0][2:1101]
print(len(cls168))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls169 = unlencl[:,0][2:1101]
print(len(cls169))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls170 = unlencl[:,0][2:1101]
print(len(cls170))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls171 = unlencl[:,0][2:1101]
print(len(cls171))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls172 = unlencl[:,0][2:1101]
print(len(cls172))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls173 = unlencl[:,0][2:1101]
print(len(cls173))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls174 = unlencl[:,0][2:1101]
print(len(cls174))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls175 = unlencl[:,0][2:1101]
print(len(cls175))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls176 = unlencl[:,0][2:1101]
print(len(cls176))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls177 = unlencl[:,0][2:1101]
print(len(cls177))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls178 = unlencl[:,0][2:1101]
print(len(cls178))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls179 = unlencl[:,0][2:1101]
print(len(cls179))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls180 = unlencl[:,0][2:1101]
print(len(cls180))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls181 = unlencl[:,0][2:1101]
print(len(cls181))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls182 = unlencl[:,0][2:1101]
print(len(cls182))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls183 = unlencl[:,0][2:1101]
print(len(cls183))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls184 = unlencl[:,0][2:1101]
print(len(cls184))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls185 = unlencl[:,0][2:1101]
print(len(cls185))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls186 = unlencl[:,0][2:1101]
print(len(cls186))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls187 = unlencl[:,0][2:1101]
print(len(cls187))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls188 = unlencl[:,0][2:1101]
print(len(cls188))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls189 = unlencl[:,0][2:1101]
print(len(cls189))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls190 = unlencl[:,0][2:1101]
print(len(cls190))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls191 = unlencl[:,0][2:1101]
print(len(cls191))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls192 = unlencl[:,0][2:1101]
print(len(cls192))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls193 = unlencl[:,0][2:1101]
print(len(cls193))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls194 = unlencl[:,0][2:1101]
print(len(cls194))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls195 = unlencl[:,0][2:1101]
print(len(cls195))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls196 = unlencl[:,0][2:1101]
print(len(cls196))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls197 = unlencl[:,0][2:1101]
print(len(cls197))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls198 = unlencl[:,0][2:1101]
print(len(cls198))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls199 = unlencl[:,0][2:1101]
print(len(cls199))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls200 = unlencl[:,0][2:1101]
print(len(cls200))
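# Hedged sketch (an assumption, not in the original script): if each
# run was meant to use the next baryon density from the grid quoted
# below -- which matches np.linspace(0.005, 0.05, 200) to the printed
# digits -- the sweep could be driven directly by that grid:
#
#     ombh2_grid = np.linspace(0.005, 0.05, 200)
#     cls_sweep = []
#     for ombh2 in ombh2_grid:
#         pars = camb.CAMBparams()
#         pars.set_cosmology(H0=67.5, ombh2=ombh2, omch2=0.122,
#                            mnu=0.06, omk=0, tau=0.06)
#         pars.InitPower.set_params(ns=0.965, r=0)
#         powers = camb.get_results(pars).get_cmb_power_spectra(pars)
#         cls_sweep.append(powers['unlensed_scalar'][:, 0][2:1101])
#
# The grid itself is quoted as a string below: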
"""
0.005
0.00522613065327
0.00545226130653
0.0056783919598
0.00590452261307
0.00613065326633
0.0063567839196
0.00658291457286
0.00680904522613
0.0070351758794
0.00726130653266
0.00748743718593
0.0077135678392
0.00793969849246
0.00816582914573
0.00839195979899
0.00861809045226
0.00884422110553
0.00907035175879
0.00929648241206
0.00952261306533
0.00974874371859
0.00997487437186
0.0102010050251
0.0104271356784
0.0106532663317
0.0108793969849
0.0111055276382
0.0113316582915
0.0115577889447
0.011783919598
0.0120100502513
0.0122361809045
0.0124623115578
0.0126884422111
0.0129145728643
0.0131407035176
0.0133668341709
0.0135929648241
0.0138190954774
0.0140452261307
0.0142713567839
0.0144974874372
0.0147236180905
0.0149497487437
0.015175879397
0.0154020100503
0.0156281407035
0.0158542713568
0.0160804020101
0.0163065326633
0.0165326633166
0.0167587939698
0.0169849246231
0.0172110552764
0.0174371859296
0.0176633165829
0.0178894472362
0.0181155778894
0.0183417085427
0.018567839196
0.0187939698492
0.0190201005025
0.0192462311558
0.019472361809
0.0196984924623
0.0199246231156
0.0201507537688
0.0203768844221
0.0206030150754
0.0208291457286
0.0210552763819
0.0212814070352
0.0215075376884
0.0217336683417
0.021959798995
0.0221859296482
0.0224120603015
0.0226381909548
0.022864321608
0.0230904522613
0.0233165829146
0.0235427135678
0.0237688442211
0.0239949748744
0.0242211055276
0.0244472361809
0.0246733668342
0.0248994974874
0.0251256281407
0.025351758794
0.0255778894472
0.0258040201005
0.0260301507538
0.026256281407
0.0264824120603
0.0267085427136
0.0269346733668
0.0271608040201
0.0273869346734
0.0276130653266
0.0278391959799
0.0280653266332
0.0282914572864
0.0285175879397
0.028743718593
0.0289698492462
0.0291959798995
0.0294221105528
0.029648241206
0.0298743718593
0.0301005025126
0.0303266331658
0.0305527638191
0.0307788944724
0.0310050251256
0.0312311557789
0.0314572864322
0.0316834170854
0.0319095477387
0.032135678392
0.0323618090452
0.0325879396985
0.0328140703518
0.033040201005
0.0332663316583
0.0334924623116
0.0337185929648
0.0339447236181
0.0341708542714
0.0343969849246
0.0346231155779
0.0348492462312
0.0350753768844
0.0353015075377
0.035527638191
0.0357537688442
0.0359798994975
0.0362060301508
0.036432160804
0.0366582914573
0.0368844221106
0.0371105527638
0.0373366834171
0.0375628140704
0.0377889447236
0.0380150753769
0.0382412060302
0.0384673366834
0.0386934673367
0.0389195979899
0.0391457286432
0.0393718592965
0.0395979899497
0.039824120603
0.0400502512563
0.0402763819095
0.0405025125628
0.0407286432161
0.0409547738693
0.0411809045226
0.0414070351759
0.0416331658291
0.0418592964824
0.0420854271357
0.0423115577889
0.0425376884422
0.0427638190955
0.0429899497487
0.043216080402
0.0434422110553
0.0436683417085
0.0438944723618
0.0441206030151
0.0443467336683
0.0445728643216
0.0447989949749
0.0450251256281
0.0452512562814
0.0454773869347
0.0457035175879
0.0459296482412
0.0461557788945
0.0463819095477
0.046608040201
0.0468341708543
0.0470603015075
0.0472864321608
0.0475125628141
0.0477386934673
0.0479648241206
0.0481909547739
0.0484170854271
0.0486432160804
0.0488693467337
0.0490954773869
0.0493216080402
0.0495477386935
0.0497738693467
0.05
"""
# In[50]:
cl_array = np.array([cls0, cls1, cls2, cls3, cls4, cls5, cls6, cls7, cls8, cls9, cls10,
cls11, cls12, cls13, cls14, cls15, cls16, cls17, cls18, cls19, cls20,
cls21, cls22, cls23, cls24, cls25, cls26, cls27, cls28, cls29, cls30,
cls31, cls32, cls33, cls34, cls35, cls36, cls37, cls38, cls39, cls40,
cls41, cls42, cls43, cls44, cls45, cls46, cls47, cls48, cls49, cls50,
cls51, cls52, cls53, cls54, cls55, cls56, cls57, cls58, cls59, cls60,
cls61, cls62, cls63, cls64, cls65, cls66, cls67, cls68, cls69, cls70,
cls71, cls72, cls73, cls74, cls75, cls76, cls77, cls78, cls79, cls80,
cls81, cls82, cls83, cls84, cls85, cls86, cls87, cls88, cls89, cls90,
cls91, cls92, cls93, cls94, cls95, cls96, cls97, cls98, cls99, cls100,
cls101, cls102, cls103, cls104, cls105, cls106, cls107, cls108, cls109, cls110,
                      cls111, cls112]
                     + [cls_by_index[i] for i in range(113, 201)])
# In[51]:
print(cl_array.shape)
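# Expected output: (201, 1099) -- 201 runs (cls0 .. cls200), each a
# 1099-point TT band covering multipoles l = 2 .. 1100.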
# In[52]:
f = "CAMB_cl_varyBaryon_lmax1100varyFeb2016.npy"
np.save(f, cl_array)
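# Hedged usage sketch (not in the original): reload the saved sweep
# and plot one run; matplotlib is an assumed extra dependency.
#
#     import matplotlib.pyplot as plt
#     cl_loaded = np.load("CAMB_cl_varyBaryon_lmax1100varyFeb2016.npy")
#     plt.plot(np.arange(2, 1101), cl_loaded[0])
#     plt.xlabel("multipole l")
#     plt.ylabel("unlensed scalar TT C_l")
#     plt.show()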
| mit |
pratikmallya/hue | desktop/core/ext-py/boto-2.38.0/boto/mashups/interactive.py | 148 | 2783 | # Copyright (C) 2003-2007 Robey Pointer <robey@lag.net>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
from __future__ import print_function
import socket
import sys
# windows does not have termios...
try:
import termios
import tty
has_termios = True
except ImportError:
has_termios = False
def interactive_shell(chan):
if has_termios:
posix_shell(chan)
else:
windows_shell(chan)
def posix_shell(chan):
import select
oldtty = termios.tcgetattr(sys.stdin)
try:
tty.setraw(sys.stdin.fileno())
tty.setcbreak(sys.stdin.fileno())
chan.settimeout(0.0)
while True:
r, w, e = select.select([chan, sys.stdin], [], [])
if chan in r:
try:
x = chan.recv(1024)
if len(x) == 0:
print('\r\n*** EOF\r\n', end=' ')
break
sys.stdout.write(x)
sys.stdout.flush()
except socket.timeout:
pass
if sys.stdin in r:
x = sys.stdin.read(1)
if len(x) == 0:
break
chan.send(x)
finally:
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, oldtty)
# thanks to Mike Looijmans for this code
def windows_shell(chan):
import threading
sys.stdout.write("Line-buffered terminal emulation. Press F6 or ^Z to send EOF.\r\n\r\n")
def writeall(sock):
while True:
data = sock.recv(256)
if not data:
sys.stdout.write('\r\n*** EOF ***\r\n\r\n')
sys.stdout.flush()
break
sys.stdout.write(data)
sys.stdout.flush()
writer = threading.Thread(target=writeall, args=(chan,))
writer.start()
try:
while True:
d = sys.stdin.read(1)
if not d:
break
chan.send(d)
except EOFError:
# user hit ^Z or F6
pass
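# Usage sketch (hypothetical host and credentials, not part of this module;
# assumes paramiko is installed and an SSH server is reachable):
#
#   import paramiko
#   client = paramiko.SSHClient()
#   client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
#   client.connect('host.example.com', username='user', password='secret')
#   chan = client.invoke_shell()
#   interactive_shell(chan)
#   client.close()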
| apache-2.0 |
anish/buildbot | master/buildbot/reporters/gerrit_verify_status.py | 1 | 8571 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from twisted.internet import defer
from twisted.python import failure
from buildbot.process.properties import Interpolate
from buildbot.process.properties import Properties
from buildbot.process.results import CANCELLED
from buildbot.process.results import EXCEPTION
from buildbot.process.results import FAILURE
from buildbot.process.results import RETRY
from buildbot.process.results import SKIPPED
from buildbot.process.results import SUCCESS
from buildbot.process.results import WARNINGS
from buildbot.reporters import http
from buildbot.util import httpclientservice
from buildbot.util.logger import Logger
log = Logger()
class GerritVerifyStatusPush(http.HttpStatusPushBase):
name = "GerritVerifyStatusPush"
neededDetails = dict(wantProperties=True)
# overridable constants
RESULTS_TABLE = {
SUCCESS: 1,
WARNINGS: 1,
FAILURE: -1,
SKIPPED: 0,
EXCEPTION: 0,
RETRY: 0,
CANCELLED: 0
}
DEFAULT_RESULT = -1
@defer.inlineCallbacks
def reconfigService(self,
baseURL,
auth,
startDescription=None,
endDescription=None,
verification_name=None,
abstain=False,
category=None,
reporter=None,
verbose=False,
**kwargs):
auth = yield self.renderSecrets(auth)
yield super().reconfigService(**kwargs)
if baseURL.endswith('/'):
baseURL = baseURL[:-1]
self._http = yield httpclientservice.HTTPClientService.getService(
self.master, baseURL, auth=auth,
debug=self.debug, verify=self.verify)
self._verification_name = verification_name or Interpolate(
'%(prop:buildername)s')
self._reporter = reporter or "buildbot"
self._abstain = abstain
self._category = category
self._startDescription = startDescription or 'Build started.'
self._endDescription = endDescription or 'Build done.'
self._verbose = verbose
def createStatus(self,
change_id,
revision_id,
name,
value,
abstain=None,
rerun=None,
comment=None,
url=None,
reporter=None,
category=None,
duration=None):
"""
Abstract the POST REST api documented here:
https://gerrit.googlesource.com/plugins/verify-status/+/master/src/main/resources/Documentation/rest-api-changes.md
        :param change_id: The change_id for the change tested (can be in the
            long form, e.g.
            myProject~master~I8473b95934b5732ac55d26311a706c9c2bde9940, or in
            the short integer form).
        :param revision_id: the revision_id tested; can be the patchset number
            or the commit id (short or long).
        :param name: The name of the job.
        :param value: The pass/fail result for this job: -1: fail, 0:
            unstable, 1: succeed
        :param abstain: Whether the value counts as a vote (defaults to false)
        :param rerun: Whether this result is from a re-test on the same
            patchset
        :param comment: A short comment about this job
        :param url: The url link to more info about this job
        :param reporter: The user that verified this job
        :param category: A category for this job
        :param duration: The time it took to run this job
:return: A deferred with the result from Gerrit.
"""
payload = {'name': name, 'value': value}
if abstain is not None:
payload['abstain'] = abstain
if rerun is not None:
payload['rerun'] = rerun
if comment is not None:
payload['comment'] = comment
if url is not None:
payload['url'] = url
if reporter is not None:
payload['reporter'] = reporter
if category is not None:
payload['category'] = category
if duration is not None:
payload['duration'] = duration
if self._verbose:
log.debug(
'Sending Gerrit status for {change_id}/{revision_id}: data={data}',
change_id=change_id,
revision_id=revision_id,
data=payload)
return self._http.post(
'/'.join([
'/a/changes', str(change_id), 'revisions', str(revision_id),
'verify-status~verifications'
]),
json=payload)
def formatDuration(self, duration):
"""Format the duration.
This method could be overridden if really needed, as the duration format in gerrit
is an arbitrary string.
:param duration: duration in timedelta
"""
days = duration.days
hours, remainder = divmod(duration.seconds, 3600)
minutes, seconds = divmod(remainder, 60)
if days:
return '{} day{} {}h {}m {}s'.format(days, "s" if days > 1 else "",
hours, minutes, seconds)
elif hours:
return '{}h {}m {}s'.format(hours, minutes, seconds)
return '{}m {}s'.format(minutes, seconds)
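    # Example (hypothetical values):
    #   formatDuration(datetime.timedelta(days=2, hours=3, minutes=4,
    #                                     seconds=5)) -> '2 days 3h 4m 5s'
    #   formatDuration(datetime.timedelta(minutes=4, seconds=5)) -> '4m 5s'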
@staticmethod
def getGerritChanges(props):
""" Get the gerrit changes
This method could be overridden if really needed to accommodate for other
custom steps method for fetching gerrit changes.
:param props: an IProperty
:return: (optionally via deferred) a list of dictionary with at list
change_id, and revision_id,
which format is the one accepted by the gerrit REST API as of
/changes/:change_id/revision/:revision_id paths (see gerrit doc)
"""
if 'gerrit_changes' in props:
return props.getProperty('gerrit_changes')
if 'event.change.number' in props:
return [{
'change_id': props.getProperty('event.change.number'),
'revision_id': props.getProperty('event.patchSet.number')
}]
return []
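    # Example (hypothetical property values): with properties
    # {'event.change.number': 12345, 'event.patchSet.number': 2} set on the
    # build, this returns [{'change_id': 12345, 'revision_id': 2}].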
@defer.inlineCallbacks
def send(self, build):
props = Properties.fromDict(build['properties'])
if build['complete']:
value = self.RESULTS_TABLE.get(build['results'],
self.DEFAULT_RESULT)
comment = yield props.render(self._endDescription)
duration = self.formatDuration(build['complete_at'] - build[
'started_at'])
else:
value = 0
comment = yield props.render(self._startDescription)
duration = 'pending'
name = yield props.render(self._verification_name)
reporter = yield props.render(self._reporter)
category = yield props.render(self._category)
abstain = yield props.render(self._abstain)
# TODO: find reliable way to find out whether its a rebuild
rerun = None
changes = yield self.getGerritChanges(props)
for change in changes:
try:
yield self.createStatus(
change['change_id'],
change['revision_id'],
name,
value,
abstain=abstain,
rerun=rerun,
comment=comment,
url=build['url'],
reporter=reporter,
category=category,
duration=duration)
except Exception:
log.failure(
'Failed to send status!', failure=failure.Failure())
| gpl-2.0 |
novafloss/jenkins-github-poller | jenkins_epo/rest.py | 2 | 3207 | # This file is part of jenkins-epo
#
# jenkins-epo is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or any later version.
#
# jenkins-epo is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# jenkins-epo. If not, see <http://www.gnu.org/licenses/>.
import ast
import collections
import logging
import aiohttp
from yarl import URL
from .utils import retry
logger = logging.getLogger(__name__)
class Payload(object):
@classmethod
def factory(cls, status, headers, payload):
if isinstance(payload, list):
return PayloadList(status, headers, payload)
elif isinstance(payload, dict):
return PayloadDict(status, headers, payload)
elif isinstance(payload, str):
return PayloadString(status, headers, payload)
else:
raise Exception("Unhandled payload type")
def __init__(self, status, headers, payload):
super(Payload, self).__init__(payload)
self.status = status
self.headers = headers
class PayloadList(Payload, collections.UserList):
pass
class PayloadDict(Payload, collections.UserDict):
pass
class PayloadString(Payload, collections.UserString):
pass
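# Example (hypothetical): Payload.factory(200, {}, {'ok': True}) returns a
# PayloadDict that behaves like the dict {'ok': True} while also carrying the
# HTTP response's .status and .headers.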
class Client(object):
def __init__(self, url=''):
self.url = url
def __call__(self, url):
        if not url.startswith(('http://', 'https://')):
url = self.url.rstrip('/') + '/' + str(url)
return self.__class__(url)
def __getattr__(self, name):
return self(name)
def __repr__(self):
return '<REST %s>' % (self.url)
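    # Example (hypothetical URL): Client('http://jenkins/').api.json and
    # Client('http://jenkins/')('api')('json') both yield a client whose
    # .url is 'http://jenkins/api/json'; aget()/apost() then target that URL.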
@retry
def aget(self, **kw):
session = aiohttp.ClientSession()
url = URL(self.url)
if kw:
url = url.with_query(**kw)
logger.debug("GET %s", url)
try:
response = yield from session.get(url, timeout=10)
payload = yield from response.read()
finally:
yield from session.close()
response.raise_for_status()
payload = payload.decode('utf-8')
if response.content_type == 'text/x-python':
payload = ast.literal_eval(payload)
return Payload.factory(response.status, response.headers, payload)
@retry
def apost(self, headers={}, data=None, **kw):
session = aiohttp.ClientSession()
url = URL(self.url)
if kw:
url = url.with_query(**kw)
logger.debug("POST %s", url)
try:
response = yield from session.post(
url, headers=headers, data=data, timeout=10,
)
payload = yield from response.read()
finally:
yield from session.close()
response.raise_for_status()
payload = payload.decode('utf-8')
return Payload.factory(response.status, response.headers, payload)
| gpl-3.0 |
tfmoraes/invesalius3 | invesalius/gui/widgets/slice_menu.py | 4 | 11800 | # -*- coding: UTF-8 -*-
#--------------------------------------------------------------------------
# Software: InVesalius - Software de Reconstrucao 3D de Imagens Medicas
# Copyright: (C) 2001 Centro de Pesquisas Renato Archer
# Homepage: http://www.softwarepublico.gov.br
# Contact: invesalius@cti.gov.br
# License: GNU - GPL 2 (LICENSE.txt/LICENCA.txt)
#--------------------------------------------------------------------------
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#--------------------------------------------------------------------------
import sys
try:
from collections import OrderedDict
except(ImportError):
from ordereddict import OrderedDict
import wx
from pubsub import pub as Publisher
import invesalius.constants as const
import invesalius.data.slice_ as sl
import invesalius.presets as presets
from invesalius.gui.dialogs import ClutImagedataDialog
PROJECTIONS_ID = OrderedDict(((_('Normal'), const.PROJECTION_NORMAL),
(_('MaxIP'), const.PROJECTION_MaxIP),
(_('MinIP'), const.PROJECTION_MinIP),
(_('MeanIP'), const.PROJECTION_MeanIP),
(_('MIDA'), const.PROJECTION_MIDA),
(_('Contour MaxIP'), const.PROJECTION_CONTOUR_MIP),
                              (_('Contour MIDA'), const.PROJECTION_CONTOUR_MIDA)))
class SliceMenu(wx.Menu):
def __init__(self):
wx.Menu.__init__(self)
self.ID_TO_TOOL_ITEM = {}
self.cdialog = None
#------------ Sub menu of the window and level ----------
submenu_wl = wx.Menu()
self._gen_event = True
#Window and level from DICOM
new_id = self.id_wl_first = wx.NewId()
wl_item = wx.MenuItem(submenu_wl, new_id,\
_('Default'), kind=wx.ITEM_RADIO)
submenu_wl.Append(wl_item)
self.ID_TO_TOOL_ITEM[new_id] = wl_item
#Case the user change window and level
new_id = self.other_wl_id = wx.NewId()
wl_item = wx.MenuItem(submenu_wl, new_id,\
_('Manual'), kind=wx.ITEM_RADIO)
submenu_wl.Append(wl_item)
self.ID_TO_TOOL_ITEM[new_id] = wl_item
for name in const.WINDOW_LEVEL:
if not(name == _('Default') or name == _('Manual')):
new_id = wx.NewId()
wl_item = wx.MenuItem(submenu_wl, new_id,\
name, kind=wx.ITEM_RADIO)
submenu_wl.Append(wl_item)
self.ID_TO_TOOL_ITEM[new_id] = wl_item
#----------- Sub menu of the save and load options ---------
#submenu_wl.AppendSeparator()
#options = [_("Save current values"),
# _("Save current values as..."),_("Load values")]
#for name in options:
# new_id = wx.NewId()
# wl_item = wx.MenuItem(submenu_wl, new_id,\
# name)
# submenu_wl.Append(wl_item)
# self.ID_TO_TOOL_ITEM[new_id] = wl_item
#------------ Sub menu of the pseudo colors ----------------
if sys.platform.startswith('linux'):
mkind = wx.ITEM_CHECK
else:
mkind = wx.ITEM_RADIO
self.pseudo_color_items = {}
submenu_pseudo_colours = wx.Menu()
self.pseudo_color_items = {}
new_id = self.id_pseudo_first = wx.NewId()
color_item = wx.MenuItem(submenu_pseudo_colours, new_id,\
_("Default "), kind=mkind)
submenu_pseudo_colours.Append(color_item)
color_item.Check(1)
self.ID_TO_TOOL_ITEM[new_id] = color_item
self.pseudo_color_items[new_id] = color_item
for name in sorted(const.SLICE_COLOR_TABLE):
if not(name == _("Default ")):
new_id = wx.NewId()
color_item = wx.MenuItem(submenu_wl, new_id,\
name, kind=mkind)
submenu_pseudo_colours.Append(color_item)
self.ID_TO_TOOL_ITEM[new_id] = color_item
self.pseudo_color_items[new_id] = color_item
self.plist_presets = presets.get_wwwl_presets()
for name in sorted(self.plist_presets):
new_id = wx.NewId()
color_item = wx.MenuItem(submenu_wl, new_id, name,
kind=mkind)
submenu_pseudo_colours.Append(color_item)
self.ID_TO_TOOL_ITEM[new_id] = color_item
self.pseudo_color_items[new_id] = color_item
new_id = wx.NewId()
color_item = wx.MenuItem(submenu_wl, new_id, _('Custom'),
kind=mkind)
submenu_pseudo_colours.Append(color_item)
self.ID_TO_TOOL_ITEM[new_id] = color_item
self.pseudo_color_items[new_id] = color_item
# --------------- Sub menu of the projection type ---------------------
self.projection_items = {}
submenu_projection = wx.Menu()
for name in PROJECTIONS_ID:
new_id = wx.NewId()
projection_item = wx.MenuItem(submenu_projection, new_id, name,
kind=wx.ITEM_RADIO)
submenu_projection.Append(projection_item)
self.ID_TO_TOOL_ITEM[new_id] = projection_item
self.projection_items[PROJECTIONS_ID[name]] = projection_item
flag_tiling = False
#------------ Sub menu of the image tiling ---------------
submenu_image_tiling = wx.Menu()
for name in sorted(const.IMAGE_TILING):
new_id = wx.NewId()
image_tiling_item = wx.MenuItem(submenu_image_tiling, new_id,\
name, kind=wx.ITEM_RADIO)
submenu_image_tiling.Append(image_tiling_item)
self.ID_TO_TOOL_ITEM[new_id] = image_tiling_item
#Save first id item
if not(flag_tiling):
self.id_tiling_first = new_id
flag_tiling = True
# Add sub itens in the menu
self.Append(-1, _("Window width and level"), submenu_wl)
self.Append(-1, _("Pseudo color"), submenu_pseudo_colours)
self.Append(-1, _("Projection type"), submenu_projection)
###self.Append(-1, _("Image Tiling"), submenu_image_tiling)
# It doesn't work in Linux
self.Bind(wx.EVT_MENU, self.OnPopup)
# In Linux the bind must be putted in the submenu
if sys.platform.startswith('linux') or sys.platform == 'darwin':
submenu_wl.Bind(wx.EVT_MENU, self.OnPopup)
submenu_pseudo_colours.Bind(wx.EVT_MENU, self.OnPopup)
submenu_image_tiling.Bind(wx.EVT_MENU, self.OnPopup)
submenu_projection.Bind(wx.EVT_MENU, self.OnPopup)
self.__bind_events()
def __bind_events(self):
Publisher.subscribe(self.CheckWindowLevelOther, 'Check window and level other')
Publisher.subscribe(self.FirstItemSelect, 'Select first item from slice menu')
Publisher.subscribe(self._close, 'Close project data')
Publisher.subscribe(self._check_projection_menu, 'Check projection menu')
def FirstItemSelect(self):
item = self.ID_TO_TOOL_ITEM[self.id_wl_first]
item.Check(True)
for i in self.pseudo_color_items:
it = self.pseudo_color_items[i]
if it.IsChecked():
it.Check(False)
item = self.ID_TO_TOOL_ITEM[self.id_pseudo_first]
item.Check(True)
# item = self.ID_TO_TOOL_ITEM[self.id_tiling_first]
# item.Check(True)
def CheckWindowLevelOther(self):
item = self.ID_TO_TOOL_ITEM[self.other_wl_id]
item.Check()
def _check_projection_menu(self, projection_id):
item = self.projection_items[projection_id]
item.Check()
def OnPopup(self, evt):
id = evt.GetId()
item = self.ID_TO_TOOL_ITEM[evt.GetId()]
key = item.GetItemLabelText()
if(key in const.WINDOW_LEVEL.keys()):
window, level = const.WINDOW_LEVEL[key]
Publisher.sendMessage('Bright and contrast adjustment image',
window=window, level=level)
Publisher.sendMessage('Update window level value',
window=window,
level=level)
# Publisher.sendMessage('Update window and level text',
# "WL: %d WW: %d"%(level, window))
Publisher.sendMessage('Update slice viewer')
#Necessary update the slice plane in the volume case exists
Publisher.sendMessage('Render volume viewer')
elif(key in const.SLICE_COLOR_TABLE.keys()):
values = const.SLICE_COLOR_TABLE[key]
Publisher.sendMessage('Change colour table from background image', values=values)
Publisher.sendMessage('Update slice viewer')
if sys.platform.startswith('linux'):
for i in self.pseudo_color_items:
it = self.pseudo_color_items[i]
it.Check(False)
item.Check()
self.HideClutDialog()
self._gen_event = True
elif key in self.plist_presets:
values = presets.get_wwwl_preset_colours(self.plist_presets[key])
Publisher.sendMessage('Change colour table from background image from plist', values=values)
Publisher.sendMessage('Update slice viewer')
if sys.platform.startswith('linux'):
for i in self.pseudo_color_items:
it = self.pseudo_color_items[i]
it.Check(False)
item.Check()
self.HideClutDialog()
self._gen_event = True
elif(key in const.IMAGE_TILING.keys()):
values = const.IMAGE_TILING[key]
Publisher.sendMessage('Set slice viewer layout', layout=values)
Publisher.sendMessage('Update slice viewer')
elif key in PROJECTIONS_ID:
pid = PROJECTIONS_ID[key]
Publisher.sendMessage('Set projection type', projection_id=pid)
Publisher.sendMessage('Reload actual slice')
elif key == _('Custom'):
if self.cdialog is None:
slc = sl.Slice()
histogram = slc.histogram
init = int(slc.matrix.min())
end = int(slc.matrix.max())
nodes = slc.nodes
self.cdialog = ClutImagedataDialog(histogram, init, end, nodes)
self.cdialog.Show()
else:
self.cdialog.Show(self._gen_event)
if sys.platform.startswith('linux'):
for i in self.pseudo_color_items:
it = self.pseudo_color_items[i]
it.Check(False)
item.Check()
item = self.ID_TO_TOOL_ITEM[evt.GetId()]
item.Check(True)
self._gen_event = False
evt.Skip()
def HideClutDialog(self):
if self.cdialog:
self.cdialog.Hide()
def _close(self):
if self.cdialog:
self.cdialog.Destroy()
self.cdialog = None
| gpl-2.0 |
hanxi/cocos2d-x-v3.1 | frameworks/cocos2d-x/tools/tolua/genbindings.py | 1 | 5253 | #!/usr/bin/python
# This script is used to generate lua binding glue code.
# The Android NDK version must be ndk-r9b.
import sys
import os, os.path
import shutil
import ConfigParser
import subprocess
import re
from contextlib import contextmanager
def _check_ndk_root_env():
''' Checking the environment NDK_ROOT, which will be used for building
'''
try:
NDK_ROOT = os.environ['NDK_ROOT']
except Exception:
print "NDK_ROOT not defined. Please define NDK_ROOT in your environment."
sys.exit(1)
return NDK_ROOT
def _check_python_bin_env():
''' Checking the environment PYTHON_BIN, which will be used for building
'''
try:
PYTHON_BIN = os.environ['PYTHON_BIN']
except Exception:
print "PYTHON_BIN not defined, use current python."
PYTHON_BIN = sys.executable
return PYTHON_BIN
class CmdError(Exception):
pass
@contextmanager
def _pushd(newDir):
previousDir = os.getcwd()
    os.chdir(newDir)
    try:
        yield
    finally:
        os.chdir(previousDir)
def _run_cmd(command):
ret = subprocess.call(command, shell=True)
if ret != 0:
message = "Error running command"
raise CmdError(message)
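# Example (hypothetical): _run_cmd('echo hello') runs the command through the
# shell and raises CmdError if it exits with a non-zero status.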
def main():
cur_platform= '??'
llvm_path = '??'
ndk_root = _check_ndk_root_env()
    # remove any quote characters from the path
ndk_root = re.sub(r"\"", "", ndk_root)
python_bin = _check_python_bin_env()
platform = sys.platform
if platform == 'win32':
cur_platform = 'windows'
elif platform == 'darwin':
cur_platform = platform
elif 'linux' in platform:
cur_platform = 'linux'
else:
print 'Your platform is not supported!'
sys.exit(1)
if platform == 'win32':
x86_llvm_path = os.path.abspath(os.path.join(ndk_root, 'toolchains/llvm-3.3/prebuilt', '%s' % cur_platform))
else:
x86_llvm_path = os.path.abspath(os.path.join(ndk_root, 'toolchains/llvm-3.3/prebuilt', '%s-%s' % (cur_platform, 'x86')))
x64_llvm_path = os.path.abspath(os.path.join(ndk_root, 'toolchains/llvm-3.3/prebuilt', '%s-%s' % (cur_platform, 'x86_64')))
if os.path.isdir(x86_llvm_path):
llvm_path = x86_llvm_path
elif os.path.isdir(x64_llvm_path):
llvm_path = x64_llvm_path
else:
print 'llvm toolchain not found!'
print 'path: %s or path: %s are not valid! ' % (x86_llvm_path, x64_llvm_path)
sys.exit(1)
project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
cocos_root = os.path.abspath(os.path.join(project_root, ''))
cxx_generator_root = os.path.abspath(os.path.join(project_root, 'tools/bindings-generator'))
# save config to file
config = ConfigParser.ConfigParser()
config.set('DEFAULT', 'androidndkdir', ndk_root)
config.set('DEFAULT', 'clangllvmdir', llvm_path)
config.set('DEFAULT', 'cocosdir', cocos_root)
config.set('DEFAULT', 'cxxgeneratordir', cxx_generator_root)
config.set('DEFAULT', 'extra_flags', '')
    # To fix a parse error on Windows, we must define __WCHAR_MAX__ and undefine __MINGW32__.
if platform == 'win32':
config.set('DEFAULT', 'extra_flags', '-D__WCHAR_MAX__=0x7fffffff -U__MINGW32__')
conf_ini_file = os.path.abspath(os.path.join(os.path.dirname(__file__), 'userconf.ini'))
print 'generating userconf.ini...'
with open(conf_ini_file, 'w') as configfile:
config.write(configfile)
# set proper environment variables
if 'linux' in platform or platform == 'darwin':
os.putenv('LD_LIBRARY_PATH', '%s/libclang' % cxx_generator_root)
if platform == 'win32':
path_env = os.environ['PATH']
os.putenv('PATH', r'%s;%s\libclang;%s\tools\win32;' % (path_env, cxx_generator_root, cxx_generator_root))
try:
tolua_root = '%s/tools/tolua' % project_root
output_dir = '%s/cocos/scripting/lua-bindings/auto' % project_root
cmd_args = {'cocos2dx.ini' : ('cocos2d-x', 'lua_cocos2dx_auto'), \
'cocos2dx_extension.ini' : ('cocos2dx_extension', 'lua_cocos2dx_extension_auto'), \
'cocos2dx_physics.ini' : ('cocos2dx_physics', 'lua_cocos2dx_physics_auto'), \
}
target = 'lua'
generator_py = '%s/generator.py' % cxx_generator_root
for key in cmd_args.keys():
args = cmd_args[key]
cfg = '%s/%s' % (tolua_root, key)
print 'Generating bindings for %s...' % (key[:-4])
command = '%s %s %s -s %s -t %s -o %s -n %s' % (python_bin, generator_py, cfg, args[0], target, output_dir, args[1])
_run_cmd(command)
if platform == 'win32':
with _pushd(output_dir):
_run_cmd('dos2unix *')
print '---------------------------------'
        print 'Generating lua bindings succeeded.'
print '---------------------------------'
    except CmdError:
        print '---------------------------------'
        print 'Generating lua bindings failed.'
        print '---------------------------------'
        sys.exit(1)
# -------------- main --------------
if __name__ == '__main__':
main()
| mit |
gooftroop/Zeus | contrib/sqlalchemy/dialects/sqlite/base.py | 45 | 54601 | # sqlite/base.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: sqlite
:name: SQLite
.. _sqlite_datetime:
Date and Time Types
-------------------
SQLite does not have built-in DATE, TIME, or DATETIME types, and pysqlite does
not provide out of the box functionality for translating values between Python
`datetime` objects and a SQLite-supported format. SQLAlchemy's own
:class:`~sqlalchemy.types.DateTime` and related types provide date formatting
and parsing functionality when SQLite is used. The implementation classes are
:class:`~.sqlite.DATETIME`, :class:`~.sqlite.DATE` and :class:`~.sqlite.TIME`.
These types represent dates and times as ISO formatted strings, which also
nicely support ordering. There's no reliance on typical "libc" internals for
these functions so historical dates are fully supported.
Ensuring Text affinity
^^^^^^^^^^^^^^^^^^^^^^
The DDL rendered for these types is the standard ``DATE``, ``TIME``
and ``DATETIME`` indicators. However, custom storage formats can also be
applied to these types. When the
storage format is detected as containing no alpha characters, the DDL for
these types is rendered as ``DATE_CHAR``, ``TIME_CHAR``, and ``DATETIME_CHAR``,
so that the column continues to have textual affinity.
.. seealso::
`Type Affinity <http://www.sqlite.org/datatype3.html#affinity>`_ - in the SQLite documentation
.. _sqlite_autoincrement:
SQLite Auto Incrementing Behavior
----------------------------------
Background on SQLite's autoincrement is at: http://sqlite.org/autoinc.html
Key concepts:
* SQLite has an implicit "auto increment" feature that takes place for any
non-composite primary-key column that is specifically created using
"INTEGER PRIMARY KEY" for the type + primary key.
* SQLite also has an explicit "AUTOINCREMENT" keyword, that is **not**
equivalent to the implicit autoincrement feature; this keyword is not
recommended for general use. SQLAlchemy does not render this keyword
unless a special SQLite-specific directive is used (see below). However,
it still requires that the column's type is named "INTEGER".
Using the AUTOINCREMENT Keyword
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To specifically render the AUTOINCREMENT keyword on the primary key column
when rendering DDL, add the flag ``sqlite_autoincrement=True`` to the Table
construct::
Table('sometable', metadata,
Column('id', Integer, primary_key=True),
sqlite_autoincrement=True)
Allowing autoincrement behavior SQLAlchemy types other than Integer/INTEGER
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
SQLite's typing model is based on naming conventions. Among
other things, this means that any type name which contains the
substring ``"INT"`` will be determined to be of "integer affinity". A
type named ``"BIGINT"``, ``"SPECIAL_INT"`` or even ``"XYZINTQPR"``, will be considered by
SQLite to be of "integer" affinity. However, **the SQLite
autoincrement feature, whether implicitly or explicitly enabled,
requires that the name of the column's type
is exactly the string "INTEGER"**. Therefore, if an
application uses a type like :class:`.BigInteger` for a primary key, on
SQLite this type will need to be rendered as the name ``"INTEGER"`` when
emitting the initial ``CREATE TABLE`` statement in order for the autoincrement
behavior to be available.
One approach to achieve this is to use :class:`.Integer` on SQLite
only using :meth:`.TypeEngine.with_variant`::
table = Table(
"my_table", metadata,
Column("id", BigInteger().with_variant(Integer, "sqlite"), primary_key=True)
)
Another is to use a subclass of :class:`.BigInteger` that overrides its DDL name
to be ``INTEGER`` when compiled against SQLite::
from sqlalchemy import BigInteger
from sqlalchemy.ext.compiler import compiles
class SLBigInteger(BigInteger):
pass
@compiles(SLBigInteger, 'sqlite')
def bi_c(element, compiler, **kw):
return "INTEGER"
@compiles(SLBigInteger)
def bi_c(element, compiler, **kw):
return compiler.visit_BIGINT(element, **kw)
table = Table(
"my_table", metadata,
Column("id", SLBigInteger(), primary_key=True)
)
.. seealso::
:meth:`.TypeEngine.with_variant`
:ref:`sqlalchemy.ext.compiler_toplevel`
`Datatypes In SQLite Version 3 <http://sqlite.org/datatype3.html>`_
.. _sqlite_concurrency:
Database Locking Behavior / Concurrency
---------------------------------------
SQLite is not designed for a high level of write concurrency. The database
itself, being a file, is locked completely during write operations within
transactions, meaning exactly one "connection" (in reality a file handle)
has exclusive access to the database during this period - all other
"connections" will be blocked during this time.
The Python DBAPI specification also calls for a connection model that is
always in a transaction; there is no ``connection.begin()`` method,
only ``connection.commit()`` and ``connection.rollback()``, upon which a
new transaction is to be begun immediately. This may seem to imply
that the SQLite driver would in theory allow only a single filehandle on a
particular database file at any time; however, there are several
factors both within SQlite itself as well as within the pysqlite driver
which loosen this restriction significantly.
However, no matter what locking modes are used, SQLite will still always
lock the database file once a transaction is started and DML (e.g. INSERT,
UPDATE, DELETE) has at least been emitted, and this will block
other transactions at least at the point that they also attempt to emit DML.
By default, the length of time on this block is very short before it times out
with an error.
This behavior becomes more critical when used in conjunction with the
SQLAlchemy ORM. SQLAlchemy's :class:`.Session` object by default runs
within a transaction, and with its autoflush model, may emit DML preceding
any SELECT statement. This may lead to a SQLite database that locks
more quickly than is expected. The locking mode of SQLite and the pysqlite
driver can be manipulated to some degree, however it should be noted that
achieving a high degree of write-concurrency with SQLite is a losing battle.
For more information on SQLite's lack of write concurrency by design, please
see
`Situations Where Another RDBMS May Work Better - High Concurrency
<http://www.sqlite.org/whentouse.html>`_ near the bottom of the page.
The following subsections introduce areas that are impacted by SQLite's
file-based architecture and additionally will usually require workarounds to
work when using the pysqlite driver.
.. _sqlite_isolation_level:
Transaction Isolation Level
----------------------------
SQLite supports "transaction isolation" in a non-standard way, along two
axes. One is that of the `PRAGMA read_uncommitted <http://www.sqlite.org/pragma.html#pragma_read_uncommitted>`_
instruction. This setting can essentially switch SQLite between its
default mode of ``SERIALIZABLE`` isolation, and a "dirty read" isolation
mode normally referred to as ``READ UNCOMMITTED``.
SQLAlchemy ties into this PRAGMA statement using the
:paramref:`.create_engine.isolation_level` parameter of :func:`.create_engine`.
Valid values for this parameter when used with SQLite are ``"SERIALIZABLE"``
and ``"READ UNCOMMITTED"`` corresponding to a value of 0 and 1, respectively.
SQLite defaults to ``SERIALIZABLE``, however its behavior is impacted by
the pysqlite driver's default behavior.
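For example, a sketch (hypothetical database path) of selecting the
"dirty read" mode::
    from sqlalchemy import create_engine
    engine = create_engine(
        "sqlite:////path/to/some.db",
        isolation_level="READ UNCOMMITTED")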
The other axis along which SQLite's transactional locking is impacted is
via the nature of the ``BEGIN`` statement used. The three varieties
are "deferred", "immediate", and "exclusive", as described at
`BEGIN TRANSACTION <http://sqlite.org/lang_transaction.html>`_. A straight
``BEGIN`` statement uses the "deferred" mode, where the database file is
not locked until the first read or write operation, and read access remains
open to other transactions until the first write operation. But again,
it is critical to note that the pysqlite driver interferes with this behavior
by *not even emitting BEGIN* until the first write operation.
.. warning::
SQLite's transactional scope is impacted by unresolved
issues in the pysqlite driver, which defers BEGIN statements to a greater
degree than is often feasible. See the section :ref:`pysqlite_serializable`
for techniques to work around this behavior.
SAVEPOINT Support
----------------------------
SQLite supports SAVEPOINTs, which only function once a transaction is
begun. SQLAlchemy's SAVEPOINT support is available using the
:meth:`.Connection.begin_nested` method at the Core level, and
:meth:`.Session.begin_nested` at the ORM level. However, SAVEPOINTs
won't work at all with pysqlite unless workarounds are taken.
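A Core-level sketch (assuming an existing ``engine`` and a table ``tbl``,
with the pysqlite workarounds referenced below in place)::
    with engine.connect() as conn:
        trans = conn.begin()
        savepoint = conn.begin_nested()   # emits SAVEPOINT
        conn.execute(tbl.insert(), {"data": 7})
        savepoint.rollback()              # emits ROLLBACK TO SAVEPOINT
        trans.commit()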
.. warning::
SQLite's SAVEPOINT feature is impacted by unresolved
issues in the pysqlite driver, which defers BEGIN statements to a greater
degree than is often feasible. See the section :ref:`pysqlite_serializable`
for techniques to work around this behavior.
Transactional DDL
----------------------------
The SQLite database supports transactional :term:`DDL` as well.
In this case, the pysqlite driver is not only failing to start transactions,
it also ends any existing transaction when DDL is detected, so again,
workarounds are required.
.. warning::
SQLite's transactional DDL is impacted by unresolved issues
in the pysqlite driver, which fails to emit BEGIN and additionally
forces a COMMIT to cancel any transaction when DDL is encountered.
See the section :ref:`pysqlite_serializable`
for techniques to work around this behavior.
.. _sqlite_foreign_keys:
Foreign Key Support
-------------------
SQLite supports FOREIGN KEY syntax when emitting CREATE statements for tables,
however by default these constraints have no effect on the operation of the
table.
Constraint checking on SQLite has three prerequisites:
* At least version 3.6.19 of SQLite must be in use
* The SQLite library must be compiled *without* the SQLITE_OMIT_FOREIGN_KEY
or SQLITE_OMIT_TRIGGER symbols enabled.
* The ``PRAGMA foreign_keys = ON`` statement must be emitted on all
connections before use.
SQLAlchemy allows for the ``PRAGMA`` statement to be emitted automatically for
new connections through the usage of events::
from sqlalchemy.engine import Engine
from sqlalchemy import event
@event.listens_for(Engine, "connect")
def set_sqlite_pragma(dbapi_connection, connection_record):
cursor = dbapi_connection.cursor()
cursor.execute("PRAGMA foreign_keys=ON")
cursor.close()
.. warning::
When SQLite foreign keys are enabled, it is **not possible**
to emit CREATE or DROP statements for tables that contain
mutually-dependent foreign key constraints;
to emit the DDL for these tables requires that ALTER TABLE be used to
create or drop these constraints separately, for which SQLite has
no support.
.. seealso::
`SQLite Foreign Key Support <http://www.sqlite.org/foreignkeys.html>`_
- on the SQLite web site.
:ref:`event_toplevel` - SQLAlchemy event API.
:ref:`use_alter` - more information on SQLAlchemy's facilities for handling
mutually-dependent foreign key constraints.
.. _sqlite_type_reflection:
Type Reflection
---------------
SQLite types are unlike those of most other database backends, in that
the string name of the type usually does not correspond to a "type" in a
one-to-one fashion. Instead, SQLite links per-column typing behavior
to one of five so-called "type affinities" based on a string matching
pattern for the type.
SQLAlchemy's reflection process, when inspecting types, uses a simple
lookup table to link the keywords returned to provided SQLAlchemy types.
This lookup table is present within the SQLite dialect as it is for all
other dialects. However, the SQLite dialect has a different "fallback"
routine for when a particular type name is not located in the lookup map;
it instead implements the SQLite "type affinity" scheme located at
http://www.sqlite.org/datatype3.html section 2.1.
The provided typemap will make direct associations from an exact string
name match for the following types:
:class:`~.types.BIGINT`, :class:`~.types.BLOB`,
:class:`~.types.BOOLEAN`, :class:`~.types.BOOLEAN`,
:class:`~.types.CHAR`, :class:`~.types.DATE`,
:class:`~.types.DATETIME`, :class:`~.types.FLOAT`,
:class:`~.types.DECIMAL`, :class:`~.types.FLOAT`,
:class:`~.types.INTEGER`, :class:`~.types.INTEGER`,
:class:`~.types.NUMERIC`, :class:`~.types.REAL`,
:class:`~.types.SMALLINT`, :class:`~.types.TEXT`,
:class:`~.types.TIME`, :class:`~.types.TIMESTAMP`,
:class:`~.types.VARCHAR`, :class:`~.types.NVARCHAR`,
:class:`~.types.NCHAR`
When a type name does not match one of the above types, the "type affinity"
lookup is used instead:
* :class:`~.types.INTEGER` is returned if the type name includes the
string ``INT``
* :class:`~.types.TEXT` is returned if the type name includes the
string ``CHAR``, ``CLOB`` or ``TEXT``
* :class:`~.types.NullType` is returned if the type name includes the
string ``BLOB``
* :class:`~.types.REAL` is returned if the type name includes the string
``REAL``, ``FLOA`` or ``DOUB``.
* Otherwise, the :class:`~.types.NUMERIC` type is used.
.. versionadded:: 0.9.3 Support for SQLite type affinity rules when reflecting
columns.
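For illustration, a sketch (hypothetical column type names) of the affinity
fallback during reflection::
    from sqlalchemy import create_engine, inspect
    eng = create_engine("sqlite://")
    eng.execute("CREATE TABLE t (a XYZINTQPR, b SOMECHARISH, c FLOATY)")
    # per the rules above: a -> INTEGER, b -> TEXT, c -> REAL
    print([c['type'] for c in inspect(eng).get_columns('t')])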
.. _sqlite_partial_index:
Partial Indexes
---------------
A partial index, e.g. one which uses a WHERE clause, can be specified
with the DDL system using the argument ``sqlite_where``::
tbl = Table('testtbl', m, Column('data', Integer))
idx = Index('test_idx1', tbl.c.data,
sqlite_where=and_(tbl.c.data > 5, tbl.c.data < 10))
The index will be rendered at create time as::
CREATE INDEX test_idx1 ON testtbl (data)
WHERE data > 5 AND data < 10
.. versionadded:: 0.9.9
Dotted Column Names
-------------------
Using table or column names that explicitly have periods in them is
**not recommended**. While this is generally a bad idea for relational
databases in general, as the dot is a syntactically significant character,
the SQLite driver has a bug which requires that SQLAlchemy filter out these
dots in result sets.
The bug, entirely outside of SQLAlchemy, can be illustrated thusly::
import sqlite3
conn = sqlite3.connect(":memory:")
cursor = conn.cursor()
cursor.execute("create table x (a integer, b integer)")
cursor.execute("insert into x (a, b) values (1, 1)")
cursor.execute("insert into x (a, b) values (2, 2)")
cursor.execute("select x.a, x.b from x")
assert [c[0] for c in cursor.description] == ['a', 'b']
cursor.execute('''
select x.a, x.b from x where a=1
union
select x.a, x.b from x where a=2
''')
assert [c[0] for c in cursor.description] == ['a', 'b'], \\
[c[0] for c in cursor.description]
The second assertion fails::
Traceback (most recent call last):
File "test.py", line 19, in <module>
[c[0] for c in cursor.description]
AssertionError: ['x.a', 'x.b']
Where above, the driver incorrectly reports the names of the columns
including the name of the table, which is entirely inconsistent vs.
when the UNION is not present.
SQLAlchemy relies upon column names being predictable in how they match
to the original statement, so the SQLAlchemy dialect has no choice but
to filter these out::
from sqlalchemy import create_engine
eng = create_engine("sqlite://")
conn = eng.connect()
conn.execute("create table x (a integer, b integer)")
conn.execute("insert into x (a, b) values (1, 1)")
conn.execute("insert into x (a, b) values (2, 2)")
result = conn.execute("select x.a, x.b from x")
assert result.keys() == ["a", "b"]
result = conn.execute('''
select x.a, x.b from x where a=1
union
select x.a, x.b from x where a=2
''')
assert result.keys() == ["a", "b"]
Note that above, even though SQLAlchemy filters out the dots, *both
names are still addressable*::
>>> row = result.first()
>>> row["a"]
1
>>> row["x.a"]
1
>>> row["b"]
1
>>> row["x.b"]
1
Therefore, the workaround applied by SQLAlchemy only impacts
:meth:`.ResultProxy.keys` and :meth:`.RowProxy.keys()` in the public API.
In the very specific case where
an application is forced to use column names that contain dots, and the
functionality of :meth:`.ResultProxy.keys` and :meth:`.RowProxy.keys()`
is required to return these dotted names unmodified, the ``sqlite_raw_colnames``
execution option may be provided, either on a per-:class:`.Connection` basis::
result = conn.execution_options(sqlite_raw_colnames=True).execute('''
select x.a, x.b from x where a=1
union
select x.a, x.b from x where a=2
''')
assert result.keys() == ["x.a", "x.b"]
or on a per-:class:`.Engine` basis::
engine = create_engine("sqlite://", execution_options={"sqlite_raw_colnames": True})
When using the per-:class:`.Engine` execution option, note that
**Core and ORM queries that use UNION may not function properly**.
"""
import datetime
import re
from ... import processors
from ... import sql, exc
from ... import types as sqltypes, schema as sa_schema
from ... import util
from ...engine import default, reflection
from ...sql import compiler
from ...types import (BLOB, BOOLEAN, CHAR, DECIMAL, FLOAT,
INTEGER, REAL, NUMERIC, SMALLINT, TEXT,
TIMESTAMP, VARCHAR)
class _DateTimeMixin(object):
_reg = None
_storage_format = None
def __init__(self, storage_format=None, regexp=None, **kw):
super(_DateTimeMixin, self).__init__(**kw)
if regexp is not None:
self._reg = re.compile(regexp)
if storage_format is not None:
self._storage_format = storage_format
@property
def format_is_text_affinity(self):
"""return True if the storage format will automatically imply
a TEXT affinity.
If the storage format contains no non-numeric characters,
it will imply a NUMERIC storage format on SQLite; in this case,
the type will generate its DDL as DATE_CHAR, DATETIME_CHAR,
TIME_CHAR.
.. versionadded:: 1.0.0
"""
spec = self._storage_format % {
"year": 0, "month": 0, "day": 0, "hour": 0,
"minute": 0, "second": 0, "microsecond": 0
}
return bool(re.search(r'[^0-9]', spec))
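    # Example (hypothetical format): "%(year)04d%(month)02d%(day)02d" renders
    # the probe spec as "00000000", which has no non-digit characters, so this
    # property returns False (numeric affinity); the default formats contain
    # '-' and ':' and therefore return True (text affinity).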
def adapt(self, cls, **kw):
if issubclass(cls, _DateTimeMixin):
if self._storage_format:
kw["storage_format"] = self._storage_format
if self._reg:
kw["regexp"] = self._reg
return super(_DateTimeMixin, self).adapt(cls, **kw)
def literal_processor(self, dialect):
bp = self.bind_processor(dialect)
def process(value):
return "'%s'" % bp(value)
return process
class DATETIME(_DateTimeMixin, sqltypes.DateTime):
"""Represent a Python datetime object in SQLite using a string.
The default string storage format is::
"%(year)04d-%(month)02d-%(day)02d %(hour)02d:%(min)02d:\
%(second)02d.%(microsecond)06d"
e.g.::
2011-03-15 12:05:57.10558
The storage format can be customized to some degree using the
``storage_format`` and ``regexp`` parameters, such as::
import re
from sqlalchemy.dialects.sqlite import DATETIME
dt = DATETIME(
storage_format="%(year)04d/%(month)02d/%(day)02d %(hour)02d:\
%(min)02d:%(second)02d",
regexp=r"(\d+)/(\d+)/(\d+) (\d+)-(\d+)-(\d+)"
)
:param storage_format: format string which will be applied to the dict
with keys year, month, day, hour, minute, second, and microsecond.
:param regexp: regular expression which will be applied to incoming result
rows. If the regexp contains named groups, the resulting match dict is
applied to the Python datetime() constructor as keyword arguments.
Otherwise, if positional groups are used, the datetime() constructor
is called with positional arguments via
``*map(int, match_obj.groups(0))``.
"""
_storage_format = (
"%(year)04d-%(month)02d-%(day)02d "
"%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"
)
def __init__(self, *args, **kwargs):
truncate_microseconds = kwargs.pop('truncate_microseconds', False)
super(DATETIME, self).__init__(*args, **kwargs)
if truncate_microseconds:
assert 'storage_format' not in kwargs, "You can specify only "\
"one of truncate_microseconds or storage_format."
assert 'regexp' not in kwargs, "You can specify only one of "\
"truncate_microseconds or regexp."
self._storage_format = (
"%(year)04d-%(month)02d-%(day)02d "
"%(hour)02d:%(minute)02d:%(second)02d"
)
def bind_processor(self, dialect):
datetime_datetime = datetime.datetime
datetime_date = datetime.date
format = self._storage_format
def process(value):
if value is None:
return None
elif isinstance(value, datetime_datetime):
return format % {
'year': value.year,
'month': value.month,
'day': value.day,
'hour': value.hour,
'minute': value.minute,
'second': value.second,
'microsecond': value.microsecond,
}
elif isinstance(value, datetime_date):
return format % {
'year': value.year,
'month': value.month,
'day': value.day,
'hour': 0,
'minute': 0,
'second': 0,
'microsecond': 0,
}
else:
raise TypeError("SQLite DateTime type only accepts Python "
"datetime and date objects as input.")
return process
def result_processor(self, dialect, coltype):
if self._reg:
return processors.str_to_datetime_processor_factory(
self._reg, datetime.datetime)
else:
return processors.str_to_datetime
class DATE(_DateTimeMixin, sqltypes.Date):
"""Represent a Python date object in SQLite using a string.
The default string storage format is::
"%(year)04d-%(month)02d-%(day)02d"
e.g.::
2011-03-15
The storage format can be customized to some degree using the
``storage_format`` and ``regexp`` parameters, such as::
import re
from sqlalchemy.dialects.sqlite import DATE
d = DATE(
storage_format="%(month)02d/%(day)02d/%(year)04d",
regexp=re.compile("(?P<month>\d+)/(?P<day>\d+)/(?P<year>\d+)")
)
:param storage_format: format string which will be applied to the
dict with keys year, month, and day.
:param regexp: regular expression which will be applied to
incoming result rows. If the regexp contains named groups, the
resulting match dict is applied to the Python date() constructor
as keyword arguments. Otherwise, if positional groups are used, the
date() constructor is called with positional arguments via
``*map(int, match_obj.groups(0))``.
"""
_storage_format = "%(year)04d-%(month)02d-%(day)02d"
def bind_processor(self, dialect):
datetime_date = datetime.date
format = self._storage_format
def process(value):
if value is None:
return None
elif isinstance(value, datetime_date):
return format % {
'year': value.year,
'month': value.month,
'day': value.day,
}
else:
raise TypeError("SQLite Date type only accepts Python "
"date objects as input.")
return process
def result_processor(self, dialect, coltype):
if self._reg:
return processors.str_to_datetime_processor_factory(
self._reg, datetime.date)
else:
return processors.str_to_date
class TIME(_DateTimeMixin, sqltypes.Time):
"""Represent a Python time object in SQLite using a string.
The default string storage format is::
"%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"
e.g.::
12:05:57.10558
The storage format can be customized to some degree using the
``storage_format`` and ``regexp`` parameters, such as::
import re
from sqlalchemy.dialects.sqlite import TIME
t = TIME(
storage_format="%(hour)02d-%(minute)02d-%(second)02d-\
%(microsecond)06d",
        regexp=re.compile("(\d+)-(\d+)-(\d+)(?:-(\d+))?")
)
:param storage_format: format string which will be applied to the dict
with keys hour, minute, second, and microsecond.
:param regexp: regular expression which will be applied to incoming result
rows. If the regexp contains named groups, the resulting match dict is
applied to the Python time() constructor as keyword arguments. Otherwise,
if positional groups are used, the time() constructor is called with
positional arguments via ``*map(int, match_obj.groups(0))``.
"""
_storage_format = "%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"
def __init__(self, *args, **kwargs):
truncate_microseconds = kwargs.pop('truncate_microseconds', False)
super(TIME, self).__init__(*args, **kwargs)
if truncate_microseconds:
assert 'storage_format' not in kwargs, "You can specify only "\
"one of truncate_microseconds or storage_format."
assert 'regexp' not in kwargs, "You can specify only one of "\
"truncate_microseconds or regexp."
self._storage_format = "%(hour)02d:%(minute)02d:%(second)02d"
def bind_processor(self, dialect):
datetime_time = datetime.time
format = self._storage_format
def process(value):
if value is None:
return None
elif isinstance(value, datetime_time):
return format % {
'hour': value.hour,
'minute': value.minute,
'second': value.second,
'microsecond': value.microsecond,
}
else:
raise TypeError("SQLite Time type only accepts Python "
"time objects as input.")
return process
def result_processor(self, dialect, coltype):
if self._reg:
return processors.str_to_datetime_processor_factory(
self._reg, datetime.time)
else:
return processors.str_to_time
colspecs = {
sqltypes.Date: DATE,
sqltypes.DateTime: DATETIME,
sqltypes.Time: TIME,
}
ischema_names = {
'BIGINT': sqltypes.BIGINT,
'BLOB': sqltypes.BLOB,
'BOOL': sqltypes.BOOLEAN,
'BOOLEAN': sqltypes.BOOLEAN,
'CHAR': sqltypes.CHAR,
'DATE': sqltypes.DATE,
'DATE_CHAR': sqltypes.DATE,
'DATETIME': sqltypes.DATETIME,
'DATETIME_CHAR': sqltypes.DATETIME,
'DOUBLE': sqltypes.FLOAT,
'DECIMAL': sqltypes.DECIMAL,
'FLOAT': sqltypes.FLOAT,
'INT': sqltypes.INTEGER,
'INTEGER': sqltypes.INTEGER,
'NUMERIC': sqltypes.NUMERIC,
'REAL': sqltypes.REAL,
'SMALLINT': sqltypes.SMALLINT,
'TEXT': sqltypes.TEXT,
'TIME': sqltypes.TIME,
'TIME_CHAR': sqltypes.TIME,
'TIMESTAMP': sqltypes.TIMESTAMP,
'VARCHAR': sqltypes.VARCHAR,
'NVARCHAR': sqltypes.NVARCHAR,
'NCHAR': sqltypes.NCHAR,
}
class SQLiteCompiler(compiler.SQLCompiler):
extract_map = util.update_copy(
compiler.SQLCompiler.extract_map,
{
'month': '%m',
'day': '%d',
'year': '%Y',
'second': '%S',
'hour': '%H',
'doy': '%j',
'minute': '%M',
'epoch': '%s',
'dow': '%w',
'week': '%W',
})
def visit_now_func(self, fn, **kw):
return "CURRENT_TIMESTAMP"
def visit_localtimestamp_func(self, func, **kw):
return 'DATETIME(CURRENT_TIMESTAMP, "localtime")'
def visit_true(self, expr, **kw):
return '1'
def visit_false(self, expr, **kw):
return '0'
def visit_char_length_func(self, fn, **kw):
return "length%s" % self.function_argspec(fn)
def visit_cast(self, cast, **kwargs):
if self.dialect.supports_cast:
return super(SQLiteCompiler, self).visit_cast(cast, **kwargs)
else:
return self.process(cast.clause, **kwargs)
def visit_extract(self, extract, **kw):
try:
return "CAST(STRFTIME('%s', %s) AS INTEGER)" % (
self.extract_map[extract.field],
self.process(extract.expr, **kw)
)
except KeyError:
raise exc.CompileError(
"%s is not a valid extract argument." % extract.field)
def limit_clause(self, select, **kw):
text = ""
if select._limit_clause is not None:
text += "\n LIMIT " + self.process(select._limit_clause, **kw)
if select._offset_clause is not None:
if select._limit_clause is None:
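                # SQLite requires a LIMIT whenever OFFSET is present;
                # LIMIT -1 is SQLite's convention for "no limit".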
text += "\n LIMIT " + self.process(sql.literal(-1))
text += " OFFSET " + self.process(select._offset_clause, **kw)
else:
text += " OFFSET " + self.process(sql.literal(0), **kw)
return text
def for_update_clause(self, select, **kw):
# sqlite has no "FOR UPDATE" AFAICT
return ''
class SQLiteDDLCompiler(compiler.DDLCompiler):
def get_column_specification(self, column, **kwargs):
coltype = self.dialect.type_compiler.process(
column.type, type_expression=column)
colspec = self.preparer.format_column(column) + " " + coltype
default = self.get_column_default_string(column)
if default is not None:
colspec += " DEFAULT " + default
if not column.nullable:
colspec += " NOT NULL"
if (column.primary_key and
column.table.dialect_options['sqlite']['autoincrement'] and
len(column.table.primary_key.columns) == 1 and
issubclass(column.type._type_affinity, sqltypes.Integer) and
not column.foreign_keys):
colspec += " PRIMARY KEY AUTOINCREMENT"
return colspec
def visit_primary_key_constraint(self, constraint):
# for columns with sqlite_autoincrement=True,
# the PRIMARY KEY constraint can only be inline
# with the column itself.
if len(constraint.columns) == 1:
c = list(constraint)[0]
if (c.primary_key and
c.table.dialect_options['sqlite']['autoincrement'] and
issubclass(c.type._type_affinity, sqltypes.Integer) and
not c.foreign_keys):
return None
return super(SQLiteDDLCompiler, self).visit_primary_key_constraint(
constraint)
def visit_foreign_key_constraint(self, constraint):
local_table = constraint.elements[0].parent.table
remote_table = constraint.elements[0].column.table
if local_table.schema != remote_table.schema:
return None
else:
return super(
SQLiteDDLCompiler,
self).visit_foreign_key_constraint(constraint)
def define_constraint_remote_table(self, constraint, table, preparer):
"""Format the remote table clause of a CREATE CONSTRAINT clause."""
return preparer.format_table(table, use_schema=False)
def visit_create_index(self, create):
index = create.element
text = super(SQLiteDDLCompiler, self).visit_create_index(
create, include_table_schema=False)
whereclause = index.dialect_options["sqlite"]["where"]
if whereclause is not None:
where_compiled = self.sql_compiler.process(
whereclause, include_table=False,
literal_binds=True)
text += " WHERE " + where_compiled
return text
class SQLiteTypeCompiler(compiler.GenericTypeCompiler):
def visit_large_binary(self, type_, **kw):
return self.visit_BLOB(type_)
def visit_DATETIME(self, type_, **kw):
if not isinstance(type_, _DateTimeMixin) or \
type_.format_is_text_affinity:
return super(SQLiteTypeCompiler, self).visit_DATETIME(type_)
else:
return "DATETIME_CHAR"
def visit_DATE(self, type_, **kw):
if not isinstance(type_, _DateTimeMixin) or \
type_.format_is_text_affinity:
return super(SQLiteTypeCompiler, self).visit_DATE(type_)
else:
return "DATE_CHAR"
def visit_TIME(self, type_, **kw):
if not isinstance(type_, _DateTimeMixin) or \
type_.format_is_text_affinity:
return super(SQLiteTypeCompiler, self).visit_TIME(type_)
else:
return "TIME_CHAR"
class SQLiteIdentifierPreparer(compiler.IdentifierPreparer):
reserved_words = set([
'add', 'after', 'all', 'alter', 'analyze', 'and', 'as', 'asc',
'attach', 'autoincrement', 'before', 'begin', 'between', 'by',
'cascade', 'case', 'cast', 'check', 'collate', 'column', 'commit',
'conflict', 'constraint', 'create', 'cross', 'current_date',
'current_time', 'current_timestamp', 'database', 'default',
'deferrable', 'deferred', 'delete', 'desc', 'detach', 'distinct',
'drop', 'each', 'else', 'end', 'escape', 'except', 'exclusive',
'explain', 'false', 'fail', 'for', 'foreign', 'from', 'full', 'glob',
'group', 'having', 'if', 'ignore', 'immediate', 'in', 'index',
'indexed', 'initially', 'inner', 'insert', 'instead', 'intersect',
'into', 'is', 'isnull', 'join', 'key', 'left', 'like', 'limit',
'match', 'natural', 'not', 'notnull', 'null', 'of', 'offset', 'on',
'or', 'order', 'outer', 'plan', 'pragma', 'primary', 'query',
'raise', 'references', 'reindex', 'rename', 'replace', 'restrict',
'right', 'rollback', 'row', 'select', 'set', 'table', 'temp',
'temporary', 'then', 'to', 'transaction', 'trigger', 'true', 'union',
'unique', 'update', 'using', 'vacuum', 'values', 'view', 'virtual',
'when', 'where',
])
def format_index(self, index, use_schema=True, name=None):
"""Prepare a quoted index and schema name."""
if name is None:
name = index.name
result = self.quote(name, index.quote)
if (not self.omit_schema and
use_schema and
getattr(index.table, "schema", None)):
result = self.quote_schema(
index.table.schema, index.table.quote_schema) + "." + result
return result
class SQLiteExecutionContext(default.DefaultExecutionContext):
@util.memoized_property
def _preserve_raw_colnames(self):
return self.execution_options.get("sqlite_raw_colnames", False)
def _translate_colname(self, colname):
# adjust for dotted column names. SQLite
# in the case of UNION may store col names as
# "tablename.colname", or if using an attached database,
# "database.tablename.colname", in cursor.description
if not self._preserve_raw_colnames and "." in colname:
return colname.split(".")[-1], colname
else:
return colname, None
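    # For example (illustrative values): a cursor.description name of
    # "some_table.some_col" is returned as
    # ("some_col", "some_table.some_col") so callers can match either
    # form, while a plain "some_col" passes through as ("some_col", None).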
class SQLiteDialect(default.DefaultDialect):
name = 'sqlite'
supports_alter = False
supports_unicode_statements = True
supports_unicode_binds = True
supports_default_values = True
supports_empty_insert = False
supports_cast = True
supports_multivalues_insert = True
supports_right_nested_joins = False
default_paramstyle = 'qmark'
execution_ctx_cls = SQLiteExecutionContext
statement_compiler = SQLiteCompiler
ddl_compiler = SQLiteDDLCompiler
type_compiler = SQLiteTypeCompiler
preparer = SQLiteIdentifierPreparer
ischema_names = ischema_names
colspecs = colspecs
isolation_level = None
construct_arguments = [
(sa_schema.Table, {
"autoincrement": False
}),
(sa_schema.Index, {
"where": None,
}),
]
_broken_fk_pragma_quotes = False
def __init__(self, isolation_level=None, native_datetime=False, **kwargs):
default.DefaultDialect.__init__(self, **kwargs)
self.isolation_level = isolation_level
        # this flag is used by the pysqlite dialect, and perhaps others in
        # the future, to indicate the driver is handling date/timestamp
        # conversions (and perhaps datetime/time as well on some hypothetical
        # driver?)
self.native_datetime = native_datetime
if self.dbapi is not None:
self.supports_default_values = (
self.dbapi.sqlite_version_info >= (3, 3, 8))
self.supports_cast = (
self.dbapi.sqlite_version_info >= (3, 2, 3))
self.supports_multivalues_insert = (
# http://www.sqlite.org/releaselog/3_7_11.html
self.dbapi.sqlite_version_info >= (3, 7, 11))
# see http://www.sqlalchemy.org/trac/ticket/2568
# as well as http://www.sqlite.org/src/info/600482d161
self._broken_fk_pragma_quotes = (
self.dbapi.sqlite_version_info < (3, 6, 14))
_isolation_lookup = {
'READ UNCOMMITTED': 1,
'SERIALIZABLE': 0,
}
def set_isolation_level(self, connection, level):
try:
isolation_level = self._isolation_lookup[level.replace('_', ' ')]
except KeyError:
raise exc.ArgumentError(
"Invalid value '%s' for isolation_level. "
"Valid isolation levels for %s are %s" %
(level, self.name, ", ".join(self._isolation_lookup))
)
cursor = connection.cursor()
cursor.execute("PRAGMA read_uncommitted = %d" % isolation_level)
cursor.close()
def get_isolation_level(self, connection):
cursor = connection.cursor()
cursor.execute('PRAGMA read_uncommitted')
res = cursor.fetchone()
if res:
value = res[0]
else:
# http://www.sqlite.org/changes.html#version_3_3_3
# "Optional READ UNCOMMITTED isolation (instead of the
# default isolation level of SERIALIZABLE) and
# table level locking when database connections
            # share a common cache."
            # pre-SQLite 3.3.0, default to 0
value = 0
cursor.close()
if value == 0:
return "SERIALIZABLE"
elif value == 1:
return "READ UNCOMMITTED"
else:
assert False, "Unknown isolation level %s" % value
def on_connect(self):
if self.isolation_level is not None:
def connect(conn):
self.set_isolation_level(conn, self.isolation_level)
return connect
else:
return None
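    # Usage sketch for the isolation-level hooks above (assumed typical
    # SQLAlchemy usage, not part of this module):
    #   engine = create_engine("sqlite://",
    #                          isolation_level="READ UNCOMMITTED")
    # which ultimately issues "PRAGMA read_uncommitted = 1" on connect.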
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
if schema is not None:
qschema = self.identifier_preparer.quote_identifier(schema)
master = '%s.sqlite_master' % qschema
else:
master = "sqlite_master"
s = ("SELECT name FROM %s "
"WHERE type='table' ORDER BY name") % (master,)
rs = connection.execute(s)
return [row[0] for row in rs]
@reflection.cache
def get_temp_table_names(self, connection, **kw):
s = "SELECT name FROM sqlite_temp_master "\
"WHERE type='table' ORDER BY name "
rs = connection.execute(s)
return [row[0] for row in rs]
@reflection.cache
def get_temp_view_names(self, connection, **kw):
s = "SELECT name FROM sqlite_temp_master "\
"WHERE type='view' ORDER BY name "
rs = connection.execute(s)
return [row[0] for row in rs]
def has_table(self, connection, table_name, schema=None):
info = self._get_table_pragma(
connection, "table_info", table_name, schema=schema)
return bool(info)
@reflection.cache
def get_view_names(self, connection, schema=None, **kw):
if schema is not None:
qschema = self.identifier_preparer.quote_identifier(schema)
master = '%s.sqlite_master' % qschema
else:
master = "sqlite_master"
s = ("SELECT name FROM %s "
"WHERE type='view' ORDER BY name") % (master,)
rs = connection.execute(s)
return [row[0] for row in rs]
@reflection.cache
def get_view_definition(self, connection, view_name, schema=None, **kw):
if schema is not None:
qschema = self.identifier_preparer.quote_identifier(schema)
master = '%s.sqlite_master' % qschema
s = ("SELECT sql FROM %s WHERE name = '%s'"
"AND type='view'") % (master, view_name)
rs = connection.execute(s)
else:
try:
s = ("SELECT sql FROM "
" (SELECT * FROM sqlite_master UNION ALL "
" SELECT * FROM sqlite_temp_master) "
"WHERE name = '%s' "
"AND type='view'") % view_name
rs = connection.execute(s)
except exc.DBAPIError:
s = ("SELECT sql FROM sqlite_master WHERE name = '%s' "
"AND type='view'") % view_name
rs = connection.execute(s)
result = rs.fetchall()
if result:
return result[0].sql
@reflection.cache
def get_columns(self, connection, table_name, schema=None, **kw):
info = self._get_table_pragma(
connection, "table_info", table_name, schema=schema)
columns = []
for row in info:
(name, type_, nullable, default, primary_key) = (
row[1], row[2].upper(), not row[3], row[4], row[5])
columns.append(self._get_column_info(name, type_, nullable,
default, primary_key))
return columns
def _get_column_info(self, name, type_, nullable, default, primary_key):
coltype = self._resolve_type_affinity(type_)
if default is not None:
default = util.text_type(default)
return {
'name': name,
'type': coltype,
'nullable': nullable,
'default': default,
'autoincrement': default is None,
'primary_key': primary_key,
}
def _resolve_type_affinity(self, type_):
"""Return a data type from a reflected column, using affinity tules.
SQLite's goal for universal compatibility introduces some complexity
during reflection, as a column's defined type might not actually be a
        type that SQLite understands - or indeed, may not be defined *at all*.
Internally, SQLite handles this with a 'data type affinity' for each
column definition, mapping to one of 'TEXT', 'NUMERIC', 'INTEGER',
'REAL', or 'NONE' (raw bits). The algorithm that determines this is
listed in http://www.sqlite.org/datatype3.html section 2.1.
This method allows SQLAlchemy to support that algorithm, while still
        providing access to smarter reflection utilities by recognizing
column definitions that SQLite only supports through affinity (like
DATE and DOUBLE).
"""
match = re.match(r'([\w ]+)(\(.*?\))?', type_)
if match:
coltype = match.group(1)
args = match.group(2)
else:
coltype = ''
args = ''
if coltype in self.ischema_names:
coltype = self.ischema_names[coltype]
elif 'INT' in coltype:
coltype = sqltypes.INTEGER
elif 'CHAR' in coltype or 'CLOB' in coltype or 'TEXT' in coltype:
coltype = sqltypes.TEXT
elif 'BLOB' in coltype or not coltype:
coltype = sqltypes.NullType
elif 'REAL' in coltype or 'FLOA' in coltype or 'DOUB' in coltype:
coltype = sqltypes.REAL
else:
coltype = sqltypes.NUMERIC
if args is not None:
args = re.findall(r'(\d+)', args)
try:
coltype = coltype(*[int(a) for a in args])
except TypeError:
util.warn(
"Could not instantiate type %s with "
"reflected arguments %s; using no arguments." %
(coltype, args))
coltype = coltype()
else:
coltype = coltype()
return coltype
@reflection.cache
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
cols = self.get_columns(connection, table_name, schema, **kw)
pkeys = []
for col in cols:
if col['primary_key']:
pkeys.append(col['name'])
return {'constrained_columns': pkeys, 'name': None}
@reflection.cache
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
# sqlite makes this *extremely difficult*.
# First, use the pragma to get the actual FKs.
pragma_fks = self._get_table_pragma(
connection, "foreign_key_list",
table_name, schema=schema
)
fks = {}
for row in pragma_fks:
(numerical_id, rtbl, lcol, rcol) = (
row[0], row[2], row[3], row[4])
if rcol is None:
rcol = lcol
if self._broken_fk_pragma_quotes:
rtbl = re.sub(r'^[\"\[`\']|[\"\]`\']$', '', rtbl)
if numerical_id in fks:
fk = fks[numerical_id]
else:
fk = fks[numerical_id] = {
'name': None,
'constrained_columns': [],
'referred_schema': None,
'referred_table': rtbl,
'referred_columns': [],
}
fks[numerical_id] = fk
fk['constrained_columns'].append(lcol)
fk['referred_columns'].append(rcol)
def fk_sig(constrained_columns, referred_table, referred_columns):
return tuple(constrained_columns) + (referred_table,) + \
tuple(referred_columns)
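        # e.g. fk_sig(["a_id"], "a", ["id"]) -> ("a_id", "a", "id")
        # (illustrative values); this hashable signature is used below to
        # match PRAGMA-derived constraints against SQL-parsed ones.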
        # then, parse the actual SQL and attempt to find DDL that matches
        # the names as well. SQLite saves the DDL in whatever format
        # it was typed in as, so we need to be liberal here.
keys_by_signature = dict(
(
fk_sig(
fk['constrained_columns'],
fk['referred_table'], fk['referred_columns']),
fk
) for fk in fks.values()
)
table_data = self._get_table_sql(connection, table_name, schema=schema)
if table_data is None:
# system tables, etc.
return []
def parse_fks():
            FK_PATTERN = (
                r'(?:CONSTRAINT (\w+) +)?'
                r'FOREIGN KEY *\( *(.+?) *\) +'
                r'REFERENCES +(?:(?:"(.+?)")|([a-z0-9_]+)) *\((.+?)\)'
            )
for match in re.finditer(FK_PATTERN, table_data, re.I):
(
constraint_name, constrained_columns,
referred_quoted_name, referred_name,
referred_columns) = match.group(1, 2, 3, 4, 5)
constrained_columns = list(
self._find_cols_in_sig(constrained_columns))
if not referred_columns:
referred_columns = constrained_columns
else:
referred_columns = list(
self._find_cols_in_sig(referred_columns))
referred_name = referred_quoted_name or referred_name
yield (
constraint_name, constrained_columns,
referred_name, referred_columns)
fkeys = []
for (
constraint_name, constrained_columns,
referred_name, referred_columns) in parse_fks():
sig = fk_sig(
constrained_columns, referred_name, referred_columns)
if sig not in keys_by_signature:
util.warn(
"WARNING: SQL-parsed foreign key constraint "
"'%s' could not be located in PRAGMA "
"foreign_keys for table %s" % (
sig,
table_name
))
continue
key = keys_by_signature.pop(sig)
key['name'] = constraint_name
fkeys.append(key)
        # assume the remainders are the unnamed, inline constraints; just
        # use them as-is, as it's extremely difficult to parse inline
        # constraints
fkeys.extend(keys_by_signature.values())
return fkeys
def _find_cols_in_sig(self, sig):
for match in re.finditer(r'(?:"(.+?)")|([a-z0-9_]+)', sig, re.I):
yield match.group(1) or match.group(2)
@reflection.cache
def get_unique_constraints(self, connection, table_name,
schema=None, **kw):
auto_index_by_sig = {}
for idx in self.get_indexes(
connection, table_name, schema=schema,
include_auto_indexes=True, **kw):
if not idx['name'].startswith("sqlite_autoindex"):
continue
sig = tuple(idx['column_names'])
auto_index_by_sig[sig] = idx
table_data = self._get_table_sql(
connection, table_name, schema=schema, **kw)
if not table_data:
return []
unique_constraints = []
def parse_uqs():
            UNIQUE_PATTERN = r'(?:CONSTRAINT "?(.+?)"? +)?UNIQUE *\((.+?)\)'
            INLINE_UNIQUE_PATTERN = (
                '(?:(".+?")|([a-z0-9]+)) '
                '+[a-z0-9_ ]+? +UNIQUE')
for match in re.finditer(UNIQUE_PATTERN, table_data, re.I):
name, cols = match.group(1, 2)
yield name, list(self._find_cols_in_sig(cols))
# we need to match inlines as well, as we seek to differentiate
# a UNIQUE constraint from a UNIQUE INDEX, even though these
# are kind of the same thing :)
for match in re.finditer(INLINE_UNIQUE_PATTERN, table_data, re.I):
cols = list(
self._find_cols_in_sig(match.group(1) or match.group(2)))
yield None, cols
for name, cols in parse_uqs():
sig = tuple(cols)
if sig in auto_index_by_sig:
auto_index_by_sig.pop(sig)
parsed_constraint = {
'name': name,
'column_names': cols
}
unique_constraints.append(parsed_constraint)
# NOTE: auto_index_by_sig might not be empty here,
# the PRIMARY KEY may have an entry.
return unique_constraints
@reflection.cache
def get_indexes(self, connection, table_name, schema=None, **kw):
pragma_indexes = self._get_table_pragma(
connection, "index_list", table_name, schema=schema)
indexes = []
include_auto_indexes = kw.pop('include_auto_indexes', False)
for row in pragma_indexes:
# ignore implicit primary key index.
# http://www.mail-archive.com/sqlite-users@sqlite.org/msg30517.html
if (not include_auto_indexes and
row[1].startswith('sqlite_autoindex')):
continue
indexes.append(dict(name=row[1], column_names=[], unique=row[2]))
        # loop through the collected indexes to get their column names.
for idx in indexes:
pragma_index = self._get_table_pragma(
connection, "index_info", idx['name'])
for row in pragma_index:
idx['column_names'].append(row[2])
return indexes
@reflection.cache
def _get_table_sql(self, connection, table_name, schema=None, **kw):
try:
s = ("SELECT sql FROM "
" (SELECT * FROM sqlite_master UNION ALL "
" SELECT * FROM sqlite_temp_master) "
"WHERE name = '%s' "
"AND type = 'table'") % table_name
rs = connection.execute(s)
except exc.DBAPIError:
s = ("SELECT sql FROM sqlite_master WHERE name = '%s' "
"AND type = 'table'") % table_name
rs = connection.execute(s)
return rs.scalar()
def _get_table_pragma(self, connection, pragma, table_name, schema=None):
quote = self.identifier_preparer.quote_identifier
if schema is not None:
statement = "PRAGMA %s." % quote(schema)
else:
statement = "PRAGMA "
qtable = quote(table_name)
statement = "%s%s(%s)" % (statement, pragma, qtable)
cursor = connection.execute(statement)
if not cursor._soft_closed:
# work around SQLite issue whereby cursor.description
# is blank when PRAGMA returns no rows:
# http://www.sqlite.org/cvstrac/tktview?tn=1884
result = cursor.fetchall()
else:
result = []
return result
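    # Illustrative call (hypothetical names):
    #   self._get_table_pragma(conn, "foreign_key_list", "child",
    #                          schema="aux")
    # executes: PRAGMA "aux".foreign_key_list("child")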
| mit |
lixiangning888/whole_project | modules/signatures_orginal_20151110/dyre_apis.py | 1 | 6073 | # Copyright (C) 2015 Optiv, Inc. (brad.spengler@optiv.com), KillerInstinct
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
try:
import re2 as re
except ImportError:
import re
from lib.cuckoo.common.abstracts import Signature
class Dyre_APIs(Signature):
name = "dyre_behavior"
description = "Exhibits behavior characteristic of Dyre malware"
weight = 3
severity = 3
categories = ["banker", "trojan"]
families = ["dyre"]
authors = ["Optiv", "KillerInstinct"]
minimum = "1.3"
evented = True
    # Try to parse a process memory dump to regex-extract C2 nodes.
extract_c2s = True
def __init__(self, *args, **kwargs):
Signature.__init__(self, *args, **kwargs)
self.cryptoapis = False
self.networkapis = set()
self.syncapis = False
self.compname = self.get_environ_entry(self.get_initial_process(),
"ComputerName")
filter_apinames = set(["CryptHashData", "HttpOpenRequestA",
"NtCreateNamedPipeFile"])
def on_call(self, call, process):
# Legacy, modern Dyre doesn't have hardcoded hashes in
# CryptHashData anymore
iocs = [
"J7dnlDvybciDvu8d46D\\x00",
"qwererthwebfsdvjaf+\\x00",
]
pipe = [
"\\??\\pipe\\3obdw5e5w4",
"\\??\\pipe\\g2fabg5713",
]
if call["api"] == "CryptHashData":
buf = self.get_argument(call, "Buffer")
if buf in iocs:
self.cryptoapis = True
tmp = re.sub(r"\\x[0-9A-Fa-f]{2}", "", buf)
if self.compname in tmp:
if re.match("^" + self.compname + "[0-9 ]+$", tmp):
self.cryptoapis = True
elif call["api"] == "HttpOpenRequestA":
buf = self.get_argument(call, "Path")
if len(buf) > 10:
self.networkapis.add(buf)
elif call["api"] == "NtCreateNamedPipeFile":
buf = self.get_argument(call, "PipeName")
for npipe in pipe:
if buf == npipe:
self.syncapis = True
break
return None
def on_complete(self):
ret = False
networkret = False
campaign = set()
mutexs = [
"^(Global|Local)\\\\pen3j3832h$",
"^(Global|Local)\\\\u1nyj3rt20",
]
for mutex in mutexs:
if self.check_mutex(pattern=mutex, regex=True):
self.syncapis = True
break
# C2 Beacon check
if self.networkapis:
# Gather computer name
for httpreq in self.networkapis:
# Generate patterns (should only ever be one per indicator)
                indicators = [
                    r"/(\d{4}[a-z]{2}\d{2})/" + self.compname + "_",
                    r"/([^/]+)/" + self.compname + r"/\d+/\d+/\d+/$",
                    r"/([^/]+)/" + self.compname + r"_W\d{6}\.[0-9A-F]{32}",
                ]
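                # Illustrative beacon paths these patterns target
                # (computer names/values here are hypothetical):
                #   /1701ca05/SOMEHOST-PC_
                #   /btnet1/SOMEHOST-PC/5/1/4/
                #   /btnet1/SOMEHOST-PC_W617601.<32 hex chars>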
for indicator in indicators:
buf = re.match(indicator, httpreq)
if buf:
networkret = True
campaign.add(buf.group(1))
# Check if there are any winners
if self.cryptoapis or self.syncapis or networkret:
ret = True
if (self.cryptoapis or self.syncapis) and networkret:
self.confidence = 100
                self.description = "Exhibits behavioral and network characteristics of Upatre+Dyre/Mini-Dyre malware"
for camp in campaign:
self.data.append({"Campaign": camp})
elif networkret:
self.description = "Exhibits network behavior characteristic of Upatre+Dyre/Mini-Dyre malware"
for camp in campaign:
self.data.append({"Campaign": camp})
if self.extract_c2s:
dump_pid = 0
for proc in self.results["behavior"]["processtree"]:
for child in proc["children"]:
# Look for lowest PID svchost.exe
if not dump_pid or child["pid"] < dump_pid:
if child["name"] == "svchost.exe":
dump_pid = child["pid"]
if dump_pid:
dump_path = ""
if len(self.results["procmemory"]):
for memdump in self.results["procmemory"]:
if dump_pid == memdump["pid"]:
dump_path = memdump["file"]
if dump_path:
whitelist = [
"1.2.3.4",
"0.0.0.0",
]
with open(dump_path, "rb") as dump_file:
dump_data = dump_file.read()
                    ippat = r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{2,5}"
ips = re.findall(ippat, dump_data)
for ip in set(ips):
addit = True
for item in whitelist:
if ip.startswith(item):
addit = False
if addit:
self.data.append({"C2": ip})
return ret
| lgpl-3.0 |
cdrttn/samba-regedit | lib/ntdb/test/python-api.py | 7 | 4389 | #!/usr/bin/env python
# Some simple tests for the Python bindings for NTDB.
# Note that this tests the interface of the Python bindings;
# it does not test ntdb itself.
#
# Copyright (C) 2007-2013 Jelmer Vernooij <jelmer@samba.org>
# Published under the GNU LGPLv3 or later
import ntdb
from unittest import TestCase
import os, tempfile
class OpenTdbTests(TestCase):
def test_nonexistent_read(self):
self.assertRaises(IOError, ntdb.Ntdb, "/some/nonexistent/file", 0,
ntdb.DEFAULT, os.O_RDWR)
class CloseTdbTests(TestCase):
def test_double_close(self):
self.ntdb = ntdb.Ntdb(tempfile.mkstemp()[1], ntdb.DEFAULT,
os.O_CREAT|os.O_RDWR)
self.assertNotEqual(None, self.ntdb)
# ensure that double close does not crash python
self.ntdb.close()
self.ntdb.close()
# Check that further operations do not crash python
self.assertRaises(RuntimeError, lambda: self.ntdb.transaction_start())
self.assertRaises(RuntimeError, lambda: self.ntdb["bar"])
class InternalTdbTests(TestCase):
def test_repr(self):
self.ntdb = ntdb.Ntdb()
# repr used to crash on internal db
self.assertEquals(repr(self.ntdb), "Ntdb(<internal>)")
class SimpleTdbTests(TestCase):
def setUp(self):
super(SimpleTdbTests, self).setUp()
self.ntdb = ntdb.Ntdb(tempfile.mkstemp()[1], ntdb.DEFAULT,
os.O_CREAT|os.O_RDWR)
self.assertNotEqual(None, self.ntdb)
def tearDown(self):
del self.ntdb
def test_repr(self):
self.assertTrue(repr(self.ntdb).startswith("Ntdb('"))
def test_lockall(self):
self.ntdb.lock_all()
def test_unlockall(self):
self.ntdb.lock_all()
self.ntdb.unlock_all()
def test_lockall_read(self):
self.ntdb.read_lock_all()
self.ntdb.read_unlock_all()
def test_store(self):
self.ntdb.store("bar", "bla")
self.assertEquals("bla", self.ntdb.get("bar"))
def test_getitem(self):
self.ntdb["bar"] = "foo"
self.assertEquals("foo", self.ntdb["bar"])
def test_delete(self):
self.ntdb["bar"] = "foo"
del self.ntdb["bar"]
self.assertRaises(KeyError, lambda: self.ntdb["bar"])
def test_contains(self):
self.ntdb["bla"] = "bloe"
self.assertTrue("bla" in self.ntdb)
def test_keyerror(self):
self.assertRaises(KeyError, lambda: self.ntdb["bla"])
def test_name(self):
self.ntdb.filename
def test_iterator(self):
self.ntdb["bla"] = "1"
self.ntdb["brainslug"] = "2"
l = list(self.ntdb)
l.sort()
self.assertEquals(["bla", "brainslug"], l)
def test_transaction_cancel(self):
self.ntdb["bloe"] = "2"
self.ntdb.transaction_start()
self.ntdb["bloe"] = "1"
self.ntdb.transaction_cancel()
self.assertEquals("2", self.ntdb["bloe"])
def test_transaction_commit(self):
self.ntdb["bloe"] = "2"
self.ntdb.transaction_start()
self.ntdb["bloe"] = "1"
self.ntdb.transaction_commit()
self.assertEquals("1", self.ntdb["bloe"])
def test_transaction_prepare_commit(self):
self.ntdb["bloe"] = "2"
self.ntdb.transaction_start()
self.ntdb["bloe"] = "1"
self.ntdb.transaction_prepare_commit()
self.ntdb.transaction_commit()
self.assertEquals("1", self.ntdb["bloe"])
def test_iterkeys(self):
self.ntdb["bloe"] = "2"
self.ntdb["bla"] = "25"
i = self.ntdb.iterkeys()
self.assertEquals(set(["bloe", "bla"]), set([i.next(), i.next()]))
def test_clear(self):
self.ntdb["bloe"] = "2"
self.ntdb["bla"] = "25"
self.assertEquals(2, len(list(self.ntdb)))
self.ntdb.clear()
self.assertEquals(0, len(list(self.ntdb)))
def test_len(self):
self.assertEquals(0, len(list(self.ntdb)))
self.ntdb["entry"] = "value"
self.assertEquals(1, len(list(self.ntdb)))
def test_add_flags(self):
self.ntdb.add_flag(ntdb.NOMMAP)
self.ntdb.remove_flag(ntdb.NOMMAP)
class VersionTests(TestCase):
def test_present(self):
self.assertTrue(isinstance(ntdb.__version__, str))
if __name__ == '__main__':
import unittest
unittest.TestProgram()
| gpl-3.0 |
junmin-zhu/chromium-rivertrail | third_party/closure_linter/closure_linter/requireprovidesorter.py | 137 | 9826 | #!/usr/bin/env python
#
# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains logic for sorting goog.provide and goog.require statements.
Closurized JavaScript files use goog.provide and goog.require statements at the
top of the file to manage dependencies. These statements should be sorted
alphabetically, however, it is common for them to be accompanied by inline
comments or suppression annotations. In order to sort these statements without
disrupting their comments and annotations, the association between statements
and comments/annotations must be maintained while sorting.
RequireProvideSorter: Handles checking/fixing of provide/require statements.
"""
from closure_linter import javascripttokens
from closure_linter import tokenutil
# Shorthand
Type = javascripttokens.JavaScriptTokenType
class RequireProvideSorter(object):
"""Checks for and fixes alphabetization of provide and require statements.
When alphabetizing, comments on the same line or comments directly above a
goog.provide or goog.require statement are associated with that statement and
stay with the statement as it gets sorted.
"""
def CheckProvides(self, token):
"""Checks alphabetization of goog.provide statements.
Iterates over tokens in given token stream, identifies goog.provide tokens,
and checks that they occur in alphabetical order by the object being
provided.
Args:
token: A token in the token stream before any goog.provide tokens.
Returns:
A tuple containing the first provide token in the token stream and a list
of provided objects sorted alphabetically. For example:
(JavaScriptToken, ['object.a', 'object.b', ...])
None is returned if all goog.provide statements are already sorted.
"""
provide_tokens = self._GetRequireOrProvideTokens(token, 'goog.provide')
provide_strings = self._GetRequireOrProvideTokenStrings(provide_tokens)
sorted_provide_strings = sorted(provide_strings)
if provide_strings != sorted_provide_strings:
      return (provide_tokens[0], sorted_provide_strings)
return None
def CheckRequires(self, token):
"""Checks alphabetization of goog.require statements.
Iterates over tokens in given token stream, identifies goog.require tokens,
and checks that they occur in alphabetical order by the dependency being
required.
Args:
token: A token in the token stream before any goog.require tokens.
Returns:
A tuple containing the first require token in the token stream and a list
of required dependencies sorted alphabetically. For example:
(JavaScriptToken, ['object.a', 'object.b', ...])
None is returned if all goog.require statements are already sorted.
"""
require_tokens = self._GetRequireOrProvideTokens(token, 'goog.require')
require_strings = self._GetRequireOrProvideTokenStrings(require_tokens)
sorted_require_strings = sorted(require_strings)
if require_strings != sorted_require_strings:
return (require_tokens[0], sorted_require_strings)
return None
def FixProvides(self, token):
"""Sorts goog.provide statements in the given token stream alphabetically.
Args:
token: The first token in the token stream.
"""
self._FixProvidesOrRequires(
self._GetRequireOrProvideTokens(token, 'goog.provide'))
def FixRequires(self, token):
"""Sorts goog.require statements in the given token stream alphabetically.
Args:
token: The first token in the token stream.
"""
self._FixProvidesOrRequires(
self._GetRequireOrProvideTokens(token, 'goog.require'))
def _FixProvidesOrRequires(self, tokens):
"""Sorts goog.provide or goog.require statements.
Args:
tokens: A list of goog.provide or goog.require tokens in the order they
appear in the token stream. i.e. the first token in this list must
be the first goog.provide or goog.require token.
"""
strings = self._GetRequireOrProvideTokenStrings(tokens)
sorted_strings = sorted(strings)
# Make a separate pass to remove any blank lines between goog.require/
# goog.provide tokens.
first_token = tokens[0]
last_token = tokens[-1]
i = last_token
while i != first_token:
if i.type is Type.BLANK_LINE:
tokenutil.DeleteToken(i)
i = i.previous
# A map from required/provided object name to tokens that make up the line
# it was on, including any comments immediately before it or after it on the
# same line.
tokens_map = self._GetTokensMap(tokens)
# Iterate over the map removing all tokens.
for name in tokens_map:
tokens_to_delete = tokens_map[name]
for i in tokens_to_delete:
tokenutil.DeleteToken(i)
# Re-add all tokens in the map in alphabetical order.
insert_after = tokens[0].previous
for string in sorted_strings:
for i in tokens_map[string]:
tokenutil.InsertTokenAfter(i, insert_after)
insert_after = i
def _GetRequireOrProvideTokens(self, token, token_string):
"""Gets all goog.provide or goog.require tokens in the given token stream.
Args:
token: The first token in the token stream.
token_string: One of 'goog.provide' or 'goog.require' to indicate which
tokens to find.
Returns:
A list of goog.provide or goog.require tokens in the order they appear in
the token stream.
"""
tokens = []
while token:
if token.type == Type.IDENTIFIER:
if token.string == token_string:
tokens.append(token)
elif token.string not in ['goog.require', 'goog.provide']:
# The goog.provide and goog.require identifiers are at the top of the
# file. So if any other identifier is encountered, return.
break
token = token.next
return tokens
def _GetRequireOrProvideTokenStrings(self, tokens):
"""Gets a list of strings corresponding to the given list of tokens.
The string will be the next string in the token stream after each token in
tokens. This is used to find the object being provided/required by a given
goog.provide or goog.require token.
Args:
tokens: A list of goog.provide or goog.require tokens.
Returns:
A list of object names that are being provided or required by the given
list of tokens. For example:
['object.a', 'object.c', 'object.b']
"""
token_strings = []
for token in tokens:
name = tokenutil.Search(token, Type.STRING_TEXT).string
token_strings.append(name)
return token_strings
def _GetTokensMap(self, tokens):
"""Gets a map from object name to tokens associated with that object.
Starting from the goog.provide/goog.require token, searches backwards in the
token stream for any lines that start with a comment. These lines are
associated with the goog.provide/goog.require token. Also associates any
tokens on the same line as the goog.provide/goog.require token with that
token.
Args:
tokens: A list of goog.provide or goog.require tokens.
Returns:
A dictionary that maps object names to the tokens associated with the
goog.provide or goog.require of that object name. For example:
{
'object.a': [JavaScriptToken, JavaScriptToken, ...],
'object.b': [...]
}
The list of tokens includes any comment lines above the goog.provide or
goog.require statement and everything after the statement on the same
line. For example, all of the following would be associated with
'object.a':
/** @suppress {extraRequire} */
goog.require('object.a'); // Some comment.
"""
tokens_map = {}
for token in tokens:
object_name = tokenutil.Search(token, Type.STRING_TEXT).string
# If the previous line starts with a comment, presume that the comment
# relates to the goog.require or goog.provide and keep them together when
# sorting.
first_token = token
previous_first_token = tokenutil.GetFirstTokenInPreviousLine(first_token)
while previous_first_token.IsAnyType(Type.COMMENT_TYPES):
first_token = previous_first_token
previous_first_token = tokenutil.GetFirstTokenInPreviousLine(
first_token)
# Find the last token on the line.
last_token = tokenutil.GetLastTokenInSameLine(token)
all_tokens = self._GetTokenList(first_token, last_token)
tokens_map[object_name] = all_tokens
return tokens_map
def _GetTokenList(self, first_token, last_token):
"""Gets a list of all tokens from first_token to last_token, inclusive.
Args:
first_token: The first token to get.
last_token: The last token to get.
Returns:
A list of all tokens between first_token and last_token, including both
first_token and last_token.
Raises:
Exception: If the token stream ends before last_token is reached.
"""
token_list = []
token = first_token
while token != last_token:
if not token:
raise Exception('ran out of tokens')
token_list.append(token)
token = token.next
token_list.append(last_token)
return token_list
| bsd-3-clause |
dmccue/ansible | v1/ansible/inventory/host.py | 132 | 2084 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import ansible.constants as C
from ansible import utils
class Host(object):
''' a single ansible host '''
__slots__ = [ 'name', 'vars', 'groups' ]
def __init__(self, name=None, port=None):
self.name = name
self.vars = {}
self.groups = []
if port and port != C.DEFAULT_REMOTE_PORT:
self.set_variable('ansible_ssh_port', int(port))
if self.name is None:
raise Exception("host name is required")
def add_group(self, group):
self.groups.append(group)
def set_variable(self, key, value):
        self.vars[key] = value
def get_groups(self):
groups = {}
for g in self.groups:
groups[g.name] = g
ancestors = g.get_ancestors()
for a in ancestors:
groups[a.name] = a
return groups.values()
def get_variables(self):
results = {}
groups = self.get_groups()
for group in sorted(groups, key=lambda g: g.depth):
results = utils.combine_vars(results, group.get_variables())
results = utils.combine_vars(results, self.vars)
results['inventory_hostname'] = self.name
results['inventory_hostname_short'] = self.name.split('.')[0]
results['group_names'] = sorted([ g.name for g in groups if g.name != 'all'])
return results
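    # Usage sketch (hypothetical values):
    #   h = Host(name="web1.example.com", port=2222)
    #   h.set_variable("role", "frontend")
    #   h.get_variables()["ansible_ssh_port"]  # -> 2222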
| gpl-3.0 |
mcanthony/nupic | src/nupic/datafiles/extra/secondOrder/makeDataset.py | 34 | 18958 | #! /usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Generate artificial datasets
"""
import numpy
from nupic.data.file_record_stream import FileRecordStream
def _generateModel0(numCategories):
""" Generate the initial, first order, and second order transition
probabilities for 'model0'. For this model, we generate the following
set of sequences:
1-2-3 (4X)
1-2-4 (1X)
5-2-3 (1X)
5-2-4 (4X)
Parameters:
----------------------------------------------------------------------
numCategories: Number of categories
retval: (initProb, firstOrder, secondOrder, seqLen)
initProb: Initial probability for each category. This is a vector
of length len(categoryList).
firstOrder: A dictionary of the 1st order probabilities. The key
is the 1st element of the sequence, the value is
the probability of each 2nd element given the first.
secondOrder: A dictionary of the 2nd order probabilities. The key
is the first 2 elements of the sequence, the value is
the probability of each possible 3rd element given the
first two.
seqLen: Desired length of each sequence. The 1st element will
be generated using the initProb, the 2nd element by the
firstOrder table, and the 3rd and all successive
elements by the secondOrder table.
Here is an example of some return values:
initProb: [0.7, 0.2, 0.1]
firstOrder: {'[0]': [0.3, 0.3, 0.4],
'[1]': [0.3, 0.3, 0.4],
'[2]': [0.3, 0.3, 0.4]}
secondOrder: {'[0,0]': [0.3, 0.3, 0.4],
'[0,1]': [0.3, 0.3, 0.4],
'[0,2]': [0.3, 0.3, 0.4],
'[1,0]': [0.3, 0.3, 0.4],
'[1,1]': [0.3, 0.3, 0.4],
'[1,2]': [0.3, 0.3, 0.4],
'[2,0]': [0.3, 0.3, 0.4],
'[2,1]': [0.3, 0.3, 0.4],
'[2,2]': [0.3, 0.3, 0.4]}
"""
# ===============================================================
# Let's model the following:
# a-b-c (4X)
# a-b-d (1X)
# e-b-c (1X)
# e-b-d (4X)
# --------------------------------------------------------------------
# Initial probabilities, 'a' and 'e' equally likely
initProb = numpy.zeros(numCategories)
initProb[0] = 0.5
initProb[4] = 0.5
# --------------------------------------------------------------------
# 1st order transitions
# both 'a' and 'e' should lead to 'b'
firstOrder = dict()
for catIdx in range(numCategories):
key = str([catIdx])
probs = numpy.ones(numCategories) / numCategories
if catIdx == 0 or catIdx == 4:
probs.fill(0)
probs[1] = 1.0 # lead only to b
firstOrder[key] = probs
# --------------------------------------------------------------------
# 2nd order transitions
# a-b should lead to c 80% and d 20%
# e-b should lead to c 20% and d 80%
secondOrder = dict()
for firstIdx in range(numCategories):
for secondIdx in range(numCategories):
key = str([firstIdx, secondIdx])
probs = numpy.ones(numCategories) / numCategories
if key == str([0,1]):
probs.fill(0)
probs[2] = 0.80 # 'ab' leads to 'c' 80% of the time
probs[3] = 0.20 # 'ab' leads to 'd' 20% of the time
elif key == str([4,1]):
probs.fill(0)
probs[2] = 0.20 # 'eb' leads to 'c' 20% of the time
probs[3] = 0.80 # 'eb' leads to 'd' 80% of the time
secondOrder[key] = probs
return (initProb, firstOrder, secondOrder, 3)
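# Worked example for model0 (follows from the tables above): a sequence
# starts with 'a' (p=0.5), the 1st order table then forces 'b', and the
# 2nd order row for [a, b] yields 'c' with p=0.8, so a-b-c has overall
# probability 0.5 * 1.0 * 0.8 = 0.4 (the 4X weight in the docstring),
# while a-b-d gets 0.5 * 1.0 * 0.2 = 0.1 (the 1X weight).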
def _generateModel1(numCategories):
""" Generate the initial, first order, and second order transition
probabilities for 'model1'. For this model, we generate the following
set of sequences:
0-10-15 (1X)
0-11-16 (1X)
0-12-17 (1X)
0-13-18 (1X)
0-14-19 (1X)
1-10-20 (1X)
1-11-21 (1X)
1-12-22 (1X)
1-13-23 (1X)
1-14-24 (1X)
Parameters:
----------------------------------------------------------------------
numCategories: Number of categories
retval: (initProb, firstOrder, secondOrder, seqLen)
initProb: Initial probability for each category. This is a vector
of length len(categoryList).
firstOrder: A dictionary of the 1st order probabilities. The key
is the 1st element of the sequence, the value is
the probability of each 2nd element given the first.
secondOrder: A dictionary of the 2nd order probabilities. The key
is the first 2 elements of the sequence, the value is
the probability of each possible 3rd element given the
first two.
seqLen: Desired length of each sequence. The 1st element will
be generated using the initProb, the 2nd element by the
firstOrder table, and the 3rd and all successive
elements by the secondOrder table.
Here is an example of some return values:
initProb: [0.7, 0.2, 0.1]
firstOrder: {'[0]': [0.3, 0.3, 0.4],
'[1]': [0.3, 0.3, 0.4],
'[2]': [0.3, 0.3, 0.4]}
secondOrder: {'[0,0]': [0.3, 0.3, 0.4],
'[0,1]': [0.3, 0.3, 0.4],
'[0,2]': [0.3, 0.3, 0.4],
'[1,0]': [0.3, 0.3, 0.4],
'[1,1]': [0.3, 0.3, 0.4],
'[1,2]': [0.3, 0.3, 0.4],
'[2,0]': [0.3, 0.3, 0.4],
'[2,1]': [0.3, 0.3, 0.4],
'[2,2]': [0.3, 0.3, 0.4]}
"""
# --------------------------------------------------------------------
# Initial probabilities, 0 and 1 equally likely
initProb = numpy.zeros(numCategories)
initProb[0] = 0.5
initProb[1] = 0.5
# --------------------------------------------------------------------
# 1st order transitions
# both 0 and 1 should lead to 10,11,12,13,14 with equal probability
firstOrder = dict()
for catIdx in range(numCategories):
key = str([catIdx])
probs = numpy.ones(numCategories) / numCategories
if catIdx == 0 or catIdx == 1:
indices = numpy.array([10,11,12,13,14])
probs.fill(0)
      probs[indices] = 1.0  # lead only to 10-14
probs /= probs.sum()
firstOrder[key] = probs
# --------------------------------------------------------------------
# 2nd order transitions
# 0-10 should lead to 15
# 0-11 to 16
# ...
# 1-10 should lead to 20
  # 1-11 should lead to 21
# ...
secondOrder = dict()
for firstIdx in range(numCategories):
for secondIdx in range(numCategories):
key = str([firstIdx, secondIdx])
probs = numpy.ones(numCategories) / numCategories
if key == str([0,10]):
probs.fill(0)
probs[15] = 1
elif key == str([0,11]):
probs.fill(0)
probs[16] = 1
elif key == str([0,12]):
probs.fill(0)
probs[17] = 1
elif key == str([0,13]):
probs.fill(0)
probs[18] = 1
elif key == str([0,14]):
probs.fill(0)
probs[19] = 1
elif key == str([1,10]):
probs.fill(0)
probs[20] = 1
elif key == str([1,11]):
probs.fill(0)
probs[21] = 1
elif key == str([1,12]):
probs.fill(0)
probs[22] = 1
elif key == str([1,13]):
probs.fill(0)
probs[23] = 1
elif key == str([1,14]):
probs.fill(0)
probs[24] = 1
secondOrder[key] = probs
return (initProb, firstOrder, secondOrder, 3)
def _generateModel2(numCategories, alpha=0.25):
""" Generate the initial, first order, and second order transition
probabilities for 'model2'. For this model, we generate peaked random
transitions using dirichlet distributions.
Parameters:
----------------------------------------------------------------------
numCategories: Number of categories
alpha: Determines the peakedness of the transitions. Low alpha
values (alpha=0.01) place the entire weight on a single
transition. Large alpha values (alpha=10) distribute the
evenly among all transitions. Intermediate values (alpha=0.5)
give a moderately peaked transitions.
retval: (initProb, firstOrder, secondOrder, seqLen)
initProb: Initial probability for each category. This is a vector
of length len(categoryList).
firstOrder: A dictionary of the 1st order probabilities. The key
is the 1st element of the sequence, the value is
the probability of each 2nd element given the first.
secondOrder: A dictionary of the 2nd order probabilities. The key
is the first 2 elements of the sequence, the value is
the probability of each possible 3rd element given the
first two.
seqLen: Desired length of each sequence. The 1st element will
be generated using the initProb, the 2nd element by the
firstOrder table, and the 3rd and all successive
elements by the secondOrder table. None means infinite
length.
Here is an example of some return values for an intermediate alpha value:
initProb: [0.33, 0.33, 0.33]
firstOrder: {'[0]': [0.2, 0.7, 0.1],
'[1]': [0.1, 0.1, 0.8],
'[2]': [0.1, 0.0, 0.9]}
secondOrder: {'[0,0]': [0.1, 0.0, 0.9],
'[0,1]': [0.0, 0.2, 0.8],
'[0,2]': [0.1, 0.8, 0.1],
...
'[2,2]': [0.8, 0.2, 0.0]}
"""
# --------------------------------------------------------------------
# All initial probabilities, are equally likely
initProb = numpy.ones(numCategories)/numCategories
def generatePeakedProbabilities(lastIdx,
numCategories=numCategories,
alpha=alpha):
probs = numpy.random.dirichlet(alpha=[alpha]*numCategories)
probs[lastIdx] = 0.0
probs /= probs.sum()
return probs
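  # Illustrative draws (actual values are random): with alpha=0.01 a draw
  # is nearly one-hot, e.g. [0.0, 0.98, 0.02, ...]; with alpha=10 it is
  # close to uniform, e.g. roughly [0.33, 0.34, 0.33] for three categories.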
# --------------------------------------------------------------------
# 1st order transitions
firstOrder = dict()
for catIdx in range(numCategories):
key = str([catIdx])
probs = generatePeakedProbabilities(catIdx)
firstOrder[key] = probs
# --------------------------------------------------------------------
# 2nd order transitions
secondOrder = dict()
for firstIdx in range(numCategories):
for secondIdx in range(numCategories):
key = str([firstIdx, secondIdx])
probs = generatePeakedProbabilities(secondIdx)
secondOrder[key] = probs
return (initProb, firstOrder, secondOrder, None)
def _generateFile(filename, numRecords, categoryList, initProb,
firstOrderProb, secondOrderProb, seqLen, numNoise=0, resetsEvery=None):
""" Generate a set of records reflecting a set of probabilities.
Parameters:
----------------------------------------------------------------
filename: name of .csv file to generate
numRecords: number of records to generate
categoryList: list of category names
initProb: Initial probability for each category. This is a vector
of length len(categoryList).
firstOrderProb: A dictionary of the 1st order probabilities. The key
is the 1st element of the sequence, the value is
the probability of each 2nd element given the first.
secondOrderProb: A dictionary of the 2nd order probabilities. The key
is the first 2 elements of the sequence, the value is
the probability of each possible 3rd element given the
first two.
seqLen: Desired length of each sequence. The 1st element will
be generated using the initProb, the 2nd element by the
firstOrder table, and the 3rd and all successive
elements by the secondOrder table. None means infinite
length.
numNoise: Number of noise elements to place between each
sequence. The noise elements are evenly distributed from
all categories.
resetsEvery: If not None, generate a reset every N records
Here is an example of some parameters:
categoryList: ['cat1', 'cat2', 'cat3']
initProb: [0.7, 0.2, 0.1]
firstOrderProb: {'[0]': [0.3, 0.3, 0.4],
'[1]': [0.3, 0.3, 0.4],
'[2]': [0.3, 0.3, 0.4]}
secondOrderProb: {'[0,0]': [0.3, 0.3, 0.4],
'[0,1]': [0.3, 0.3, 0.4],
'[0,2]': [0.3, 0.3, 0.4],
'[1,0]': [0.3, 0.3, 0.4],
'[1,1]': [0.3, 0.3, 0.4],
'[1,2]': [0.3, 0.3, 0.4],
'[2,0]': [0.3, 0.3, 0.4],
'[2,1]': [0.3, 0.3, 0.4],
'[2,2]': [0.3, 0.3, 0.4]}
"""
# Create the file
print "Creating %s..." % (filename)
fields = [('reset', 'int', 'R'), ('name', 'string', '')]
outFile = FileRecordStream(filename, write=True, fields=fields)
# --------------------------------------------------------------------
  # Convert the probability tables into cumulative probabilities
initCumProb = initProb.cumsum()
firstOrderCumProb = dict()
for (key,value) in firstOrderProb.iteritems():
firstOrderCumProb[key] = value.cumsum()
secondOrderCumProb = dict()
for (key,value) in secondOrderProb.iteritems():
secondOrderCumProb[key] = value.cumsum()
# --------------------------------------------------------------------
# Write out the sequences
elementsInSeq = []
numElementsSinceReset = 0
maxCatIdx = len(categoryList) - 1
for i in xrange(numRecords):
# Generate a reset?
if numElementsSinceReset == 0:
reset = 1
else:
reset = 0
    # Pick the next element, based on how far we are into the 2nd order
    # sequence.
rand = numpy.random.rand()
if len(elementsInSeq) == 0:
catIdx = numpy.searchsorted(initCumProb, rand)
elif len(elementsInSeq) == 1:
catIdx = numpy.searchsorted(firstOrderCumProb[str(elementsInSeq)], rand)
    elif (len(elementsInSeq) >= 2) and \
(seqLen is None or len(elementsInSeq) < seqLen-numNoise):
catIdx = numpy.searchsorted(secondOrderCumProb[str(elementsInSeq[-2:])], rand)
else: # random "noise"
catIdx = numpy.random.randint(len(categoryList))
# Write out the record
catIdx = min(maxCatIdx, catIdx)
outFile.appendRecord([reset,categoryList[catIdx]])
#print categoryList[catIdx]
# ------------------------------------------------------------
# Increment counters
elementsInSeq.append(catIdx)
numElementsSinceReset += 1
# Generate another reset?
if resetsEvery is not None and numElementsSinceReset == resetsEvery:
numElementsSinceReset = 0
elementsInSeq = []
# Start another 2nd order sequence?
if seqLen is not None and (len(elementsInSeq) == seqLen+numNoise):
elementsInSeq = []
outFile.close()
def generate(model, filenameTrain, filenameTest, filenameCategory,
numCategories=178, numTrainingRecords=1000,
numTestingRecords=100, numNoise=5, resetsEvery=None):
numpy.random.seed(41)
# =====================================================================
# Create our categories and category file.
print "Creating %s..." % (filenameCategory)
categoryList = ['cat%d' % i for i in range(1, numCategories+1)]
categoryFile = open(filenameCategory, 'w')
for category in categoryList:
categoryFile.write(category+'\n')
categoryFile.close()
# ====================================================================
# Generate the model
if model == 'model0':
(initProb, firstOrderProb, secondOrderProb, seqLen) = \
_generateModel0(numCategories)
elif model == 'model1':
(initProb, firstOrderProb, secondOrderProb, seqLen) = \
_generateModel1(numCategories)
elif model == 'model2':
(initProb, firstOrderProb, secondOrderProb, seqLen) = \
_generateModel2(numCategories)
else:
raise RuntimeError("Unsupported model")
# ====================================================================
# Generate the training and testing files
_generateFile(filename=filenameTrain, numRecords=numTrainingRecords,
categoryList=categoryList, initProb=initProb,
firstOrderProb=firstOrderProb, secondOrderProb=secondOrderProb,
seqLen=seqLen, numNoise=numNoise, resetsEvery=resetsEvery)
_generateFile(filename=filenameTest, numRecords=numTestingRecords,
categoryList=categoryList, initProb=initProb,
firstOrderProb=firstOrderProb, secondOrderProb=secondOrderProb,
seqLen=seqLen, numNoise=numNoise, resetsEvery=resetsEvery)
| agpl-3.0 |
Nic30/hwtLib | hwtLib/tests/all.py | 1 | 22364 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
from unittest import TestLoader, TextTestRunner, TestSuite
from hwt.simulator.simTestCase import SimTestCase
from hwtLib.abstract.busEndpoint_test import BusEndpointTC
from hwtLib.abstract.frame_utils.alignment_utils_test import FrameAlignmentUtilsTC
from hwtLib.abstract.frame_utils.join.test import FrameJoinUtilsTC
from hwtLib.abstract.template_configured_test import TemplateConfigured_TC
from hwtLib.amba.axiLite_comp.buff_test import AxiRegTC
from hwtLib.amba.axiLite_comp.endpoint_arr_test import AxiLiteEndpointArrTCs
from hwtLib.amba.axiLite_comp.endpoint_fromInterfaces_test import \
AxiLiteEndpoint_fromInterfaceTC, AxiLiteEndpoint_fromInterface_arr_TC
from hwtLib.amba.axiLite_comp.endpoint_struct_test import \
AxiLiteEndpoint_arrayStruct_TC, AxiLiteEndpoint_struct_TC
from hwtLib.amba.axiLite_comp.endpoint_test import AxiLiteEndpointTCs
from hwtLib.amba.axiLite_comp.to_axi_test import AxiLite_to_Axi_TC
from hwtLib.amba.axi_comp.cache.cacheWriteAllocWawOnlyWritePropagating_test import AxiCacheWriteAllocWawOnlyWritePropagatingTCs
from hwtLib.amba.axi_comp.cache.pseudo_lru_test import PseudoLru_TC
from hwtLib.amba.axi_comp.interconnect.matrixAddrCrossbar_test import\
AxiInterconnectMatrixAddrCrossbar_TCs
from hwtLib.amba.axi_comp.interconnect.matrixCrossbar_test import \
AxiInterconnectMatrixCrossbar_TCs
from hwtLib.amba.axi_comp.interconnect.matrixR_test import AxiInterconnectMatrixR_TCs
from hwtLib.amba.axi_comp.interconnect.matrixW_test import AxiInterconnectMatrixW_TCs
from hwtLib.amba.axi_comp.lsu.read_aggregator_test import AxiReadAggregator_TCs
from hwtLib.amba.axi_comp.lsu.store_queue_write_propagating_test import AxiStoreQueueWritePropagating_TCs
from hwtLib.amba.axi_comp.lsu.write_aggregator_test import AxiWriteAggregator_TCs
from hwtLib.amba.axi_comp.oooOp.examples.counterArray_test import OooOpExampleCounterArray_TCs
from hwtLib.amba.axi_comp.oooOp.examples.counterHashTable_test import OooOpExampleCounterHashTable_TC
from hwtLib.amba.axi_comp.resize_test import AxiResizeTC
from hwtLib.amba.axi_comp.sim.ag_test import Axi_ag_TC
from hwtLib.amba.axi_comp.slave_timeout_test import AxiSlaveTimeoutTC
from hwtLib.amba.axi_comp.static_remap_test import AxiStaticRemapTCs
from hwtLib.amba.axi_comp.stream_to_mem_test import Axi4_streamToMemTC
from hwtLib.amba.axi_comp.tester_test import AxiTesterTC
from hwtLib.amba.axi_comp.to_axiLite_test import Axi_to_AxiLite_TC
from hwtLib.amba.axi_test import AxiTC
from hwtLib.amba.axis_comp.en_test import AxiS_en_TC
from hwtLib.amba.axis_comp.fifoDrop_test import AxiSFifoDropTC
from hwtLib.amba.axis_comp.fifoMeasuring_test import AxiS_fifoMeasuringTC
from hwtLib.amba.axis_comp.frameGen_test import AxisFrameGenTC
from hwtLib.amba.axis_comp.frame_deparser.test import AxiS_frameDeparser_TC
from hwtLib.amba.axis_comp.frame_join.test import AxiS_FrameJoin_TCs
from hwtLib.amba.axis_comp.frame_parser.footer_split_test import AxiS_footerSplitTC
from hwtLib.amba.axis_comp.frame_parser.test import AxiS_frameParserTC
from hwtLib.amba.axis_comp.resizer_test import AxiS_resizer_TCs
from hwtLib.amba.axis_comp.storedBurst_test import AxiSStoredBurstTC
from hwtLib.amba.axis_comp.strformat_test import AxiS_strFormat_TC
from hwtLib.amba.datapump.interconnect.rStrictOrder_test import \
RStrictOrderInterconnectTC
from hwtLib.amba.datapump.interconnect.wStrictOrderComplex_test import \
WStrictOrderInterconnectComplexTC
from hwtLib.amba.datapump.interconnect.wStrictOrder_test import \
WStrictOrderInterconnectTC, WStrictOrderInterconnect2TC
from hwtLib.amba.datapump.r_aligned_test import Axi_rDatapump_alignedTCs
from hwtLib.amba.datapump.r_unaligned_test import Axi_rDatapump_unalignedTCs
from hwtLib.amba.datapump.w_test import Axi_wDatapumpTCs
from hwtLib.avalon.axiToMm_test import AxiToAvalonMm_TCs
from hwtLib.avalon.endpoint_test import AvalonMmEndpointTCs
from hwtLib.avalon.mm_buff_test import AvalonMmBuff_TC
from hwtLib.avalon.sim.mmAgent_test import AvalonMmAgentTC
from hwtLib.avalon.sim.stAgent_test import AvalonStAgentTC
from hwtLib.cesnet.mi32.axi4Lite_bridges_test import Mi32Axi4LiteBrigesTC
from hwtLib.cesnet.mi32.endpoint_test import Mi32EndpointTCs
from hwtLib.cesnet.mi32.interconnectMatrix_test import Mi32InterconnectMatrixTC
from hwtLib.cesnet.mi32.mi32agent_test import Mi32AgentTC
from hwtLib.cesnet.mi32.sliding_window_test import Mi32SlidingWindowTC
from hwtLib.cesnet.mi32.to_axi4Lite_test import Mi32_to_Axi4LiteTC
from hwtLib.clocking.cdc_test import CdcTC
from hwtLib.common_nonstd_interfaces.addr_data_hs_to_Axi_test import AddrDataHs_to_Axi_TCs
from hwtLib.examples.arithmetic.cntr_test import CntrTC, CntrResourceAnalysisTC
from hwtLib.examples.arithmetic.multiplierBooth_test import MultiplierBoothTC
from hwtLib.examples.arithmetic.privateSignals_test import PrivateSignalsOfStructTypeTC
from hwtLib.examples.arithmetic.selfRefCntr_test import SelfRefCntrTC
from hwtLib.examples.arithmetic.twoCntrs_test import TwoCntrsTC
from hwtLib.examples.arithmetic.vhdl_vector_auto_casts import VhdlVectorAutoCastExampleTC
from hwtLib.examples.arithmetic.widthCasting import WidthCastingExampleTC
from hwtLib.examples.axi.debugbusmonitor_test import DebugBusMonitorExampleAxiTC
from hwtLib.examples.axi.simpleAxiRegs_test import SimpleAxiRegsTC
from hwtLib.examples.builders.ethAddrUpdater_test import EthAddrUpdaterTCs
from hwtLib.examples.builders.handshakedBuilderSimple import \
HandshakedBuilderSimpleTC
from hwtLib.examples.builders.hsBuilderSplit_test import HsBuilderSplit_TC
from hwtLib.examples.builders.hwException_test import HwExceptionCatch_TC
from hwtLib.examples.builders.pingResponder_test import PingResponderTC
from hwtLib.examples.emptyUnitWithSpi import EmptyUnitWithSpiTC
from hwtLib.examples.errors.combLoops import CombLoopAnalysisTC
from hwtLib.examples.errors.errors_test import ErrorsTC
from hwtLib.examples.hdlComments_test import HdlCommentsTC
from hwtLib.examples.hdlObjLists.listOfInterfaces0 import ListOfInterfacesSample0TC
from hwtLib.examples.hdlObjLists.listOfInterfaces1 import ListOfInterfacesSample1TC
from hwtLib.examples.hdlObjLists.listOfInterfaces2 import ListOfInterfacesSample2TC
from hwtLib.examples.hdlObjLists.listOfInterfaces3 import ListOfInterfacesSample3TC
from hwtLib.examples.hdlObjLists.listOfInterfaces4 import ListOfInterfacesSample4TC
from hwtLib.examples.hierarchy.hierarchySerialization_test import \
HierarchySerializationTC
from hwtLib.examples.hierarchy.simpleSubunit2 import SimpleSubunit2TC
from hwtLib.examples.hierarchy.simpleSubunit3 import SimpleSubunit3TC
from hwtLib.examples.hierarchy.simpleSubunit_test import SimpleSubunitTC
from hwtLib.examples.hierarchy.unitToUnitConnection import \
UnitToUnitConnectionTC
from hwtLib.examples.hierarchy.unitWrapper_test import UnitWrapperTC
from hwtLib.examples.mem.avalonmm_ram_test import AvalonMmBram_TC
from hwtLib.examples.mem.axi_ram_test import Axi4BRam_TC
from hwtLib.examples.mem.bram_wire import BramWireTC
from hwtLib.examples.mem.ram_test import RamResourcesTC, \
SimpleAsyncRamTC, SimpleSyncRamTC
from hwtLib.examples.mem.reg_test import DRegTC, RegSerializationTC, \
DoubleRRegTC, DReg_asyncRstTC
from hwtLib.examples.mem.rom_test import SimpleRomTC, SimpleSyncRomTC, \
RomResourcesTC
from hwtLib.examples.operators.cast_test import CastTc
from hwtLib.examples.operators.concat_test import ConcatTC
from hwtLib.examples.operators.indexing_test import IndexingTC
from hwtLib.examples.parametrization_test import ParametrizationTC
from hwtLib.examples.rtlLvl.rtlLvl_test import RtlLvlTC
from hwtLib.examples.showcase0_test import Showcase0TC
from hwtLib.examples.simple2withNonDirectIntConnection import \
Simple2withNonDirectIntConnectionTC
from hwtLib.examples.simpleAxiStream_test import SimpleUnitAxiStream_TC
from hwtLib.examples.simpleWithNonDirectIntConncetion import \
SimpleWithNonDirectIntConncetionTC
from hwtLib.examples.simpleWithParam import SimpleUnitWithParamTC
from hwtLib.examples.simple_test import SimpleTC
from hwtLib.examples.specialIntfTypes.intfWithArray import InterfaceWithArrayTypesTC
from hwtLib.examples.statements.codeBlockStm_test import CodeBlokStmTC
from hwtLib.examples.statements.constCondition import ConstConditionTC
from hwtLib.examples.statements.constDriver_test import ConstDriverTC
from hwtLib.examples.statements.forLoopCntrl_test import StaticForLoopCntrlTC
from hwtLib.examples.statements.fsm_test import FsmExampleTC, \
HadrcodedFsmExampleTC, FsmSerializationTC
from hwtLib.examples.statements.ifStm_test import IfStmTC
from hwtLib.examples.statements.switchStm_test import SwitchStmTC
from hwtLib.examples.statements.vldMaskConflictsResolving_test import \
VldMaskConflictsResolvingTC
from hwtLib.examples.timers import TimerTC
from hwtLib.handshaked.cdc_test import HandshakedCdc_slow_to_fast_TC, \
HandshakedCdc_fast_to_slow_TC
from hwtLib.handshaked.fifoAsync_test import HsFifoAsyncTC
from hwtLib.handshaked.fifo_test import HsFifoTC
from hwtLib.handshaked.handshakedToAxiStream_test import HandshakedToAxiStreamTCs
from hwtLib.handshaked.joinFair_test import HsJoinFair_2inputs_TC, \
HsJoinFair_3inputs_TC
from hwtLib.handshaked.joinPrioritized_test import HsJoinPrioritizedTC, \
HsJoinPrioritized_randomized_TC
from hwtLib.handshaked.ramAsHs_test import RamAsHs_TCs
from hwtLib.handshaked.reg_test import HandshakedRegTCs
from hwtLib.handshaked.resizer_test import HsResizerTC
from hwtLib.handshaked.splitCopy_test import HsSplitCopyTC, \
HsSplitCopy_randomized_TC
from hwtLib.img.charToBitmap_test import CharToBitmapTC
from hwtLib.logic.bcdToBin_test import BcdToBinTC
from hwtLib.logic.binToBcd_test import BinToBcdTC
from hwtLib.logic.binToOneHot import BinToOneHotTC
from hwtLib.logic.bitonicSorter import BitonicSorterTC
from hwtLib.logic.cntrGray import GrayCntrTC
from hwtLib.logic.countLeading_test import CountLeadingTC
from hwtLib.logic.crcComb_test import CrcCombTC
from hwtLib.logic.crcUtils_test import CrcUtilsTC
from hwtLib.logic.crc_test import CrcTC
from hwtLib.logic.lfsr import LfsrTC
from hwtLib.logic.oneHotToBin_test import OneHotToBinTC
from hwtLib.mem.atomic.flipCntr_test import FlipCntrTC
from hwtLib.mem.atomic.flipRam_test import FlipRamTC
from hwtLib.mem.atomic.flipReg_test import FlipRegTC
from hwtLib.mem.bramEndpoint_test import BramPortEndpointTCs
from hwtLib.mem.cam_test import CamTC
from hwtLib.mem.cuckooHashTableWithRam_test import CuckooHashTableWithRamTCs
from hwtLib.mem.fifoArray_test import FifoArrayTC
from hwtLib.mem.fifoAsync_test import FifoAsyncTC
from hwtLib.mem.fifo_test import FifoWriterAgentTC, FifoReaderAgentTC, FifoTC
from hwtLib.mem.hashTableCoreWithRam_test import HashTableCoreWithRamTC
from hwtLib.mem.lutRam_test import LutRamTC
from hwtLib.mem.ramTransactional_test import RamTransactionalTCs
from hwtLib.mem.ramXor_test import RamXorSingleClockTC
from hwtLib.mem.ram_test import RamTC
from hwtLib.peripheral.displays.hd44780.driver_test import Hd44780Driver8bTC
from hwtLib.peripheral.displays.segment7_test import Segment7TC
from hwtLib.peripheral.ethernet.mac_rx_test import EthernetMac_rx_TCs
from hwtLib.peripheral.ethernet.mac_tx_test import EthernetMac_tx_TCs
from hwtLib.peripheral.ethernet.rmii_adapter_test import RmiiAdapterTC
from hwtLib.peripheral.i2c.masterBitCntrl_test import I2CMasterBitCntrlTC
from hwtLib.peripheral.mdio.master_test import MdioMasterTC
from hwtLib.peripheral.spi.master_test import SpiMasterTC
from hwtLib.peripheral.uart.rx_test import UartRxTC, UartRxBasicTC
from hwtLib.peripheral.uart.tx_rx_test import UartTxRxTC
from hwtLib.peripheral.uart.tx_test import UartTxTC
from hwtLib.peripheral.usb.sim.usb_agent_test import UsbAgentTC
from hwtLib.peripheral.usb.sim.usbip.test import UsbipTCs
from hwtLib.peripheral.usb.usb2.device_cdc_vcp_test import Usb2CdcVcpTC
from hwtLib.peripheral.usb.usb2.sie_rx_test import Usb2SieDeviceRxTC
from hwtLib.peripheral.usb.usb2.sie_tx_test import Usb2SieDeviceTxTC
from hwtLib.peripheral.usb.usb2.ulpi_agent_test import UlpiAgent_TCs
from hwtLib.peripheral.usb.usb2.utmi_agent_test import UtmiAgentTCs
from hwtLib.peripheral.usb.usb2.utmi_to_ulpi_test import Utmi_to_UlpiTC
from hwtLib.structManipulators.arrayBuff_writer_test import ArrayBuff_writer_TC
from hwtLib.structManipulators.arrayItemGetter_test import ArrayItemGetterTC, \
ArrayItemGetter2in1WordTC
from hwtLib.structManipulators.cLinkedListReader_test import \
CLinkedListReaderTC
from hwtLib.structManipulators.cLinkedListWriter_test import \
CLinkedListWriterTC
from hwtLib.structManipulators.mmu2pageLvl_test import MMU_2pageLvl_TC
from hwtLib.structManipulators.structReader_test import StructReaderTC
from hwtLib.structManipulators.structWriter_test import StructWriter_TC
from hwtLib.tests.constraints.xdc_clock_related_test import ConstraintsXdcClockRelatedTC
from hwtLib.tests.frameTmpl_test import FrameTmplTC
from hwtLib.tests.pyUtils.arrayQuery_test import ArrayQueryTC
from hwtLib.tests.pyUtils.fileUtils_test import FileUtilsTC
from hwtLib.tests.rdSynced_agent_test import RdSynced_agent_TC
from hwtLib.tests.repr_of_hdlObjs_test import ReprOfHdlObjsTC
from hwtLib.tests.resourceAnalyzer_test import ResourceAnalyzer_TC
from hwtLib.tests.serialization.hdlReaname_test import SerializerHdlRename_TC
from hwtLib.tests.serialization.ipCorePackager_test import IpCorePackagerTC
from hwtLib.tests.serialization.modes_test import SerializerModes_TC
from hwtLib.tests.serialization.tmpVar_test import Serializer_tmpVar_TC
from hwtLib.tests.serialization.vhdl_test import Vhdl2008Serializer_TC
from hwtLib.tests.simulator.basicRtlSimulatorVcdTmpDirs_test import BasicRtlSimulatorVcdTmpDirs_TCs
from hwtLib.tests.simulator.json_log_test import HsFifoJsonLogTC
from hwtLib.tests.simulator.utils_test import SimulatorUtilsTC
from hwtLib.tests.structIntf_operator_test import StructIntf_operatorTC
from hwtLib.tests.synthesizer.astNodeIoReplacing_test import AstNodeIoReplacingTC
from hwtLib.tests.synthesizer.interfaceLevel.interfaceSynthesizerTC import \
InterfaceSynthesizerTC
from hwtLib.tests.synthesizer.interfaceLevel.subunitsSynthesisTC import \
SubunitsSynthesisTC
from hwtLib.tests.synthesizer.rtlLevel.basic_signal_methods_test import BasicSignalMethodsTC
from hwtLib.tests.synthesizer.rtlLevel.statements_consystency_test import StatementsConsystencyTC
from hwtLib.tests.synthesizer.statementTreesInternal_test import StatementTreesInternalTC
from hwtLib.tests.synthesizer.statementTrees_test import StatementTreesTC
from hwtLib.tests.synthesizer.statements_test import StatementsTC
from hwtLib.tests.transTmpl_test import TransTmpl_TC
from hwtLib.tests.types.bitsSlicing_test import BitsSlicingTC
from hwtLib.tests.types.hstructVal_test import HStructValTC
from hwtLib.tests.types.hvalue_test import HValueTC
from hwtLib.tests.types.operators_test import OperatorTC
from hwtLib.tests.types.union_test import UnionTC
from hwtLib.tests.unionIntf_test import UnionIntfTC
from hwtLib.xilinx.ipif.axi4Lite_to_ipif_test import Axi4Lite_to_IpifTC
from hwtLib.xilinx.ipif.buff_test import IpifBuffTC
from hwtLib.xilinx.ipif.endpoint_test import IpifEndpointTC, \
IpifEndpointDenseTC, IpifEndpointDenseStartTC, IpifEndpointArray
from hwtLib.xilinx.ipif.interconnectMatrix_test import IpifInterconnectMatrixTC
from hwtLib.xilinx.locallink.axis_conv_test import AxiS_localLinkConvTC
from hwtLib.xilinx.primitive.examples.dsp48e1Add_test import Dsp48e1Add_TCs
from hwtLib.xilinx.slr_crossing_test import HsSlrCrossingTC
# from hwt.simulator.simTestCase import SimTestCase
def testSuiteFromTCs(*tcs):
loader = TestLoader()
for tc in tcs:
if not issubclass(tc, SimTestCase):
tc._multiprocess_can_split_ = True
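            # Hint used by parallel runners (e.g. nose's multiprocess plugin):
            # methods of non-simulation TestCases may be split across processes.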
loadedTcs = [
loader.loadTestsFromTestCase(tc) for tc in tcs
# if not issubclass(tc, SimTestCase) # [debug] skip simulations
]
suite = TestSuite(loadedTcs)
return suite
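# The TCs below are grouped by the section comments and ordered roughly from
# cheap structural/serialization tests to full simulations.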
suite = testSuiteFromTCs(
# basic tests
FileUtilsTC,
ArrayQueryTC,
RtlLvlTC,
ReprOfHdlObjsTC,
HdlCommentsTC,
InterfaceSynthesizerTC,
SubunitsSynthesisTC,
EmptyUnitWithSpiTC,
Simple2withNonDirectIntConnectionTC,
SimpleWithNonDirectIntConncetionTC,
SimpleSubunit3TC,
UnitToUnitConnectionTC,
OperatorTC,
StructIntf_operatorTC,
CastTc,
BitsSlicingTC,
HStructValTC,
ParametrizationTC,
BasicSignalMethodsTC,
StatementsConsystencyTC,
HValueTC,
StatementTreesInternalTC,
StatementTreesTC,
StatementsTC,
AstNodeIoReplacingTC,
ErrorsTC,
StaticForLoopCntrlTC,
SimpleUnitWithParamTC,
SimpleSubunit2TC,
HierarchySerializationTC,
ListOfInterfacesSample0TC,
ListOfInterfacesSample1TC,
ListOfInterfacesSample2TC,
ListOfInterfacesSample3TC,
ListOfInterfacesSample4TC,
PrivateSignalsOfStructTypeTC,
FrameTmplTC,
Showcase0TC,
SimulatorUtilsTC,
HsFifoJsonLogTC,
RdSynced_agent_TC,
Segment7TC,
SerializerModes_TC,
Serializer_tmpVar_TC,
SerializerHdlRename_TC,
VhdlVectorAutoCastExampleTC,
TransTmpl_TC,
UnionTC,
UnionIntfTC,
ResourceAnalyzer_TC,
CombLoopAnalysisTC,
Vhdl2008Serializer_TC,
CodeBlokStmTC,
IfStmTC,
SwitchStmTC,
SimpleRomTC,
SimpleSyncRomTC,
RomResourcesTC,
DRegTC,
DoubleRRegTC,
DReg_asyncRstTC,
RegSerializationTC,
CntrTC,
CntrResourceAnalysisTC,
ConstConditionTC,
TemplateConfigured_TC,
FrameAlignmentUtilsTC,
FrameJoinUtilsTC,
HwExceptionCatch_TC,
PseudoLru_TC,
# tests of simple units
TimerTC,
ConcatTC,
VldMaskConflictsResolvingTC,
ConstDriverTC,
WidthCastingExampleTC,
SimpleTC,
SimpleSubunitTC,
RamTC,
RamXorSingleClockTC,
*RamTransactionalTCs,
BramWireTC,
LutRamTC,
FsmSerializationTC,
FsmExampleTC,
HadrcodedFsmExampleTC,
OneHotToBinTC,
BinToBcdTC,
BcdToBinTC,
AxiS_strFormat_TC,
BinToOneHotTC,
GrayCntrTC,
TwoCntrsTC,
SelfRefCntrTC,
CountLeadingTC,
MultiplierBoothTC,
IndexingTC,
CdcTC,
RamResourcesTC,
SimpleAsyncRamTC,
SimpleSyncRamTC,
SimpleUnitAxiStream_TC,
FifoWriterAgentTC,
FifoReaderAgentTC,
FifoTC,
FifoAsyncTC,
FifoArrayTC,
HsJoinPrioritizedTC,
HsJoinPrioritized_randomized_TC,
HsJoinFair_2inputs_TC,
HsJoinFair_3inputs_TC,
HandshakedCdc_slow_to_fast_TC,
HandshakedCdc_fast_to_slow_TC,
*HandshakedToAxiStreamTCs,
*RamAsHs_TCs,
LfsrTC,
BitonicSorterTC,
InterfaceWithArrayTypesTC,
FlipRegTC,
FlipCntrTC,
FlipRamTC,
HsSplitCopyTC,
HsSplitCopy_randomized_TC,
HsFifoTC,
HsFifoAsyncTC,
*HandshakedRegTCs,
HsResizerTC,
HsBuilderSplit_TC,
CamTC,
UartTxTC,
UartRxBasicTC,
UartRxTC,
UartTxRxTC,
SpiMasterTC,
I2CMasterBitCntrlTC,
*EthernetMac_rx_TCs,
*EthernetMac_tx_TCs,
MdioMasterTC,
Hd44780Driver8bTC,
CrcUtilsTC,
CrcCombTC,
CrcTC,
UsbAgentTC,
*UlpiAgent_TCs,
*UtmiAgentTCs,
Utmi_to_UlpiTC,
Usb2SieDeviceRxTC,
Usb2SieDeviceTxTC,
Usb2CdcVcpTC,
*UsbipTCs,
BusEndpointTC,
*BramPortEndpointTCs,
# avalon tests
AvalonMmAgentTC,
*AvalonMmEndpointTCs,
AvalonMmBram_TC,
*AxiToAvalonMm_TCs,
AvalonStAgentTC,
AvalonMmBuff_TC,
# axi tests
SimpleAxiRegsTC,
AxiTC,
*AxiLiteEndpointTCs,
*AxiLiteEndpointArrTCs,
AxiLiteEndpoint_struct_TC,
AxiLiteEndpoint_arrayStruct_TC,
AxiLiteEndpoint_fromInterfaceTC,
AxiLiteEndpoint_fromInterface_arr_TC,
AxiLite_to_Axi_TC,
Axi_to_AxiLite_TC,
AxiRegTC,
AxiTesterTC,
*AxiStaticRemapTCs,
AxiResizeTC,
AxisFrameGenTC,
*AddrDataHs_to_Axi_TCs,
Axi4BRam_TC,
*Axi_rDatapump_alignedTCs,
*Axi_rDatapump_unalignedTCs,
*Axi_wDatapumpTCs,
AxiSlaveTimeoutTC,
AxiSStoredBurstTC,
AxiS_en_TC,
AxiS_fifoMeasuringTC,
AxiSFifoDropTC,
*AxiS_resizer_TCs,
AxiS_frameDeparser_TC,
AxiS_localLinkConvTC,
AxiS_footerSplitTC,
AxiS_frameParserTC,
*AxiS_FrameJoin_TCs,
HandshakedBuilderSimpleTC,
*EthAddrUpdaterTCs,
RStrictOrderInterconnectTC,
WStrictOrderInterconnectTC,
WStrictOrderInterconnect2TC,
WStrictOrderInterconnectComplexTC,
*AxiInterconnectMatrixAddrCrossbar_TCs,
*AxiInterconnectMatrixCrossbar_TCs,
*AxiInterconnectMatrixR_TCs,
*AxiInterconnectMatrixW_TCs,
*AxiWriteAggregator_TCs,
*AxiReadAggregator_TCs,
*AxiStoreQueueWritePropagating_TCs,
*AxiCacheWriteAllocWawOnlyWritePropagatingTCs,
Axi_ag_TC,
Axi4_streamToMemTC,
ArrayItemGetterTC,
ArrayItemGetter2in1WordTC,
ArrayBuff_writer_TC,
CLinkedListReaderTC,
CLinkedListWriterTC,
MMU_2pageLvl_TC,
StructWriter_TC,
StructReaderTC,
*OooOpExampleCounterArray_TCs,
OooOpExampleCounterHashTable_TC,
# ipif tests
IpifEndpointTC,
IpifEndpointDenseTC,
IpifEndpointDenseStartTC,
IpifEndpointArray,
IpifBuffTC,
Axi4Lite_to_IpifTC,
IpifInterconnectMatrixTC,
Mi32AgentTC,
Mi32InterconnectMatrixTC,
Mi32_to_Axi4LiteTC,
Mi32Axi4LiteBrigesTC,
Mi32SlidingWindowTC,
*Mi32EndpointTCs,
# complex units tests
UnitWrapperTC,
IpCorePackagerTC,
CharToBitmapTC,
HashTableCoreWithRamTC,
*CuckooHashTableWithRamTCs,
PingResponderTC,
DebugBusMonitorExampleAxiTC,
RmiiAdapterTC,
ConstraintsXdcClockRelatedTC,
HsSlrCrossingTC,
*Dsp48e1Add_TCs,
*BasicRtlSimulatorVcdTmpDirs_TCs,
)
def main():
# runner = TextTestRunner(verbosity=2, failfast=True)
runner = TextTestRunner(verbosity=2)
try:
from concurrencytest import ConcurrentTestSuite, fork_for_tests
        useParallelTest = True
except ImportError:
# concurrencytest is not installed, use regular test runner
        useParallelTest = False
    # useParallelTest = False
    if useParallelTest:
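        # fork_for_tests() forks worker processes and streams each test case's
        # results back over pipes, so independent TCs run in parallel.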
concurrent_suite = ConcurrentTestSuite(suite, fork_for_tests())
res = runner.run(concurrent_suite)
else:
res = runner.run(suite)
if not res.wasSuccessful():
sys.exit(1)
if __name__ == '__main__':
main()
| mit |
alexlo03/ansible | test/units/modules/network/f5/test_bigip_gtm_global.py | 8 | 4025 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
from nose.plugins.skip import SkipTest
if sys.version_info < (2, 7):
raise SkipTest("F5 Ansible modules require Python >= 2.7")
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_gtm_global import ApiParameters
from library.modules.bigip_gtm_global import ModuleParameters
from library.modules.bigip_gtm_global import ModuleManager
from library.modules.bigip_gtm_global import ArgumentSpec
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
from test.unit.modules.utils import set_module_args
except ImportError:
try:
from ansible.modules.network.f5.bigip_gtm_global import ApiParameters
from ansible.modules.network.f5.bigip_gtm_global import ModuleParameters
from ansible.modules.network.f5.bigip_gtm_global import ModuleManager
from ansible.modules.network.f5.bigip_gtm_global import ArgumentSpec
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
from units.modules.utils import set_module_args
except ImportError:
raise SkipTest("F5 Ansible modules require the f5-sdk Python library")
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
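    # Fixtures are read once and memoized in fixture_data (keyed by path);
    # JSON fixtures are returned parsed, anything else as raw text.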
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
synchronization=True,
synchronization_group_name='foo',
synchronize_zone_files=True
)
p = ModuleParameters(params=args)
assert p.synchronization is True
assert p.synchronization_group_name == 'foo'
assert p.synchronize_zone_files is True
def test_api_parameters(self):
args = load_fixture('load_gtm_global_settings_general_1.json')
p = ApiParameters(params=args)
assert p.synchronization is False
assert p.synchronization_group_name == 'default'
assert p.synchronize_zone_files is False
@patch('ansible.module_utils.f5_utils.AnsibleF5Client._get_mgmt_root',
return_value=True)
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
    def test_update(self, *args):
set_module_args(dict(
synchronization="yes",
synchronization_group_name='foo',
synchronize_zone_files="yes",
server='localhost',
password='password',
user='admin'
))
current = ApiParameters(params=load_fixture('load_gtm_global_settings_general_1.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.update_on_device = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
results = mm.exec_module()
assert results['changed'] is True
assert results['synchronization'] == 'yes'
assert results['synchronization_group_name'] == 'foo'
assert results['synchronize_zone_files'] == 'yes'
| gpl-3.0 |
zeraien/comcon | ampcon/ampcon.py | 1 | 1866 | import yaml
import os
from flask import Flask, render_template, jsonify, request
from amplifier import Amplifier, SOURCES
app = Flask(__name__)
with open(os.path.join(os.path.dirname(__file__), "config.yaml")) as f:
    config = yaml.safe_load(f)  # safe_load avoids constructing arbitrary objects from YAML tags
amplifier_obj = Amplifier(serial_port=config["serial_port"], logger=app.logger)
@app.context_processor
def inject_user():
if not amplifier_obj.configured:
amplifier_obj.configure()
return {
'sources': SOURCES
}
@app.route('/')
def hello_world():
amplifier_obj.configured = False
return render_template('index.html')
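# The ':'-prefixed paths below (e.g. '/:volume') are literal URL segments used
# as a simple command namespace; Flask treats ':' as an ordinary character.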
@app.route('/:volume')
def volume_change():
step = int(request.args.get('step'))
amplifier_obj.volume_change(step)
return jsonify(amplifier_obj.json_ready())
@app.route('/:volume_percent/<int:percent>')
def volume_percent(percent):
amplifier_obj.set_volume_percent(percent)
return jsonify(amplifier_obj.json_ready())
@app.route('/:volume_calibrate')
def volume_calibrate():
amplifier_obj.calibrate_volume()
return jsonify(amplifier_obj.json_ready())
@app.route("/:status")
def status():
return jsonify(amplifier_obj.json_ready())
@app.route("/:set_source")
def source():
new_source = request.args.get('source')
amplifier_obj.set_source(new_source)
return jsonify(amplifier_obj.json_ready())
@app.route('/:mute')
def mute():
amplifier_obj.mute_toggle()
return jsonify(amplifier_obj.json_ready())
@app.route('/:power')
def power():
amplifier_obj.power_toggle()
return jsonify(amplifier_obj.json_ready())
@app.route('/:spk/<speaker>')
def toggle_speaker(speaker):
amplifier_obj.speaker_toggle(speaker)
return jsonify(amplifier_obj.json_ready())
@app.errorhandler(500)
def internal_server_error(e):
    return "Error: %s" % e
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000, debug=True)
| gpl-2.0 |
linostar/timeline-clone | test/specs/utils.py | 1 | 17794 | # Copyright (C) 2009, 2010, 2011 Rickard Lindberg, Roger Lindberg
#
# This file is part of Timeline.
#
# Timeline is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Timeline is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Timeline. If not, see <http://www.gnu.org/licenses/>.
import os.path
import random
import shutil
import sys
import tempfile
import traceback
import unittest
import wx.lib.inspection
from timelinelib.calendar.gregorian import Gregorian
from timelinelib.calendar.monthnames import ABBREVIATED_ENGLISH_MONTH_NAMES
from timelinelib.config.arguments import ApplicationArguments
from timelinelib.config.dotfile import read_config
from timelinelib.data import Category
from timelinelib.data import Container
from timelinelib.data import Event
from timelinelib.data import Subevent
from timelinelib.data import TimePeriod
from timelinelib.db import db_open
from timelinelib.time.gregoriantime import GregorianTimeType
from timelinelib.time.timeline import delta_from_days
from timelinelib.time.timeline import TimeDelta
from timelinelib.wxgui.setup import start_wx_application
ANY_TIME = "1 Jan 2010"
def gregorian_period(start, end):
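    # e.g. gregorian_period("1 Jan 2010", "5 Jan 2010") -> a four-day TimePeriod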
return TimePeriod(GregorianTimeType(), human_time_to_gregorian(start), human_time_to_gregorian(end))
def human_time_to_gregorian(human_time):
(year, month, day, hour, minute) = human_time_to_ymdhm(human_time)
return Gregorian(year, month, day, hour, minute, 0).to_time()
def a_time_period():
year = random.randint(1, 4000)
month = random.randint(1, 12)
    day = random.randint(1, 28)
end_year = year + random.randint(1, 5)
end_month = random.randint(1, 12)
    end_day = random.randint(1, 28)
return TimePeriod(GregorianTimeType(),
Gregorian(year, month, day, 0, 0, 0).to_time(),
Gregorian(end_year, end_month, end_day, 0, 0, 0).to_time())
def human_time_to_ymdhm(human_time):
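    # e.g. "1 Jan 2010" -> (2010, 1, 1, 0, 0); "1 Jan 2010 14:30" -> (2010, 1, 1, 14, 30)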
parts = human_time.split(" ")
day_part, month_part, year_part = parts[0], parts[1], parts[2]
day = int(day_part)
month = ABBREVIATED_ENGLISH_MONTH_NAMES.index(month_part) + 1
year = int(year_part)
if len(parts) == 4:
hour = int(parts[3][:2])
minute = int(parts[3][3:5])
else:
hour = 0
minute = 0
return (year, month, day, hour, minute)
def an_event():
return an_event_with(time=ANY_TIME)
def an_event_with(start=None, end=None, time=ANY_TIME, text="foo", fuzzy=False,
locked=False, ends_today=False, category=None):
if start and end:
start = human_time_to_gregorian(start)
end = human_time_to_gregorian(end)
else:
start = human_time_to_gregorian(time)
end = human_time_to_gregorian(time)
return Event(
GregorianTimeType(), start, end, text, category=category,
fuzzy=fuzzy, locked=locked, ends_today=ends_today)
def a_subevent():
return a_subevent_with()
def a_subevent_with(start=None, end=None, time=ANY_TIME, text="sub", category=None, container=None, cid=-1):
if start and end:
start = human_time_to_gregorian(start)
end = human_time_to_gregorian(end)
else:
start = human_time_to_gregorian(time)
end = human_time_to_gregorian(time)
return Subevent(GregorianTimeType(), start, end, text, category=category, container=container, cid=cid)
def a_container(name, category, sub_events):
cid = 99
start = human_time_to_gregorian(ANY_TIME)
end = human_time_to_gregorian(ANY_TIME)
container = Container(GregorianTimeType(), start, end, name,
category=category, cid=cid)
all_events = []
all_events.append(container)
for (name, category) in sub_events:
all_events.append(Subevent(GregorianTimeType(), start, end, name,
category=category, container=container))
return all_events
def a_container_with(text="container", category=None, cid=-1):
start = human_time_to_gregorian(ANY_TIME)
end = human_time_to_gregorian(ANY_TIME)
container = Container(GregorianTimeType(), start, end, text, category=category, cid=cid)
return container
def a_category():
return a_category_with(name="category")
def a_category_with(name, color=(255, 0, 0), font_color=(0, 255, 255),
parent=None):
return Category(name=name, color=color, font_color=font_color,
parent=parent)
def get_random_modifier(modifiers):
return random.choice(modifiers)
def inc(number):
if number is None:
return 8
else:
return number + 1
def new_cat(event):
if event.get_category() is None:
return a_category_with(name="new category")
else:
return a_category_with(name="was: %s" % event.get_category().get_name())
def new_parent(category):
if category.get_parent() is None:
return a_category_with(name="new category")
else:
return a_category_with(name="was: %s" % category.get_parent().get_name())
def new_time_type(event):
if event.get_time_type() is None:
return GregorianTimeType()
else:
return None
def new_progress(event):
if event.get_progress() is None:
return 8
else:
return (event.get_progress() + 1) % 100
def modifier_change_ends_today(event):
if event.get_locked():
event.set_locked(False)
event.set_ends_today(not event.get_ends_today())
event.set_locked(True)
else:
event.set_ends_today(not event.get_ends_today())
return event
EVENT_MODIFIERS = [
("change time type", lambda event:
event.set_time_type(new_time_type(event))),
("change fuzzy", lambda event:
event.set_fuzzy(not event.get_fuzzy())),
("change locked", lambda event:
event.set_locked(not event.get_locked())),
("change ends today", modifier_change_ends_today),
("change id", lambda event:
event.set_id(inc(event.get_id()))),
("change time period", lambda event:
event.set_time_period(event.get_time_period().move_delta(delta_from_days(1)))),
("change text", lambda event:
event.set_text("was: %s" % event.get_text())),
("change category", lambda event:
event.set_category(new_cat(event))),
("change icon", lambda event:
event.set_icon("was: %s" % event.get_icon())),
("change description", lambda event:
event.set_description("was: %s" % event.get_description())),
("change hyperlink", lambda event:
event.set_hyperlink("was: %s" % event.get_hyperlink())),
("change progress", lambda event:
event.set_progress(new_progress(event))),
("change alert", lambda event:
event.set_alert("was: %s" % event.get_alert())),
]
SUBEVENT_MODIFIERS = [
("change container id", lambda event:
event.set_container_id(event.get_container_id()+1)),
] + EVENT_MODIFIERS
CONTAINER_MODIFIERS = [
("change container id", lambda event:
event.set_cid(event.cid()+1)),
] + EVENT_MODIFIERS
CATEGORY_MODIFIERS = [
("change name", lambda category:
category.set_name("was: %s" % category.get_name())),
("change id", lambda category:
category.set_id(inc(category.get_id()))),
("change color", lambda category:
category.set_color(category.get_color()+(1, 0, 3))),
("change font color", lambda category:
category.set_font_color(category.get_font_color()+(1, 0, 3))),
("change parent", lambda category:
category.set_parent(new_parent(category))),
]
TIME_PERIOD_MODIFIERS = [
("zoom", lambda time_period:
time_period.zoom(-1)),
("extend left", lambda time_period:
time_period.update(time_period.start_time-time_period.time_type.get_min_zoom_delta()[0],
time_period.end_time)),
("extend right", lambda time_period:
time_period.update(time_period.start_time,
time_period.end_time+time_period.time_type.get_min_zoom_delta()[0])),
]
TIME_MODIFIERS = [
("add", lambda time: time + TimeDelta(1)),
]
class TestCase(unittest.TestCase):
def assertListIsCloneOf(self, cloned_list, original_list):
self.assertEqual(cloned_list, original_list)
self.assertTrue(cloned_list is not original_list)
for i in range(len(cloned_list)):
self.assertIsCloneOf(cloned_list[i], original_list[i])
def assertIsCloneOf(self, clone, original):
self.assertEqual(clone, original)
self.assertTrue(clone is not original, "%r" % clone)
def assertInstanceNotIn(self, object_, list_):
for element in list_:
if element is object_:
self.fail("%r was in list" % object_)
def assertEqNeImplementationIsCorrect(self, create_fn, modifiers):
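        # Build two equal objects by applying the same random modifier to two
        # fresh instances, verify __eq__/__ne__ are consistent, then modify one
        # further and verify the pair compares unequal.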
(modification_description, modifier_fn) = get_random_modifier(modifiers)
one = modifier_fn(create_fn())
other = modifier_fn(create_fn())
fail_message_one_other = "%r vs %r (%s)" % (one, other,
modification_description)
self.assertTrue(type(one) == type(other), fail_message_one_other)
self.assertFalse(one == None, fail_message_one_other)
self.assertTrue(one != None, fail_message_one_other)
self.assertTrue(one is not other, fail_message_one_other)
self.assertFalse(one is other, fail_message_one_other)
self.assertTrue(one == other, fail_message_one_other)
self.assertFalse(one != other, fail_message_one_other)
self.assertTrue(one == one, fail_message_one_other)
self.assertFalse(one != one, fail_message_one_other)
(modification_description, modifier_fn) = get_random_modifier(modifiers)
modified = modifier_fn(other)
fail_message_modified_one = "%r vs %r (%s)" % (modified, one,
modification_description)
self.assertTrue(type(modified) == type(one), fail_message_modified_one)
self.assertTrue(modified is not one, fail_message_modified_one)
self.assertFalse(modified is one, fail_message_modified_one)
self.assertTrue(modified != one, fail_message_modified_one)
self.assertFalse(modified == one, fail_message_modified_one)
class TmpDirTestCase(TestCase):
def setUp(self):
self.tmp_dir = tempfile.mkdtemp(prefix="timeline-test")
def tearDown(self):
shutil.rmtree(self.tmp_dir)
def get_tmp_path(self, name):
return os.path.join(self.tmp_dir, name)
class WxComponentTest(TestCase):
def setUp(self):
self._app = wx.App(False)
self._main_frame = wx.Frame(None)
self._main_frame.Bind(wx.EVT_CLOSE, self._main_frame_on_close)
self._main_panel = wx.Panel(self._main_frame)
self._components = []
self._component_by_name = {}
self._is_close_called = False
def tearDown(self):
self._close()
def add_component(self, name, cls, *args):
self._component_by_name[name] = cls(self._main_panel, *args)
self._components.append(self._component_by_name[name])
def add_button(self, text, callback, component_name=None):
button = wx.Button(self._main_panel, label=text)
self._components.append(button)
def event_listener(event):
if component_name:
callback(self.get_component(component_name))
else:
callback()
button.Bind(wx.EVT_BUTTON, event_listener)
def add_separator(self):
label = "----- separator -----"
self._components.append(wx.StaticText(self._main_panel, label=label))
def get_component(self, name):
return self._component_by_name[name]
def show_test_window(self):
sizer = wx.BoxSizer(wx.VERTICAL)
for component in self._components:
sizer.Add(component, flag=wx.ALL|wx.GROW, border=3)
self._main_panel.SetSizer(sizer)
self._main_frame.Show()
if not self.HALT_FOR_MANUAL_INSPECTION:
wx.CallAfter(self._close)
self._app.MainLoop()
def _main_frame_on_close(self, event):
self._is_close_called = True
self._main_frame.Destroy()
def _close(self):
if not self._is_close_called:
self._main_frame.Close()
self._is_close_called = True
class WxEndToEndTestCase(TmpDirTestCase):
def setUp(self):
TmpDirTestCase.setUp(self)
self.timeline_path = self.get_tmp_path("test.timeline")
self.config_file_path = self.get_tmp_path("thetimelineproj.cfg")
self.config = read_config(self.config_file_path)
self.standard_excepthook = sys.excepthook
self.error_in_gui_thread = None
def tearDown(self):
TmpDirTestCase.tearDown(self)
sys.excepthook = self.standard_excepthook
def start_timeline_and(self, steps_to_perform_in_gui):
self.config.write()
self.steps_to_perform_in_gui = steps_to_perform_in_gui
application_arguments = ApplicationArguments()
application_arguments.parse_from(
["--config-file", self.config_file_path, self.timeline_path])
start_wx_application(application_arguments, self._before_main_loop_hook)
if self.error_in_gui_thread:
exc_type, exc_value, exc_traceback = self.error_in_gui_thread
a = traceback.format_exception(exc_type, exc_value, exc_traceback)
self.fail("Exception in GUI thread: %s" % "".join(a))
def read_written_timeline(self):
return db_open(self.timeline_path)
def _before_main_loop_hook(self):
sys.excepthook = self.standard_excepthook
self._setup_steps_to_perform_in_gui(self.steps_to_perform_in_gui)
def _setup_steps_to_perform_in_gui(self, steps, in_sub_step_mode=False):
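        # Steps are queued one per event-loop iteration via wx.CallAfter; a list
        # following a step is executed as its sub-steps, and the main window is
        # closed once the top-level step list is exhausted.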
def perform_current_step_and_queue_next():
if len(steps) >= 2 and isinstance(steps[1], list):
self._setup_steps_to_perform_in_gui(steps[1], True)
next_step_index = 2
else:
next_step_index = 1
try:
steps[0]()
except Exception:
wx.GetApp().GetTopWindow().Close()
self.error_in_gui_thread = sys.exc_info()
else:
if steps[0] != self.show_widget_inspector:
self._setup_steps_to_perform_in_gui(steps[next_step_index:], in_sub_step_mode)
if len(steps) > 0:
wx.CallAfter(perform_current_step_and_queue_next)
elif not in_sub_step_mode:
wx.CallAfter(wx.GetApp().GetTopWindow().Close)
def show_widget_inspector(self):
wx.lib.inspection.InspectionTool().Show()
def click_menu_item(self, item_path):
def click():
item_names = [_(x) for x in item_path.split(" -> ")]
menu_bar = wx.GetApp().GetTopWindow().GetMenuBar()
menu = menu_bar.GetMenu(menu_bar.FindMenu(item_names[0]))
for sub in item_names[1:]:
menu = menu_bar.FindItemById(menu.FindItem(sub))
wx.GetApp().GetTopWindow().ProcessEvent(
wx.CommandEvent(wx.EVT_MENU.typeId, menu.GetId()))
return click
def click_button(self, component_path):
def click():
component = self.find_component(component_path)
component.ProcessEvent(wx.CommandEvent(wx.EVT_BUTTON.typeId, component.GetId()))
return click
def enter_text(self, component_path, text):
def enter():
self.find_component(component_path).SetValue(text)
return enter
def find_component(self, component_path):
components_to_search_in = wx.GetTopLevelWindows()
for component_name in component_path.split(" -> "):
component = self._find_component_with_name_in(
components_to_search_in, component_name)
            if component is None:
self.fail("Could not find component with path '%s'." % component_path)
else:
components_to_search_in = component.GetChildren()
return component
def _find_component_with_name_in(self, components, seeked_name):
for component in components:
if self._matches_seeked_name(component, seeked_name):
return component
for component in components:
sub = self._find_component_with_name_in(component.GetChildren(), seeked_name)
if sub:
return sub
return None
def _matches_seeked_name(self, component, seeked_name):
if component.GetName() == seeked_name:
return True
elif component.GetId() == self._wx_id_from_name(seeked_name):
return True
elif hasattr(component, "GetLabelText") and component.GetLabelText() == _(seeked_name):
return True
elif component.GetLabel() == _(seeked_name):
return True
return False
def _wx_id_from_name(self, name):
if name.startswith("wxID_"):
return getattr(wx, name[2:])
return None
class ObjectWithTruthValue(object):
def __init__(self, truth_value):
self.truth_value = truth_value
def __nonzero__(self):
return self.truth_value
| gpl-3.0 |
ESSolutions/ESSArch_Core | ESSArch_Core/WorkflowEngine/__init__.py | 1 | 1392 | """
ESSArch is an open source archiving and digital preservation system
ESSArch
Copyright (C) 2005-2019 ES Solutions AB
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Contact information:
Web - http://www.essolutions.se
Email - essarch@essolutions.se
"""
import logging
import celery
default_app_config = 'ESSArch_Core.WorkflowEngine.apps.WorkflowEngineConfig'
logger = logging.getLogger('essarch.workflowengine')
def get_workers(rabbitmq):
if rabbitmq.get('error'):
logger.error("RabbitMQ seems down. Wont get stats of celery workers.")
return None
try:
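        # inspect().stats() broadcasts a stats request to all online workers and
        # blocks until they reply or the broadcast timeout expires.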
return celery.current_app.control.inspect().stats()
except Exception:
logger.exception("Error when checking stats of celery workers.")
return None
| gpl-3.0 |
karlito40/servo | components/script/dom/bindings/codegen/parser/tests/test_nullable_equivalency.py | 106 | 3835 | import WebIDL
def WebIDLTest(parser, harness):
parser.parse("""
interface TestNullableEquivalency1 {
attribute long a;
attribute long? b;
};
interface TestNullableEquivalency2 {
attribute ArrayBuffer a;
attribute ArrayBuffer? b;
};
/* Can't have dictionary-valued attributes, so can't test that here */
enum TestNullableEquivalency4Enum {
"Foo",
"Bar"
};
interface TestNullableEquivalency4 {
attribute TestNullableEquivalency4Enum a;
attribute TestNullableEquivalency4Enum? b;
};
interface TestNullableEquivalency5 {
attribute TestNullableEquivalency4 a;
attribute TestNullableEquivalency4? b;
};
interface TestNullableEquivalency6 {
attribute boolean a;
attribute boolean? b;
};
interface TestNullableEquivalency7 {
attribute DOMString a;
attribute DOMString? b;
};
/* Not implemented. */
/*interface TestNullableEquivalency8 {
attribute float a;
attribute float? b;
};*/
interface TestNullableEquivalency8 {
attribute double a;
attribute double? b;
};
interface TestNullableEquivalency9 {
attribute object a;
attribute object? b;
};
interface TestNullableEquivalency10 {
attribute double[] a;
attribute double[]? b;
};
interface TestNullableEquivalency11 {
attribute TestNullableEquivalency9[] a;
attribute TestNullableEquivalency9[]? b;
};
""")
for decl in parser.finish():
if decl.isInterface():
checkEquivalent(decl, harness)
def checkEquivalent(iface, harness):
type1 = iface.members[0].type
type2 = iface.members[1].type
harness.check(type1.nullable(), False, 'attr1 should not be nullable')
harness.check(type2.nullable(), True, 'attr2 should be nullable')
# We don't know about type1, but type2, the nullable type, definitely
# shouldn't be builtin.
harness.check(type2.builtin, False, 'attr2 should not be builtin')
# Ensure that all attributes of type2 match those in type1, except for:
# - names on an ignore list,
# - names beginning with '_',
# - functions which throw when called with no args, and
# - class-level non-callables ("static variables").
#
# Yes, this is an ugly, fragile hack. But it finds bugs...
for attr in dir(type1):
if attr.startswith('_') or \
attr in ['nullable', 'builtin', 'filename', 'location',
'inner', 'QName'] or \
(hasattr(type(type1), attr) and not callable(getattr(type1, attr))):
continue
a1 = getattr(type1, attr)
if callable(a1):
try:
v1 = a1()
except:
                # Can't call a1 with no args, so skip this attribute.
continue
try:
a2 = getattr(type2, attr)
except:
harness.ok(False, 'Missing %s attribute on type %s in %s' % (attr, type2, iface))
continue
if not callable(a2):
harness.ok(False, "%s attribute on type %s in %s wasn't callable" % (attr, type2, iface))
continue
v2 = a2()
harness.check(v2, v1, '%s method return value' % attr)
else:
try:
a2 = getattr(type2, attr)
except:
harness.ok(False, 'Missing %s attribute on type %s in %s' % (attr, type2, iface))
continue
harness.check(a2, a1, '%s attribute should match' % attr)
| mpl-2.0 |
pdellaert/ansible | lib/ansible/plugins/action/package.py | 36 | 3474 | # (c) 2015, Ansible Inc,
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleAction, AnsibleActionFail
from ansible.executor.module_common import get_action_args_with_defaults
from ansible.plugins.action import ActionBase
from ansible.utils.display import Display
display = Display()
class ActionModule(ActionBase):
TRANSFERS_FILES = False
def run(self, tmp=None, task_vars=None):
''' handler for package operations '''
self._supports_check_mode = True
self._supports_async = True
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
module = self._task.args.get('use', 'auto')
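        # 'use' lets a task pin a concrete backend (e.g. 'yum', 'apt');
        # 'auto' falls back to the ansible_pkg_mgr fact, gathered on demand below.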
if module == 'auto':
try:
if self._task.delegate_to: # if we delegate, we should use delegated host's facts
module = self._templar.template("{{hostvars['%s']['ansible_facts']['pkg_mgr']}}" % self._task.delegate_to)
else:
module = self._templar.template('{{ansible_facts.pkg_mgr}}')
except Exception:
pass # could not get it from template!
try:
if module == 'auto':
facts = self._execute_module(module_name='setup', module_args=dict(filter='ansible_pkg_mgr', gather_subset='!all'), task_vars=task_vars)
display.debug("Facts %s" % facts)
module = facts.get('ansible_facts', {}).get('ansible_pkg_mgr', 'auto')
if module != 'auto':
if module not in self._shared_loader_obj.module_loader:
raise AnsibleActionFail('Could not find a module for %s.' % module)
else:
# run the 'package' module
new_module_args = self._task.args.copy()
if 'use' in new_module_args:
del new_module_args['use']
# get defaults for specific module
new_module_args = get_action_args_with_defaults(module, new_module_args, self._task.module_defaults, self._templar)
display.vvvv("Running %s" % module)
result.update(self._execute_module(module_name=module, module_args=new_module_args, task_vars=task_vars, wrap_async=self._task.async_val))
else:
raise AnsibleActionFail('Could not detect which package manager to use. Try gathering facts or setting the "use" option.')
except AnsibleAction as e:
result.update(e.result)
finally:
if not self._task.async_val:
# remove a temporary path we created
self._remove_tmp_path(self._connection._shell.tmpdir)
return result
| gpl-3.0 |
chokribr/invenioold | modules/websession/lib/inveniogc.py | 9 | 30780 | ## -*- mode: python; coding: utf-8; -*-
##
## This file is part of Invenio.
## Copyright (C) 2007, 2008, 2010, 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Invenio garbage collector.
"""
__revision__ = "$Id$"
import sys
import datetime
import time
import os
try:
from invenio.dbquery import run_sql, wash_table_column_name
from invenio.config import CFG_LOGDIR, CFG_TMPDIR, CFG_CACHEDIR, \
CFG_TMPSHAREDDIR, CFG_WEBSEARCH_RSS_TTL, CFG_PREFIX, \
CFG_WEBSESSION_NOT_CONFIRMED_EMAIL_ADDRESS_EXPIRE_IN_DAYS
from invenio.bibtask import task_init, task_set_option, task_get_option, \
write_message, write_messages
from invenio.bibtask_config import CFG_BIBSCHED_LOGDIR
from invenio.access_control_mailcookie import mail_cookie_gc
from invenio.bibdocfile import BibDoc
from invenio.bibsched import gc_tasks
from invenio.websubmit_config import CFG_WEBSUBMIT_TMP_VIDEO_PREFIX
from invenio.dateutils import convert_datestruct_to_datetext
except ImportError, e:
print "Error: %s" % (e,)
sys.exit(1)
# Add trailing slash to CFG_TMPSHAREDDIR, for find command to work
# with symlinks
CFG_TMPSHAREDDIR = CFG_TMPSHAREDDIR + os.sep
# configure variables
CFG_MYSQL_ARGUMENTLIST_SIZE = 100
# After how many days to remove obsolete log/err files
CFG_MAX_ATIME_RM_LOG = 28
# After how many days to zip obsolete log/err files
CFG_MAX_ATIME_ZIP_LOG = 7
# After how many days to remove obsolete bibreformat fmt xml files
CFG_MAX_ATIME_RM_FMT = 28
# After how many days to zip obsolete bibreformat fmt xml files
CFG_MAX_ATIME_ZIP_FMT = 7
# After how many days to remove obsolete oaiharvest fmt xml files
CFG_MAX_ATIME_RM_OAI = 14
# After how many days to zip obsolete oaiharvest fmt xml files
CFG_MAX_ATIME_ZIP_OAI = 3
# After how many days to remove deleted bibdocs
CFG_DELETED_BIBDOC_MAXLIFE = 365 * 10
# After how many day to remove old cached webjournal files
CFG_WEBJOURNAL_TTL = 7
# After how many days to zip obsolete bibsword xml log files
CFG_MAX_ATIME_ZIP_BIBSWORD = 7
# After how many days to remove obsolete bibsword xml log files
CFG_MAX_ATIME_RM_BIBSWORD = 28
# After how many days to remove temporary video uploads
CFG_MAX_ATIME_WEBSUBMIT_TMP_VIDEO = 3
# After how many days to remove obsolete refextract xml output files
CFG_MAX_ATIME_RM_REFEXTRACT = 7
# After how many days to remove obsolete bibdocfiles temporary files
CFG_MAX_ATIME_RM_BIBDOC = 4
# After how many days to remove obsolete WebSubmit-created temporary
# icon files
CFG_MAX_ATIME_RM_ICON = 7
# After how many days to remove obsolete WebSubmit-created temporary
# stamp files
CFG_MAX_ATIME_RM_STAMP = 7
# After how many days to remove obsolete WebJournal-created update XML
CFG_MAX_ATIME_RM_WEBJOURNAL_XML = 7
# After how many days to remove obsolete temporary files attached with
# the CKEditor in WebSubmit context?
CFG_MAX_ATIME_RM_WEBSUBMIT_CKEDITOR_FILE = 28
# After how many days to remove obsolete temporary files related to BibEdit
# cache
CFG_MAX_ATIME_BIBEDIT_TMP = 3
# After how many days to remove submitted XML files related to BibEdit
CFG_MAX_ATIME_BIBEDIT_XML = 3
def gc_exec_command(command):
""" Exec the command logging in appropriate way its output."""
write_message(' %s' % command, verbose=9)
(dummy, output, errors) = os.popen3(command)
write_messages(errors.read())
write_messages(output.read())
def clean_logs():
""" Clean the logs from obsolete files. """
write_message("""CLEANING OF LOG FILES STARTED""")
write_message("- deleting/gzipping bibsched empty/old err/log "
"BibSched files")
vstr = task_get_option('verbose') > 1 and '-v' or ''
gc_exec_command('find %s -name "bibsched_task_*"'
' -size 0c -exec rm %s -f {} \;' \
% (CFG_BIBSCHED_LOGDIR, vstr))
gc_exec_command('find %s -name "bibsched_task_*"'
' -atime +%s -exec rm %s -f {} \;' \
% (CFG_BIBSCHED_LOGDIR, CFG_MAX_ATIME_RM_LOG, vstr))
gc_exec_command('find %s -name "bibsched_task_*"'
' -atime +%s -exec gzip %s -9 {} \;' \
% (CFG_BIBSCHED_LOGDIR, CFG_MAX_ATIME_ZIP_LOG, vstr))
write_message("""CLEANING OF LOG FILES FINISHED""")
def clean_tempfiles():
""" Clean old temporary files. """
write_message("""CLEANING OF TMP FILES STARTED""")
write_message("- deleting/gzipping temporary empty/old "
"BibReformat xml files")
vstr = task_get_option('verbose') > 1 and '-v' or ''
gc_exec_command('find %s %s -name "rec_fmt_*"'
' -size 0c -exec rm %s -f {} \;' \
% (CFG_TMPDIR, CFG_TMPSHAREDDIR, vstr))
gc_exec_command('find %s %s -name "rec_fmt_*"'
' -atime +%s -exec rm %s -f {} \;' \
% (CFG_TMPDIR, CFG_TMPSHAREDDIR, \
CFG_MAX_ATIME_RM_FMT, vstr))
gc_exec_command('find %s %s -name "rec_fmt_*"'
' -atime +%s -exec gzip %s -9 {} \;' \
% (CFG_TMPDIR, CFG_TMPSHAREDDIR, \
CFG_MAX_ATIME_ZIP_FMT, vstr))
write_message("- deleting/gzipping temporary old "
"OAIHarvest xml files")
gc_exec_command('find %s %s -name "oaiharvestadmin.*"'
' -exec rm %s -f {} \;' \
% (CFG_TMPDIR, CFG_TMPSHAREDDIR, vstr))
gc_exec_command('find %s %s -name "bibconvertrun.*"'
' -exec rm %s -f {} \;' \
% (CFG_TMPDIR, CFG_TMPSHAREDDIR, vstr))
# Using mtime and -r here to include directories.
gc_exec_command('find %s %s -name "oaiharvest*"'
' -mtime +%s -exec gzip %s -9 {} \;' \
% (CFG_TMPDIR, CFG_TMPSHAREDDIR, \
CFG_MAX_ATIME_ZIP_OAI, vstr))
gc_exec_command('find %s %s -name "oaiharvest*"'
' -mtime +%s -exec rm %s -rf {} \;' \
% (CFG_TMPDIR, CFG_TMPSHAREDDIR, \
CFG_MAX_ATIME_RM_OAI, vstr))
gc_exec_command('find %s %s -name "oai_archive*"'
' -mtime +%s -exec rm %s -rf {} \;' \
% (CFG_TMPDIR, CFG_TMPSHAREDDIR, \
CFG_MAX_ATIME_RM_OAI, vstr))
write_message("- deleting/gzipping temporary old "
"BibSword files")
gc_exec_command('find %s %s -name "bibsword_*"'
' -atime +%s -exec rm %s -f {} \;' \
% (CFG_TMPDIR, CFG_TMPSHAREDDIR, \
CFG_MAX_ATIME_RM_BIBSWORD, vstr))
gc_exec_command('find %s %s -name "bibsword_*"'
' -atime +%s -exec gzip %s -9 {} \;' \
% (CFG_TMPDIR, CFG_TMPSHAREDDIR, \
CFG_MAX_ATIME_ZIP_BIBSWORD, vstr))
# DELETE ALL FILES CREATED DURING VIDEO SUBMISSION
write_message("- deleting old video submissions")
gc_exec_command('find %s -name %s* -atime +%s -exec rm %s -f {} \;' \
% (CFG_TMPSHAREDDIR, CFG_WEBSUBMIT_TMP_VIDEO_PREFIX,
CFG_MAX_ATIME_WEBSUBMIT_TMP_VIDEO, vstr))
write_message("- deleting temporary old "
"RefExtract files")
gc_exec_command('find %s %s -name "refextract*"'
' -atime +%s -exec rm %s -f {} \;' \
% (CFG_TMPDIR, CFG_TMPSHAREDDIR,
CFG_MAX_ATIME_RM_REFEXTRACT, vstr))
write_message("- deleting temporary old bibdocfiles")
gc_exec_command('find %s %s -name "bibdocfile_*"'
' -atime +%s -exec rm %s -f {} \;' \
% (CFG_TMPDIR, CFG_TMPSHAREDDIR, \
CFG_MAX_ATIME_RM_BIBDOC, vstr))
write_message("- deleting old temporary WebSubmit icons")
gc_exec_command('find %s %s -name "websubmit_icon_creator_*"'
' -atime +%s -exec rm %s -f {} \;' \
% (CFG_TMPDIR, CFG_TMPSHAREDDIR, \
CFG_MAX_ATIME_RM_ICON, vstr))
write_message("- deleting old temporary WebSubmit stamps")
gc_exec_command('find %s %s -name "websubmit_file_stamper_*"'
' -atime +%s -exec rm %s -f {} \;' \
% (CFG_TMPDIR, CFG_TMPSHAREDDIR, \
CFG_MAX_ATIME_RM_STAMP, vstr))
write_message("- deleting old temporary WebJournal XML files")
gc_exec_command('find %s %s -name "webjournal_publish_*"'
' -atime +%s -exec rm %s -f {} \;' \
% (CFG_TMPDIR, CFG_TMPSHAREDDIR, \
CFG_MAX_ATIME_RM_WEBJOURNAL_XML, vstr))
write_message("- deleting old temporary files attached with CKEditor")
gc_exec_command('find %s/var/tmp/attachfile/ '
' -atime +%s -exec rm %s -f {} \;' \
% (CFG_PREFIX, CFG_MAX_ATIME_RM_WEBSUBMIT_CKEDITOR_FILE,
vstr))
write_message("- deleting old temporary files attached with BibEdit")
gc_exec_command('find %s -name "bibedit*.tmp"'
' -atime +%s -exec rm %s -f {} \;' \
% (CFG_TMPSHAREDDIR + '/bibedit-cache/', CFG_MAX_ATIME_BIBEDIT_TMP,
vstr))
write_message("- deleting old XML files submitted via BibEdit")
gc_exec_command('find %s -name "bibedit*.xml"'
' -atime +%s -exec rm %s -f {} \;' \
% (CFG_TMPSHAREDDIR + '/bibedit-cache/', CFG_MAX_ATIME_BIBEDIT_XML,
vstr))
write_message("""CLEANING OF TMP FILES FINISHED""")
def clean_cache():
"""Clean the cache for expired and old files."""
write_message("""CLEANING OF OLD CACHED RSS REQUEST STARTED""")
rss_cache_dir = "%s/rss/" % CFG_CACHEDIR
try:
filenames = os.listdir(rss_cache_dir)
except OSError:
filenames = []
count = 0
for filename in filenames:
filename = os.path.join(rss_cache_dir, filename)
last_update_time = datetime.datetime.fromtimestamp(os.stat(os.path.abspath(filename)).st_mtime)
if not (datetime.datetime.now() < last_update_time + datetime.timedelta(minutes=CFG_WEBSEARCH_RSS_TTL)):
try:
os.remove(filename)
count += 1
except OSError, e:
write_message("Error: %s" % e)
write_message("""%s rss cache file pruned out of %s.""" % (count, len(filenames)))
write_message("""CLEANING OF OLD CACHED RSS REQUEST FINISHED""")
write_message("""CLEANING OF OLD CACHED WEBJOURNAL FILES STARTED""")
webjournal_cache_dir = "%s/webjournal/" % CFG_CACHEDIR
filenames = []
try:
for root, dummy, files in os.walk(webjournal_cache_dir):
filenames.extend(os.path.join(root, filename) for filename in files)
except OSError:
pass
count = 0
for filename in filenames:
filename = os.path.join(webjournal_cache_dir, filename)
last_update_time = datetime.datetime.fromtimestamp(os.stat(os.path.abspath(filename)).st_mtime)
if not (datetime.datetime.now() < last_update_time + datetime.timedelta(days=CFG_WEBJOURNAL_TTL)):
try:
os.remove(filename)
count += 1
except OSError, e:
write_message("Error: %s" % e)
write_message("""%s webjournal cache file pruned out of %s.""" % (count, len(filenames)))
write_message("""CLEANING OF OLD CACHED WEBJOURNAL FILES FINISHED""")
def clean_bibxxx():
"""
Clean unreferenced bibliographic values from bibXXx tables.
This is useful to prettify browse results, as it removes
old, no longer used values.
WARNING: this function must be run only when no bibupload is
running and/or sleeping.
"""
write_message("""CLEANING OF UNREFERENCED bibXXx VALUES STARTED""")
for xx in range(0, 100):
bibxxx = 'bib%02dx' % xx
bibrec_bibxxx = 'bibrec_bib%02dx' % xx
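        # Orphan detection: LEFT JOIN against the record-link table and keep
        # bibXXx rows whose id_bibrec came back NULL (no record uses them).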
if task_get_option('verbose') >= 9:
num_unref_values = run_sql("""SELECT COUNT(*) FROM %(bibxxx)s
LEFT JOIN %(bibrec_bibxxx)s
ON %(bibxxx)s.id=%(bibrec_bibxxx)s.id_bibxxx
WHERE %(bibrec_bibxxx)s.id_bibrec IS NULL""" % \
{'bibxxx': bibxxx,
'bibrec_bibxxx': bibrec_bibxxx, })[0][0]
run_sql("""DELETE %(bibxxx)s FROM %(bibxxx)s
LEFT JOIN %(bibrec_bibxxx)s
ON %(bibxxx)s.id=%(bibrec_bibxxx)s.id_bibxxx
WHERE %(bibrec_bibxxx)s.id_bibrec IS NULL""" % \
{'bibxxx': bibxxx,
'bibrec_bibxxx': bibrec_bibxxx, })
if task_get_option('verbose') >= 9:
write_message(""" - %d unreferenced %s values cleaned""" % \
(num_unref_values, bibxxx))
write_message("""CLEANING OF UNREFERENCED bibXXx VALUES FINISHED""")
def clean_documents():
"""Delete all the bibdocs that have been set as deleted and have not been
modified since CFG_DELETED_BIBDOC_MAXLIFE days. Returns the number of
bibdocs involved."""
write_message("""CLEANING OF OBSOLETED DELETED DOCUMENTS STARTED""")
write_message("select id from bibdoc where status='DELETED' and NOW()>ADDTIME(modification_date, '%s 0:0:0')" % CFG_DELETED_BIBDOC_MAXLIFE, verbose=9)
records = run_sql("select id from bibdoc where status='DELETED' and NOW()>ADDTIME(modification_date, '%s 0:0:0')", (CFG_DELETED_BIBDOC_MAXLIFE,))
for record in records:
bibdoc = BibDoc.create_instance(record[0])
bibdoc.expunge()
write_message("DELETE FROM bibdoc WHERE id=%i" % int(record[0]), verbose=9)
run_sql("DELETE FROM bibdoc WHERE id=%s", (record[0],))
write_message("""%s obsoleted deleted documents cleaned""" % len(records))
write_message("""CLEANING OF OBSOLETED DELETED DOCUMENTS FINISHED""")
return len(records)
def check_tables():
"""
Check all DB tables. Useful to run from time to time when the
site is idle, say once a month during a weekend night.
FIXME: should produce useful output about outcome.
"""
res = run_sql("SHOW TABLES")
for row in res:
table_name = row[0]
write_message("checking table %s" % table_name)
run_sql("CHECK TABLE %s" % wash_table_column_name(table_name)) # kwalitee: disable=sql
def optimise_tables():
"""
Optimise all DB tables to defragment them in order to increase DB
performance. Useful to run from time to time when the site is
idle, say once a month during a weekend night.
FIXME: should produce useful output about outcome.
"""
res = run_sql("SHOW TABLES")
for row in res:
table_name = row[0]
write_message("optimising table %s" % table_name)
run_sql("OPTIMIZE TABLE %s" % wash_table_column_name(table_name)) # kwalitee: disable=sql
def clean_sessions():
"""
Deletes expired sessions only.
"""
deleted_sessions = 0
timelimit = convert_datestruct_to_datetext(time.gmtime())
write_message("Deleting expired sessions since %s" % (timelimit,))
query = "DELETE LOW_PRIORITY FROM session WHERE session_expiry < %s"
write_message(query % (timelimit,), verbose=9)
deleted_sessions += run_sql(query, (timelimit,))
write_message("Deleted %d sessions" % (deleted_sessions,))
def clean_bibedit_cache():
"""Deletes experied bibedit cache entries"""
datecut = datetime.datetime.now() - datetime.timedelta(days=CFG_MAX_ATIME_BIBEDIT_TMP)
datecut_str = datecut.strftime("%Y-%m-%d %H:%M:%S")
run_sql("DELETE FROM bibEDITCACHE WHERE post_date < %s", [datecut_str])
def guest_user_garbage_collector():
"""Session Garbage Collector
program flow/tasks:
1: delete expired sessions
    1b: delete guest users without session
2: delete queries not attached to any user
3: delete baskets not attached to any user
4: delete alerts not attached to any user
5: delete expired mailcookies
5b: delete expired not confirmed email address
6: delete expired roles memberships
verbose - level of program output.
0 - nothing
1 - default
9 - max, debug"""
# dictionary used to keep track of number of deleted entries
delcount = {'session': 0,
'user': 0,
'user_query': 0,
'query': 0,
'bskBASKET': 0,
'user_bskBASKET': 0,
'bskREC': 0,
'bskRECORDCOMMENT': 0,
'bskEXTREC': 0,
'bskEXTFMT': 0,
'user_query_basket': 0,
'mail_cookie': 0,
'email_addresses': 0,
'role_membership' : 0}
write_message("CLEANING OF GUEST SESSIONS STARTED")
# 1 - DELETE EXPIRED SESSIONS
write_message("- deleting expired sessions")
timelimit = convert_datestruct_to_datetext(time.gmtime())
write_message(" DELETE FROM session WHERE"
" session_expiry < %s \n" % (timelimit,), verbose=9)
delcount['session'] += run_sql("DELETE FROM session WHERE"
" session_expiry < %s """, (timelimit,))
# 1b - DELETE GUEST USERS WITHOUT SESSION
write_message("- deleting guest users without session")
# get uids
write_message(""" SELECT u.id\n FROM user AS u LEFT JOIN session AS s\n ON u.id = s.uid\n WHERE s.uid IS NULL AND u.email = ''""", verbose=9)
result = run_sql("""SELECT u.id
FROM user AS u LEFT JOIN session AS s
ON u.id = s.uid
WHERE s.uid IS NULL AND u.email = ''""")
write_message(result, verbose=9)
if result:
# work on slices of result list in case of big result
for i in range(0, len(result), CFG_MYSQL_ARGUMENTLIST_SIZE):
# create string of uids
uidstr = ''
for (id_user,) in result[i:i + CFG_MYSQL_ARGUMENTLIST_SIZE]:
if uidstr: uidstr += ','
uidstr += "%s" % (id_user,)
# delete users
write_message(" DELETE FROM user WHERE"
" id IN (TRAVERSE LAST RESULT) AND email = '' \n", verbose=9)
delcount['user'] += run_sql("DELETE FROM user WHERE"
" id IN (%s) AND email = ''" % (uidstr,))
# 2 - DELETE QUERIES NOT ATTACHED TO ANY USER
# first step, delete from user_query
write_message("- deleting user_queries referencing"
" non-existent users")
# find user_queries referencing non-existent users
write_message(" SELECT DISTINCT uq.id_user\n"
" FROM user_query AS uq LEFT JOIN user AS u\n"
" ON uq.id_user = u.id\n WHERE u.id IS NULL", verbose=9)
result = run_sql("""SELECT DISTINCT uq.id_user
FROM user_query AS uq LEFT JOIN user AS u
ON uq.id_user = u.id
WHERE u.id IS NULL""")
write_message(result, verbose=9)
# delete in user_query one by one
write_message(" DELETE FROM user_query WHERE"
" id_user = 'TRAVERSE LAST RESULT' \n", verbose=9)
for (id_user,) in result:
delcount['user_query'] += run_sql("""DELETE FROM user_query
WHERE id_user = %s""" % (id_user,))
# delete the actual queries
write_message("- deleting queries not attached to any user")
# select queries that must be deleted
write_message(""" SELECT DISTINCT q.id\n FROM query AS q LEFT JOIN user_query AS uq\n ON uq.id_query = q.id\n WHERE uq.id_query IS NULL AND\n q.type <> 'p' """, verbose=9)
result = run_sql("""SELECT DISTINCT q.id
FROM query AS q LEFT JOIN user_query AS uq
ON uq.id_query = q.id
WHERE uq.id_query IS NULL AND
q.type <> 'p'""")
write_message(result, verbose=9)
# delete queries one by one
write_message(""" DELETE FROM query WHERE id = 'TRAVERSE LAST RESULT' \n""", verbose=9)
for (id_query,) in result:
delcount['query'] += run_sql("""DELETE FROM query WHERE id = %s""", (id_query,))
# 3 - DELETE BASKETS NOT OWNED BY ANY USER
write_message("- deleting baskets not owned by any user")
# select basket ids
write_message(""" SELECT ub.id_bskBASKET\n FROM user_bskBASKET AS ub LEFT JOIN user AS u\n ON u.id = ub.id_user\n WHERE u.id IS NULL""", verbose=9)
try:
result = run_sql("""SELECT ub.id_bskBASKET
FROM user_bskBASKET AS ub LEFT JOIN user AS u
ON u.id = ub.id_user
WHERE u.id IS NULL""")
except Exception:
result = []
write_message(result, verbose=9)
# delete from user_basket and basket one by one
write_message(""" DELETE FROM user_bskBASKET WHERE id_bskBASKET = 'TRAVERSE LAST RESULT' """, verbose=9)
write_message(""" DELETE FROM bskBASKET WHERE id = 'TRAVERSE LAST RESULT' """, verbose=9)
write_message(""" DELETE FROM bskREC WHERE id_bskBASKET = 'TRAVERSE LAST RESULT'""", verbose=9)
write_message(""" DELETE FROM bskRECORDCOMMENT WHERE id_bskBASKET = 'TRAVERSE LAST RESULT' \n""", verbose=9)
for (id_basket,) in result:
delcount['user_bskBASKET'] += run_sql("""DELETE FROM user_bskBASKET WHERE id_bskBASKET = %s""", (id_basket,))
delcount['bskBASKET'] += run_sql("""DELETE FROM bskBASKET WHERE id = %s""", (id_basket,))
delcount['bskREC'] += run_sql("""DELETE FROM bskREC WHERE id_bskBASKET = %s""", (id_basket,))
delcount['bskRECORDCOMMENT'] += run_sql("""DELETE FROM bskRECORDCOMMENT WHERE id_bskBASKET = %s""", (id_basket,))
write_message(""" SELECT DISTINCT ext.id, rec.id_bibrec_or_bskEXTREC FROM bskEXTREC AS ext \nLEFT JOIN bskREC AS rec ON ext.id=-rec.id_bibrec_or_bskEXTREC WHERE id_bibrec_or_bskEXTREC is NULL""", verbose=9)
try:
result = run_sql("""SELECT DISTINCT ext.id FROM bskEXTREC AS ext
LEFT JOIN bskREC AS rec ON ext.id=-rec.id_bibrec_or_bskEXTREC
WHERE id_bibrec_or_bskEXTREC is NULL""")
except Exception:
result = []
write_message(result, verbose=9)
write_message(""" DELETE FROM bskEXTREC WHERE id = 'TRAVERSE LAST RESULT' """, verbose=9)
write_message(""" DELETE FROM bskEXTFMT WHERE id_bskEXTREC = 'TRAVERSE LAST RESULT' \n""", verbose=9)
for (id_basket,) in result:
delcount['bskEXTREC'] += run_sql("""DELETE FROM bskEXTREC WHERE id=%s""", (id_basket,))
delcount['bskEXTFMT'] += run_sql("""DELETE FROM bskEXTFMT WHERE id_bskEXTREC=%s""", (id_basket,))
# 4 - DELETE ALERTS NOT OWNED BY ANY USER
write_message('- deleting alerts not owned by any user')
# select user ids in uqb that reference non-existent users
write_message("""SELECT DISTINCT uqb.id_user FROM user_query_basket AS uqb LEFT JOIN user AS u ON uqb.id_user = u.id WHERE u.id IS NULL""", verbose=9)
result = run_sql("""SELECT DISTINCT uqb.id_user FROM user_query_basket AS uqb LEFT JOIN user AS u ON uqb.id_user = u.id WHERE u.id IS NULL""")
write_message(result, verbose=9)
# delete all these entries
for (id_user,) in result:
write_message("""DELETE FROM user_query_basket WHERE id_user = 'TRAVERSE LAST RESULT """, verbose=9)
delcount['user_query_basket'] += run_sql("""DELETE FROM user_query_basket WHERE id_user = %s """, (id_user,))
# 5 - delete expired mailcookies
write_message("""mail_cookie_gc()""", verbose=9)
delcount['mail_cookie'] = mail_cookie_gc()
## 5b - delete expired not confirmed email address
write_message("""DELETE FROM user WHERE note='2' AND NOW()>ADDTIME(last_login, '%s 0:0:0')""" % CFG_WEBSESSION_NOT_CONFIRMED_EMAIL_ADDRESS_EXPIRE_IN_DAYS, verbose=9)
delcount['email_addresses'] = run_sql("""DELETE FROM user WHERE note='2' AND NOW()>ADDTIME(last_login, '%s 0:0:0')""", (CFG_WEBSESSION_NOT_CONFIRMED_EMAIL_ADDRESS_EXPIRE_IN_DAYS,))
# 6 - delete expired roles memberships
write_message("""DELETE FROM user_accROLE WHERE expiration<NOW()""", verbose=9)
delcount['role_membership'] = run_sql("""DELETE FROM user_accROLE WHERE expiration<NOW()""")
# print STATISTICS
write_message("""- statistics about deleted data: """)
write_message(""" %7s sessions.""" % (delcount['session'],))
write_message(""" %7s users.""" % (delcount['user'],))
write_message(""" %7s user_queries.""" % (delcount['user_query'],))
write_message(""" %7s queries.""" % (delcount['query'],))
write_message(""" %7s baskets.""" % (delcount['bskBASKET'],))
write_message(""" %7s user_baskets.""" % (delcount['user_bskBASKET'],))
write_message(""" %7s basket_records.""" % (delcount['bskREC'],))
write_message(""" %7s basket_external_records.""" % (delcount['bskEXTREC'],))
write_message(""" %7s basket_external_formats.""" % (delcount['bskEXTFMT'],))
write_message(""" %7s basket_comments.""" % (delcount['bskRECORDCOMMENT'],))
write_message(""" %7s user_query_baskets.""" % (delcount['user_query_basket'],))
write_message(""" %7s mail_cookies.""" % (delcount['mail_cookie'],))
write_message(""" %7s non confirmed email addresses.""" % delcount['email_addresses'])
write_message(""" %7s role_memberships.""" % (delcount['role_membership'],))
write_message("""CLEANING OF GUEST SESSIONS FINISHED""")
def main():
"""Main that construct all the bibtask."""
short_options = "lpgbdacTkoS"
long_options = ["logs",
"tempfiles",
"guests",
"bibxxx",
"documents",
"all",
"cache",
"tasks",
"check-tables",
"optimise-tables",
"sessions",
"bibedit-cache"]
task_init(authorization_action='runinveniogc',
authorization_msg="InvenioGC Task Submission",
help_specific_usage=" -l, --logs\t\tClean old logs.\n"
" -p, --tempfiles\tClean old temporary files.\n"
" -g, --guests\t\tClean expired guest user related information. [default action]\n"
" -b, --bibxxx\t\tClean unreferenced bibliographic values in bibXXx tables.\n"
" -c, --cache\t\tClean cache by removing old files.\n"
" -d, --documents\tClean deleted documents and revisions older than %s days.\n"
" -T, --tasks\t\tClean the BibSched queue removing/archiving old DONE tasks.\n"
" -a, --all\t\tClean all of the above (but do not run check/optimise table options below).\n"
" -k, --check-tables\tCheck DB tables to discover potential problems.\n"
" -o, --optimise-tables\tOptimise DB tables to increase performance.\n"
" -S, --sessions\tClean expired sessions from the DB.\n"
" --bibedit-cache Clean expired bibedit cache entries from the DB.\n" % CFG_DELETED_BIBDOC_MAXLIFE,
version=__revision__,
specific_params=(short_options, long_options),
task_submit_elaborate_specific_parameter_fnc=task_submit_elaborate_specific_parameter,
task_submit_check_options_fnc=task_submit_check_options,
task_run_fnc=task_run_core)
def task_submit_check_options():
if not task_get_option('logs') and \
not task_get_option('tempfiles') and \
not task_get_option('guests') and \
not task_get_option('bibxxx') and \
not task_get_option('documents') and \
not task_get_option('cache') and \
not task_get_option('tasks') and \
not task_get_option('check-tables') and \
not task_get_option('sessions') and \
not task_get_option('optimise-tables') and \
not task_get_option('bibedit-cache'):
task_set_option('sessions', True)
return True
def task_submit_elaborate_specific_parameter(key, value, opts, args):
""" Given the string key it checks it's meaning, eventually using the
value. Usually it fills some key in the options dict.
It must return True if it has elaborated the key, False, if it doesn't
know that key.
eg:
if key in ['-n', '--number']:
self.options['number'] = value
return True
return False
"""
if key in ('-l', '--logs'):
task_set_option('logs', True)
return True
elif key in ('-p', '--tempfiles'):
task_set_option('tempfiles', True)
return True
elif key in ('-g', '--guests'):
task_set_option('guests', True)
return True
elif key in ('-b', '--bibxxx'):
task_set_option('bibxxx', True)
return True
elif key in ('-d', '--documents'):
task_set_option('documents', True)
return True
elif key in ('-c', '--cache'):
task_set_option('cache', True)
return True
elif key in ('-T', '--tasks'):
task_set_option('tasks', True)
return True
elif key in ('-k', '--check-tables'):
task_set_option('check-tables', True)
return True
elif key in ('-o', '--optimise-tables'):
task_set_option('optimise-tables', True)
return True
elif key in ('-S', '--sessions'):
task_set_option('sessions', True)
return True
elif key == '--bibedit-cache':
task_set_option('bibedit-cache', True)
return True
elif key in ('-a', '--all'):
task_set_option('logs', True)
task_set_option('tempfiles', True)
task_set_option('guests', True)
task_set_option('bibxxx', True)
task_set_option('documents', True)
task_set_option('cache', True)
task_set_option('tasks', True)
task_set_option('sessions', True)
task_set_option('bibedit-cache', True)
return True
return False
def task_run_core():
""" Reimplement to add the body of the task."""
if task_get_option('guests'):
guest_user_garbage_collector()
if task_get_option('logs'):
clean_logs()
if task_get_option('tempfiles'):
clean_tempfiles()
if task_get_option('bibxxx'):
clean_bibxxx()
if task_get_option('documents'):
clean_documents()
if task_get_option('cache'):
clean_cache()
if task_get_option('tasks'):
gc_tasks()
if task_get_option('check-tables'):
check_tables()
if task_get_option('optimise-tables'):
optimise_tables()
if task_get_option('sessions'):
clean_sessions()
if task_get_option('bibedit-cache'):
clean_bibedit_cache()
return True
if __name__ == '__main__':
main()
| gpl-2.0 |
mwmuni/LIGGGHTS_GUI | networkx/algorithms/centrality/current_flow_closeness.py | 54 | 3615 | """Current-flow closeness centrality measures.
"""
# Copyright (C) 2010-2013 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import networkx as nx
from networkx.algorithms.centrality.flow_matrix import *
__author__ = """Aric Hagberg <aric.hagberg@gmail.com>"""
__all__ = ['current_flow_closeness_centrality', 'information_centrality']
def current_flow_closeness_centrality(G, weight='weight',
dtype=float, solver='lu'):
"""Compute current-flow closeness centrality for nodes.
Current-flow closeness centrality is a variant of closeness
centrality based on the effective resistance between nodes in
a network. This metric is also known as information centrality.
Parameters
----------
G : graph
A NetworkX graph
dtype: data type (float)
Default data type for internal matrices.
Set to np.float32 for lower memory consumption.
solver: string (default='lu')
Type of linear solver to use for computing the flow matrix.
Options are "full" (uses most memory), "lu" (recommended), and
"cg" (uses least memory).
Returns
-------
nodes : dictionary
Dictionary of nodes with current flow closeness centrality as the value.
See Also
--------
closeness_centrality
Notes
-----
The algorithm is from Brandes [1]_.
See also [2]_ for the original definition of information centrality.
References
----------
.. [1] Ulrik Brandes and Daniel Fleischer,
Centrality Measures Based on Current Flow.
Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
http://www.inf.uni-konstanz.de/algo/publications/bf-cmbcf-05.pdf
.. [2] Karen Stephenson and Marvin Zelen:
Rethinking centrality: Methods and examples.
Social Networks 11(1):1-37, 1989.
http://dx.doi.org/10.1016/0378-8733(89)90016-6
"""
from networkx.utils import reverse_cuthill_mckee_ordering
import numpy as np
import scipy
if G.is_directed():
raise nx.NetworkXError(
"current_flow_closeness_centrality() not defined for digraphs.")
if not nx.is_connected(G):
raise nx.NetworkXError("Graph not connected.")
solvername = {"full": FullInverseLaplacian,
"lu": SuperLUInverseLaplacian,
"cg": CGInverseLaplacian}
n = G.number_of_nodes()
ordering = list(reverse_cuthill_mckee_ordering(G))
# make a copy with integer labels according to rcm ordering
# this could be done without a copy if we really wanted to
H = nx.relabel_nodes(G, dict(zip(ordering, range(n))))
betweenness = dict.fromkeys(H, 0.0) # b[v]=0 for v in H
n = H.number_of_nodes()
L = laplacian_sparse_matrix(H, nodelist=range(n), weight=weight,
dtype=dtype, format='csc')
C2 = solvername[solver](L, width=1, dtype=dtype) # initialize solver
for v in H:
col = C2.get_row(v)
for w in H:
betweenness[v] += col[v]-2*col[w]
betweenness[w] += col[v]
for v in H:
betweenness[v] = 1.0 / (betweenness[v])
return dict((ordering[k], float(v)) for k, v in betweenness.items())
information_centrality = current_flow_closeness_centrality
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import numpy
except ImportError:
raise SkipTest("NumPy not available")
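# A minimal usage sketch (not part of the original module). It assumes
# NetworkX with SciPy available for the 'lu' solver; the graph and the
# printed values are illustrative only.
if __name__ == '__main__':
    G = nx.Graph()
    G.add_edges_from([(0, 1), (1, 2), (2, 3), (1, 3)])
    # 'lu' is the recommended solver; 'full' and 'cg' trade memory for speed.
    centrality = current_flow_closeness_centrality(G, solver='lu')
    for node, value in sorted(centrality.items()):
        print("%s %.4f" % (node, value))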
| gpl-3.0 |
eli-b/mongoengine | tests/test_datastructures.py | 23 | 3606 | import unittest
from mongoengine.base.datastructures import StrictDict, SemiStrictDict
class TestStrictDict(unittest.TestCase):
def strict_dict_class(self, *args, **kwargs):
return StrictDict.create(*args, **kwargs)
def setUp(self):
self.dtype = self.strict_dict_class(("a", "b", "c"))
def test_init(self):
d = self.dtype(a=1, b=1, c=1)
self.assertEqual((d.a, d.b, d.c), (1, 1, 1))
def test_init_fails_on_nonexisting_attrs(self):
self.assertRaises(AttributeError, lambda: self.dtype(a=1, b=2, d=3))
def test_eq(self):
d = self.dtype(a=1, b=1, c=1)
dd = self.dtype(a=1, b=1, c=1)
e = self.dtype(a=1, b=1, c=3)
f = self.dtype(a=1, b=1)
g = self.strict_dict_class(("a", "b", "c", "d"))(a=1, b=1, c=1, d=1)
h = self.strict_dict_class(("a", "c", "b"))(a=1, b=1, c=1)
i = self.strict_dict_class(("a", "c", "b"))(a=1, b=1, c=2)
self.assertEqual(d, dd)
self.assertNotEqual(d, e)
self.assertNotEqual(d, f)
self.assertNotEqual(d, g)
self.assertNotEqual(f, d)
self.assertEqual(d, h)
self.assertNotEqual(d, i)
def test_setattr_getattr(self):
d = self.dtype()
d.a = 1
self.assertEqual(d.a, 1)
self.assertRaises(AttributeError, lambda: d.b)
def test_setattr_raises_on_nonexisting_attr(self):
d = self.dtype()
def _f():
d.x = 1
self.assertRaises(AttributeError, _f)
def test_setattr_getattr_special(self):
d = self.strict_dict_class(["items"])
d.items = 1
self.assertEqual(d.items, 1)
def test_get(self):
d = self.dtype(a=1)
self.assertEqual(d.get('a'), 1)
self.assertEqual(d.get('b', 'bla'), 'bla')
def test_items(self):
d = self.dtype(a=1)
self.assertEqual(d.items(), [('a', 1)])
d = self.dtype(a=1, b=2)
self.assertEqual(d.items(), [('a', 1), ('b', 2)])
def test_mappings_protocol(self):
d = self.dtype(a=1, b=2)
assert dict(d) == {'a': 1, 'b': 2}
assert dict(**d) == {'a': 1, 'b': 2}
class TestSemiStrictDict(TestStrictDict):
def strict_dict_class(self, *args, **kwargs):
return SemiStrictDict.create(*args, **kwargs)
def test_init_fails_on_nonexisting_attrs(self):
# disable irrelevant test
pass
def test_setattr_raises_on_nonexisting_attr(self):
# disable irrelevant test
pass
def test_setattr_getattr_nonexisting_attr_succeeds(self):
d = self.dtype()
d.x = 1
self.assertEqual(d.x, 1)
def test_init_succeeds_with_nonexisting_attrs(self):
d = self.dtype(a=1, b=1, c=1, x=2)
self.assertEqual((d.a, d.b, d.c, d.x), (1, 1, 1, 2))
def test_iter_with_nonexisting_attrs(self):
d = self.dtype(a=1, b=1, c=1, x=2)
self.assertEqual(list(d), ['a', 'b', 'c', 'x'])
def test_iteritems_with_nonexisting_attrs(self):
d = self.dtype(a=1, b=1, c=1, x=2)
self.assertEqual(list(d.iteritems()), [('a', 1), ('b', 1), ('c', 1), ('x', 2)])
def test_cmp_with_strict_dicts(self):
d = self.dtype(a=1, b=1, c=1)
dd = StrictDict.create(("a", "b", "c"))(a=1, b=1, c=1)
self.assertEqual(d, dd)
def test_cmp_with_strict_dict_with_nonexisting_attrs(self):
d = self.dtype(a=1, b=1, c=1, x=2)
dd = StrictDict.create(("a", "b", "c", "x"))(a=1, b=1, c=1, x=2)
self.assertEqual(d, dd)
if __name__ == '__main__':
unittest.main()
| mit |
lafranceinsoumise/api-django | agir/people/management/commands/mailtrain_update.py | 1 | 1382 | from datetime import datetime
import string
from uuid import UUID
from django.core.management import BaseCommand
from django.utils import timezone
from agir.lib.mailtrain import update_person
from agir.people.models import Person
PADDING = "0000000-0000-0000-0000-000000000000"
class Command(BaseCommand):
help = "Synchronize all the database with mailtrain"
def handle(self, *args, **kwargs):
start = datetime.now()
i = 0
# Spread the full sync over an 8-day cycle: string.hexdigits is
# '0123456789abcdefABCDEF', so day % 8 * 2 indexes one of the even
# digits '0', '2', ..., 'e', splitting the UUID keyspace into eight
# contiguous slices, one slice per day.
min_letter = string.hexdigits[timezone.now().day % 8 * 2]
max_letter = string.hexdigits[(timezone.now().day + 1) % 8 * 2]
qs = Person.objects.filter(id__gte=UUID(min_letter + PADDING))
# The last slice of the cycle wraps around past 'e', so only bound
# the queryset from above when it has not wrapped.
if max_letter > min_letter:
qs = qs.filter(id__lt=UUID(max_letter + PADDING))
try:
for person in qs.iterator():
update_person(person)
if kwargs["verbosity"] > 1:
print("Updated %s " % person.email)
i += 1
except Exception as e:
duration = datetime.now() - start
print(
f"Updated {i} people out of {qs.count()} in {duration.seconds} seconds."
)
raise e
duration = datetime.now() - start
print(
f"Updated people from {min_letter} to {max_letter} ({str(i)}) in {str(duration.seconds)} seconds."
)
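# A standalone sketch (hypothetical, not part of the command) that prints
# which UUID slice each day of the 8-day cycle covers, using the same
# hexdigits arithmetic as handle() above.
if __name__ == "__main__":
    for day in range(8):
        lo = string.hexdigits[day % 8 * 2]
        hi = string.hexdigits[(day + 1) % 8 * 2]
        if hi > lo:
            print(f"day % 8 == {day}: ids in [{lo}..., {hi}...)")
        else:
            print(f"day % 8 == {day}: ids in [{lo}..., ff...]")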
| agpl-3.0 |
DayGitH/Python-Challenges | DailyProgrammer/DP20140625B.py | 1 | 4512 | """
[6/25/2014] Challenge #168 [Intermediate] Block Count, Length & Area
https://www.reddit.com/r/dailyprogrammer/comments/291x9h/6252014_challenge_168_intermediate_block_count/
#Description:
In construction there comes a need to compute the length and area of a jobsite. The areas and lengths computed are used
by estimators
to price out the cost to build that jobsite. If for example a jobsite was a building with a parking lot and had
concrete walkways and some nice
pavers and landscaping it would be good to know the areas of all these and some lengths (for concrete curbs, landscape
headerboard, etc)
So for today's challenge we are going to automate the tedious process of calculating the length and area of aerial
plans or photos.
#ASCII Photo:
To keep this within our scope we have converted the plans into an ASCII picture. We have scaled the plans so 1
character is a square
with dimensions of 10 ft x 10 ft.
The photo is case sensitive. so a "O" and "o" are 2 different blocks of areas to compute.
#Blocks Counts, Lengths and Areas:
Some shorthand to follow:
* SF = square feet
* LF = linear feet
If you have the following picture.
####
OOOO
####
mmmm
* # has a block count of 2. we have 2 areas not joined made up of #
* O and m have a block count of 1. they only have 1 areas each made up of their ASCII character.
* O has 4 blocks. Each block is 100 SF and so you have 400 SF of O.
* O has a circumference length of that 1 block count of 100 LF.
* m also has 4 blocks so there is 400 SF of m and circumference length of 100 LF
* # has 2 block counts each of 4. So # has a total area of 800 SF and a total circumference length of 200 LF.
Pay close attention to how "#" was handled. It was seen as being 2 areas made up of # but the final length and area
adds them together even though they are not joined. It recognizes the two areas by having a block count of 2 (2 non-joined
areas made up of "#" characters) while the others only have a block count of 1.
#Input:
Your input is a 2-D ASCII picture. The ASCII characters used are any non-whitespace characters.
##Example:
####
@@oo
o*@!
****
#Output:
You give a Length and Area report of all the blocks.
##Example: (using the example input)
Block Count, Length & Area Report
=================================
#: Total SF (400), Total Circumference LF (100) - Found 1 block
@: Total SF (300), Total Circumference LF (100) - Found 2 blocks
o: Total SF (300), Total Circumference LF (100) - Found 2 blocks
*: Total SF (500), Total Circumference LF (120) - Found 1 block
!: Total SF (100), Total Circumference LF (40) - Found 1 block
#Easy Mode (optional):
Remove the need to compute the block count. Just focus on area and circumference length.
#Challenge Input:
So we have a "B" building. It has a "D" driveway. "O" and "o" landscaping. "c" concrete walks. "p" pavers. "V" & "v"
valley gutters. @ and T tree planting.
Finally we have # as Asphalt Paving.
ooooooooooooooooooooooDDDDDooooooooooooooooooooooooooooo
ooooooooooooooooooooooDDDDDooooooooooooooooooooooooooooo
ooo##################o#####o#########################ooo
o@o##################o#####o#########################ooo
ooo##################o#####o#########################oTo
o@o##################################################ooo
ooo##################################################oTo
o@o############ccccccccccccccccccccccc###############ooo
pppppppppppppppcOOOOOOOOOOOOOOOOOOOOOc###############oTo
o@o############cOBBBBBBBBBBBBBBBBBBBOc###############ooo
ooo####V#######cOBBBBBBBBBBBBBBBBBBBOc###############oTo
o@o####V#######cOBBBBBBBBBBBBBBBBBBBOc###############ooo
ooo####V#######cOBBBBBBBBBBBBBBBBBBBOcpppppppppppppppppp
o@o####V#######cOBBBBBBBBBBBBBBBBBBBOc###############ooo
ooo####V#######cOBBBBBBBBBBBBBBBBBBBOc######v########oTo
o@o####V#######cOBBBBBBBBBBBBBBBBBBBOc######v########ooo
ooo####V#######cOOOOOOOOOOOOOOOOOOOOOc######v########oTo
o@o####V#######ccccccccccccccccccccccc######v########ooo
ooo####V#######ppppppppppppppppppppppp######v########oTo
o@o############ppppppppppppppppppppppp###############ooo
oooooooooooooooooooooooooooooooooooooooooooooooooooooooo
oooooooooooooooooooooooooooooooooooooooooooooooooooooooo
#FAQ:
Diagonals do not connect. The small example shows this. The @ areas are 2 blocks and not 1 because of the Diagonal.
"""
def main():
pass
if __name__ == "__main__":
main()
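# A possible implementation sketch (not part of the original stub): flood-fill
# the grid to find 4-connected blocks per character, then report the area
# (100 SF per cell) and circumference (10 LF per exposed cell edge). The
# example grid is the small one from the challenge text.
def block_report(grid):
    seen = set()
    stats = {}  # char -> [block count, cell count, exposed edges]
    for r, row in enumerate(grid):
        for c, ch in enumerate(row):
            entry = stats.setdefault(ch, [0, 0, 0])
            entry[1] += 1
            # an edge counts toward the circumference when it borders the
            # outside of the grid or a different character
            for dr, dc in ((1, 0), (-1, 0), (0, 1), (0, -1)):
                nr, nc = r + dr, c + dc
                if not (0 <= nr < len(grid) and 0 <= nc < len(grid[nr])
                        and grid[nr][nc] == ch):
                    entry[2] += 1
            if (r, c) in seen:
                continue
            # flood fill a new 4-connected block of ch
            entry[0] += 1
            stack = [(r, c)]
            seen.add((r, c))
            while stack:
                rr, cc = stack.pop()
                for dr, dc in ((1, 0), (-1, 0), (0, 1), (0, -1)):
                    nr, nc = rr + dr, cc + dc
                    if (0 <= nr < len(grid) and 0 <= nc < len(grid[nr])
                            and grid[nr][nc] == ch and (nr, nc) not in seen):
                        seen.add((nr, nc))
                        stack.append((nr, nc))
    for ch in sorted(stats):
        blocks, cells, edges = stats[ch]
        print("%s: Total SF (%d), Total Circumference LF (%d) - Found %d block%s"
              % (ch, cells * 100, edges * 10, blocks, '' if blocks == 1 else 's'))
if __name__ == "__main__":
    block_report(["####", "@@oo", "o*@!", "****"])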
| mit |
newemailjdm/pybrain | pybrain/rl/environments/classic/mountaincar.py | 26 | 3378 | __author__ = 'Tom Schaul, tom@idsia.ch'
"""
Adaptation of the MountainCar Environment
from the "FAReinforcement" library
of Jose Antonio Martin H. (version 1.0).
"""
from scipy import array, cos
from pybrain.rl.environments.episodic import EpisodicTask
class MountainCar(EpisodicTask):
# The current real values of the state
cur_pos = -0.5
cur_vel = 0.0
cur_state = [cur_pos, cur_vel]
#The number of actions.
action_list = (-1.0 , 0.0 , 1.0)
nactions = len(action_list)
nsenses = 3
# number of steps of the current trial
steps = 0
# number of the current episode
episode = 0
# Goal Position
goalPos = 0.45
maxSteps = 999
resetOnSuccess = False
def __init__(self):
self.nactions = len(self.action_list)
self.reset()
self.cumreward = 0
def reset(self):
self.state = self.GetInitialState()
def getObservation(self):
return array([self.state[0], self.state[1] * 100, 1])
def performAction(self, action):
if self.done > 0:
self.done += 1
else:
self.state = self.DoAction(action, self.state)
self.r, self.done = self.GetReward(self.state)
self.cumreward += self.r
def getReward(self):
return self.r
def GetInitialState(self):
self.StartEpisode()
return [-0.5, 0.]
def StartEpisode(self):
self.steps = 0
self.episode = self.episode + 1
self.done = 0
def isFinished(self):
if self.done>=3 and self.resetOnSuccess:
self.reset()
return False
else:
return self.done>=3
def GetReward(self, s):
# GetReward returns the reward at the current state.
# s: a vector of position and velocity of the car
# r: the returned reward.
# f: nonzero once the car reached the goal or the step limit was hit
position = s[0]
vel = s[1]
# bound for position; the goal is to reach position = 0.45
bpright = self.goalPos
r = 0
f = 0
if position >= bpright:
r = 1
f = 1
if self.steps >= self.maxSteps:
f = 5
return r, f
def DoAction(self, a, s):
# DoAction executes action `a` on the mountain car:
# a: index into action_list giving the force to be applied to the car
# s: the vector [position, speed] of the car
# returns the new [position, speed] vector
force = self.action_list[a]
self.steps = self.steps + 1
position = s[0]
speed = s[1]
# bounds for position
bpleft = -1.4
# bounds for speed
bsleft = -0.07
bsright = 0.07
speedt1 = speed + (0.001 * force) + (-0.0025 * cos(3.0 * position))
if speedt1 < bsleft:
speedt1 = bsleft
elif speedt1 > bsright:
speedt1 = bsright
post1 = position + speedt1
if post1 <= bpleft:
post1 = bpleft
speedt1 = 0.0
return [post1, speedt1]
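# A minimal interaction sketch (not part of the original module), assuming
# PyBrain and SciPy are installed: run one episode under a random policy.
# Action indices 0..2 map to the forces in action_list; the episode ends
# once the goal is reached or maxSteps is exceeded.
if __name__ == '__main__':
    from random import randrange
    task = MountainCar()
    while not task.isFinished():
        task.performAction(randrange(task.nactions))
    print('steps: %d, cumulative reward: %s' % (task.steps, task.cumreward))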
| bsd-3-clause |
kxliugang/edx-platform | common/test/acceptance/pages/studio/video/video.py | 45 | 22324 | """
CMS Video
"""
import time
import os
import requests
from bok_choy.promise import EmptyPromise, Promise
from bok_choy.javascript import wait_for_js, js_defined
from ....tests.helpers import YouTubeStubConfig
from ...lms.video.video import VideoPage
from ...common.utils import wait_for_notification
from selenium.webdriver.common.keys import Keys
CLASS_SELECTORS = {
'video_container': 'div.video',
'video_init': '.is-initialized',
'video_xmodule': '.xmodule_VideoModule',
'video_spinner': '.video-wrapper .spinner',
'video_controls': 'section.video-controls',
'attach_asset': '.upload-dialog > input[type="file"]',
'upload_dialog': '.wrapper-modal-window-assetupload',
'xblock': '.add-xblock-component',
'slider_range': '.slider-range',
'error': '.transcripts-error-message',
'url_inputs': '.videolist-settings-item input.input',
'collapse_bar': '.videolist-extra-videos',
'status': '.transcripts-message-status',
'attach_transcript': '.file-chooser > input[type="file"]',
}
BUTTON_SELECTORS = {
'create_video': 'button[data-category="video"]',
'handout_download': '.video-handout.video-download-button a',
'handout_download_editor': '.wrapper-comp-setting.file-uploader .download-action',
'upload_asset': '.upload-action',
'asset_submit': '.action-upload',
'handout_clear': '.wrapper-comp-setting.file-uploader .setting-clear',
'translations_clear': '.metadata-video-translations .setting-clear',
'translation_add': '.wrapper-translations-settings > a',
'import': '.setting-import',
'download_to_edit': '.setting-download',
'disabled_download_to_edit': '.setting-download.is-disabled',
'upload_new_timed_transcripts': '.setting-upload',
'replace': '.setting-replace',
'choose': '.setting-choose',
'use_existing': '.setting-use-existing',
'collapse_link': '.collapse-action.collapse-setting',
}
DISPLAY_NAME = "Component Display Name"
DEFAULT_SETTINGS = [
# basic
[DISPLAY_NAME, 'Video', False],
['Default Video URL', 'http://youtu.be/3_yD_cEKoCk, , ', False],
# advanced
[DISPLAY_NAME, 'Video', False],
['Default Timed Transcript', '', False],
['Download Transcript Allowed', 'False', False],
['Downloadable Transcript URL', '', False],
['Show Transcript', 'True', False],
['Transcript Languages', '', False],
['Upload Handout', '', False],
['Video Available on Web Only', 'False', False],
['Video Download Allowed', 'False', False],
['Video File URLs', '', False],
['Video ID', '', False],
['Video Start Time', '00:00:00', False],
['Video Stop Time', '00:00:00', False],
['YouTube ID', '3_yD_cEKoCk', False],
['YouTube ID for .75x speed', '', False],
['YouTube ID for 1.25x speed', '', False],
['YouTube ID for 1.5x speed', '', False]
]
# We should wait 300 ms for event handler invocation + 200ms for safety.
DELAY = 0.5
@js_defined('window.Video', 'window.RequireJS.require', 'window.jQuery', 'window.XModule', 'window.XBlock',
'window.MathJax', 'window.MathJax.isReady')
class VideoComponentPage(VideoPage):
"""
CMS Video Component Page
"""
url = None
@wait_for_js
def is_browser_on_page(self):
return self.q(css='div{0}'.format(CLASS_SELECTORS['video_xmodule'])).present or self.q(
css='div{0}'.format(CLASS_SELECTORS['xblock'])).present
def get_element_selector(self, class_name, vertical=False):
return super(VideoComponentPage, self).get_element_selector(class_name, vertical=vertical)
def _wait_for(self, check_func, desc, result=False, timeout=30):
"""
Calls the method provided as an argument until the Promise satisfied or BrokenPromise
Arguments:
check_func (callable): Promise function to be fulfilled.
desc (str): Description of the Promise, used in log messages.
result (bool): Indicates whether we need result from Promise or not
timeout (float): Maximum number of seconds to wait for the Promise to be satisfied before timing out.
"""
if result:
return Promise(check_func, desc, timeout=timeout).fulfill()
else:
return EmptyPromise(check_func, desc, timeout=timeout).fulfill()
def wait_for_video_component_render(self):
"""
Wait until video component rendered completely
"""
if not YouTubeStubConfig.get_configuration().get('youtube_api_blocked'):
self._wait_for(lambda: self.q(css=CLASS_SELECTORS['video_init']).present, 'Video Player Initialized')
self._wait_for(lambda: not self.q(css=CLASS_SELECTORS['video_spinner']).visible,
'Video Buffering Completed')
self._wait_for(self.is_controls_visible, 'Player Controls are Visible')
@wait_for_js
def is_controls_visible(self):
"""
Get current visibility state of all video controls.
Returns:
bool: True means video controls are visible for all videos, False means video controls are not visible
for one or more videos
"""
return self.q(css=CLASS_SELECTORS['video_controls']).visible
def click_button(self, button_name, index=0, require_notification=False):
"""
Click on a button as specified by `button_name`
Arguments:
button_name (str): button name
index (int): query index
"""
self.q(css=BUTTON_SELECTORS[button_name]).nth(index).click()
if require_notification:
wait_for_notification(self)
self.wait_for_ajax()
@staticmethod
def file_path(filename):
"""
Construct file path to be uploaded to assets.
Arguments:
filename (str): asset filename
"""
return os.sep.join(__file__.split(os.sep)[:-5]) + '/data/uploads/' + filename
def upload_handout(self, handout_filename):
"""
Upload a handout file to assets
Arguments:
handout_filename (str): handout file name
"""
self.upload_asset(handout_filename)
def upload_asset(self, asset_filename, asset_type='handout', index=0):
"""
Upload a asset file to assets
Arguments:
asset_filename (str): asset file name
asset_type (str): one of `handout`, `transcript`
index (int): query index
"""
asset_file_path = self.file_path(asset_filename)
self.click_button('upload_asset', index)
self.q(css=CLASS_SELECTORS['attach_asset']).results[0].send_keys(asset_file_path)
self.click_button('asset_submit')
# Only srt-format transcript files can be uploaded. If an error
# occurs due to an incorrect transcript file, we return from here.
if asset_type == 'transcript' and self.q(css='#upload_error').present:
return
# confirm upload completion
self._wait_for(lambda: not self.q(css=CLASS_SELECTORS['upload_dialog']).present, 'Upload Completed')
def clear_handout(self):
"""
Clear handout from settings
"""
self.click_button('handout_clear')
def _get_handout(self, url):
"""
Download handout at `url`
"""
kwargs = dict()
session_id = [{i['name']: i['value']} for i in self.browser.get_cookies() if i['name'] == u'sessionid']
if session_id:
kwargs.update({
'cookies': session_id[0]
})
response = requests.get(url, **kwargs)
return response.status_code < 400, response.headers
def download_handout(self, mime_type, is_editor=False):
"""
Download handout with mime type specified by `mime_type`
Arguments:
mime_type (str): mime type of handout file
Returns:
tuple: Handout download result.
"""
selector = BUTTON_SELECTORS['handout_download_editor'] if is_editor else BUTTON_SELECTORS['handout_download']
handout_url = self.q(css=selector).attrs('href')[0]
result, headers = self._get_handout(handout_url)
return result, headers['content-type'] == mime_type
@property
def is_handout_button_visible(self):
"""
Check if handout download button is visible
"""
return self.q(css=BUTTON_SELECTORS['handout_download']).visible
def create_video(self):
"""
Create a Video Component by clicking on Video button and wait for rendering completion.
"""
# Create video
self.click_button('create_video', require_notification=True)
self.wait_for_video_component_render()
def xblocks(self):
"""
Tells the total number of video xblocks present on the current unit page.
Returns:
(int): total video xblocks
"""
return len(self.q(css='.xblock-header').filter(
lambda el: 'xblock-header-video' in el.get_attribute('class')).results)
def focus_caption_line(self, line_number):
"""
Focus a caption line as specified by `line_number`
Arguments:
line_number (int): caption line number
"""
caption_line_selector = ".subtitles > li[data-index='{index}']".format(index=line_number - 1)
self.q(css=caption_line_selector).results[0].send_keys(Keys.ENTER)
def is_caption_line_focused(self, line_number):
"""
Check if a caption line focused
Arguments:
line_number (int): caption line number
"""
caption_line_selector = ".subtitles > li[data-index='{index}']".format(index=line_number - 1)
attributes = self.q(css=caption_line_selector).attrs('class')
return 'focused' in attributes
@property
def is_slider_range_visible(self):
"""
Return True if slider range is visible.
"""
return self.q(css=CLASS_SELECTORS['slider_range']).visible
def verify_settings(self):
"""
Verify that video component has correct default settings.
"""
query = '.wrapper-comp-setting'
settings = self.q(css=query).results
if len(DEFAULT_SETTINGS) != len(settings):
return False
for counter, setting in enumerate(settings):
is_verified = self._verify_setting_entry(setting,
DEFAULT_SETTINGS[counter][0],
DEFAULT_SETTINGS[counter][1])
if not is_verified:
return is_verified
return True
@staticmethod
def _verify_setting_entry(setting, field_name, field_value):
"""
Verify a `setting` entry.
Arguments:
setting (WebElement): Selenium WebElement
field_name (str): Name of field
field_value (str): Value of field
Returns:
bool: Does `setting` have correct value.
"""
if field_name != setting.find_element_by_class_name('setting-label').get_attribute('innerHTML'):
return False
# Get class attribute values
classes = setting.get_attribute('class').split()
list_type_classes = ['metadata-list-enum', 'metadata-dict', 'metadata-video-translations']
is_list_type = any(list_type in classes for list_type in list_type_classes)
if is_list_type:
current_value = ', '.join(
ele.get_attribute('value') for ele in setting.find_elements_by_class_name('list-settings-item'))
elif 'metadata-videolist-enum' in setting.get_attribute('class'):
current_value = ', '.join(item.find_element_by_tag_name('input').get_attribute('value') for item in
setting.find_elements_by_class_name('videolist-settings-item'))
else:
current_value = setting.find_element_by_class_name('setting-input').get_attribute('value')
if field_value != current_value:
return False
# Clear button should be visible(active class is present) for
# every setting that don't have 'metadata-videolist-enum' class
if 'metadata-videolist-enum' not in setting.get_attribute('class'):
setting_clear_button = setting.find_elements_by_class_name('setting-clear')[0]
if 'active' not in setting_clear_button.get_attribute('class'):
return False
return True
def set_field_value(self, field_name, field_value, field_type='input'):
"""
Set settings input `field` with `value`
Arguments:
field_name (str): Name of field
field_value (str): Name of value
field_type (str): `input`, `select` etc(more to be added later)
"""
query = '.wrapper-comp-setting > label:nth-child(1)'
field_id = ''
if field_type == 'input':
for index, _ in enumerate(self.q(css=query)):
if field_name in self.q(css=query).nth(index).text[0]:
field_id = self.q(css=query).nth(index).attrs('for')[0]
break
self.q(css='#{}'.format(field_id)).fill(field_value)
elif field_type == 'select':
self.q(css='select[name="{0}"] option[value="{1}"]'.format(field_name, field_value)).first.click()
def verify_field_value(self, field_name, field_value):
"""
Get settings value of `field_name`
Arguments:
field_name (str): Name of field
field_value (str): Name of value
Returns:
bool: If `field_name` has `field_value`
"""
_, setting = self._get_setting_entry(field_name)
return self._verify_setting_entry(setting, field_name, field_value)
def _get_setting_entry(self, field_name):
"""
Get setting entry of `field_name`
Arguments:
field_name (str): Name of field
Returns:
setting (WebElement): Selenium WebElement
"""
for index, setting in enumerate(self.q(css='.wrapper-comp-setting').results):
if setting.find_element_by_class_name('setting-label').get_attribute('innerHTML') == field_name:
return index, setting
def translations_count(self):
"""
Get count of translations.
"""
return len(self.q(css='.wrapper-translations-settings .list-settings-item').results)
def select_translation_language(self, language_code, index=0):
"""
Select translation language as specified by `language_code`
Arguments:
language_code (str):
index (int): query index
"""
translations_items = '.wrapper-translations-settings .list-settings-item'
language_selector = translations_items + ' select option[value="{}"]'.format(language_code)
self.q(css=language_selector).nth(index).click()
def upload_translation(self, transcript_name, language_code):
"""
Upload a translation file.
Arguments:
transcript_name (str):
language_code (str):
"""
self.click_button('translation_add')
translations_count = self.translations_count()
self.select_translation_language(language_code, translations_count - 1)
self.upload_asset(transcript_name, asset_type='transcript', index=translations_count - 1)
def replace_translation(self, old_lang_code, new_lang_code, transcript_name):
"""
Replace a translation.
Arguments:
old_lang_code (str):
new_lang_code (str):
transcript_name (str):
"""
language_codes = self.translations()
index = language_codes.index(old_lang_code)
self.select_translation_language(new_lang_code, index)
self.upload_asset(transcript_name, asset_type='transcript', index=index)
def translations(self):
"""
Extract translations
Returns:
list: list of translation language codes
"""
translations_selector = '.metadata-video-translations .remove-setting'
return self.q(css=translations_selector).attrs('data-lang')
def download_translation(self, language_code, text_to_search):
"""
Download a translation having `language_code` and containing `text_to_search`
Arguments:
language_code (str): language code
text_to_search (str): text to search in translation
Returns:
bool: whether download was successful
"""
mime_type = 'application/x-subrip'
lang_code = '/{}?'.format(language_code)
link = [link for link in self.q(css='.download-action').attrs('href') if lang_code in link]
result, headers, content = self._get_transcript(link[0])
return result is True and mime_type in headers['content-type'] and text_to_search in content.decode('utf-8')
def remove_translation(self, language_code):
"""
Remove a translation having `language_code`
Arguments:
language_code (str): language code
"""
self.q(css='.remove-action').filter(lambda el: language_code == el.get_attribute('data-lang')).click()
@property
def upload_status_message(self):
"""
Get asset upload status message
"""
return self.q(css='#upload_error').text[0]
def captions_lines(self):
"""
Extract partial caption lines.
As all the captions lines are exactly same so only getting partial lines will work.
"""
self.wait_for_captions()
selector = '.subtitles > li:nth-child({})'
return ' '.join([self.q(css=selector.format(i)).text[0] for i in range(1, 6)])
def set_url_field(self, url, field_number):
"""
Set video url field in basic settings tab.
Arguments:
url (str): video url
field_number (int): video url field number
"""
if self.q(css=CLASS_SELECTORS['collapse_bar']).visible is False:
self.click_button('collapse_link')
self.q(css=CLASS_SELECTORS['url_inputs']).nth(field_number - 1).fill(url)
time.sleep(DELAY)
self.wait_for_ajax()
def message(self, message_type):
"""
Get video url field status/error message.
Arguments:
message_type(str): type(status, error) of message
Returns:
str: status/error message
"""
if message_type == 'status':
self.wait_for_element_visibility(CLASS_SELECTORS[message_type],
'{} message is Visible'.format(message_type.title()))
return self.q(css=CLASS_SELECTORS[message_type]).text[0]
def url_field_status(self, *field_numbers):
"""
Get video url field status (enabled/disabled).
Arguments:
field_numbers (tuple or None): field numbers to check status for; None means get status for all.
tuple items will be integers and must start from 1
Returns:
dict: field numbers as keys and field status(bool) as values, False means a field is disabled
"""
if field_numbers:
index_list = [number - 1 for number in field_numbers]
else:
index_list = range(3) # maximum three fields
statuses = {}
for index in index_list:
status = 'is-disabled' not in self.q(css=CLASS_SELECTORS['url_inputs']).nth(index).attrs('class')[0]
statuses[index + 1] = status
return statuses
def clear_field(self, index):
"""
Clear a video url field at index specified by `index`.
"""
self.q(css=CLASS_SELECTORS['url_inputs']).nth(index - 1).fill('')
# Trigger an 'input' event after filling the field with an empty value.
self.browser.execute_script(
"$('{}:eq({})').trigger('{}')".format(CLASS_SELECTORS['url_inputs'], index, 'input'))
time.sleep(DELAY)
self.wait_for_ajax()
def clear_fields(self):
"""
Clear video url fields.
"""
script = """
$('{selector}')
.prop('disabled', false)
.removeClass('is-disabled')
.val('')
.trigger('input');
""".format(selector=CLASS_SELECTORS['url_inputs'])
self.browser.execute_script(script)
time.sleep(DELAY)
self.wait_for_ajax()
def revert_field(self, field_name):
"""
Revert a field.
"""
_, setting = self._get_setting_entry(field_name)
setting.find_element_by_class_name('setting-clear').click()
def is_transcript_button_visible(self, button_name, index=0, button_text=None):
"""
Check if a transcript related button is visible.
Arguments:
button_name (str): name of button
index (int): query index
button_text (str or None): text to match with text on a button, if None then don't match texts
Returns:
bool: is button visible
"""
is_visible = self.q(css=BUTTON_SELECTORS[button_name]).nth(index).visible
is_text_matched = True
if button_text and button_text != self.q(css=BUTTON_SELECTORS[button_name]).nth(index).text[0]:
is_text_matched = False
return is_visible and is_text_matched
def upload_transcript(self, transcript_filename):
"""
Upload a Transcript
Arguments:
transcript_filename (str): name of transcript file
"""
# Show the Browse Button
self.browser.execute_script("$('form.file-chooser').show()")
asset_file_path = self.file_path(transcript_filename)
self.q(css=CLASS_SELECTORS['attach_transcript']).results[0].send_keys(asset_file_path)
# confirm upload completion
self._wait_for(lambda: not self.q(css=CLASS_SELECTORS['attach_transcript']).visible, 'Upload Completed')
| agpl-3.0 |
tiborsimko/invenio-ext | invenio_ext/logging/backends/fs.py | 5 | 2497 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Rotating file log handler for writing logs to the file system.
**Configuration**
======================== ======================================================
`LOGGING_FS_BACKUPCOUNT` Number of files to keep. **Default:** ``5``.
`LOGGING_FS_MAXBYTES` Max file size in bytes. **Default:** ``104857600``
(100 MB).
`LOGGING_FS_LEVEL` Log level threshold for handler. **Default:**
``WARNING``.
======================== ======================================================
"""
from __future__ import absolute_import
import logging
import os
from logging.handlers import RotatingFileHandler
def setup_app(app):
"""Filesystem logging handler."""
app.config.setdefault('LOGGING_FS_BACKUPCOUNT', 5)
app.config.setdefault('LOGGING_FS_MAXBYTES', 104857600) # 100mb
app.config.setdefault(
'LOGGING_FS_LEVEL',
'DEBUG' if app.debug else 'WARNING'
)
# Create log directory if it does not exists
try:
os.makedirs(
os.path.join(app.instance_path, app.config.get('CFG_LOGDIR', ''))
)
except Exception:
pass
handler = RotatingFileHandler(
os.path.join(
app.instance_path,
app.config.get('CFG_LOGDIR', ''),
app.logger_name + '.log'
),
backupCount=app.config['LOGGING_FS_BACKUPCOUNT'],
maxBytes=app.config['LOGGING_FS_MAXBYTES']
)
handler.setFormatter(logging.Formatter(
'%(asctime)s %(levelname)s: %(message)s '
'[in %(pathname)s:%(lineno)d]'
))
handler.setLevel(app.config['LOGGING_FS_LEVEL'])
# Add handler to application logger
app.logger.addHandler(handler)
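# A minimal usage sketch (hypothetical, not part of the module). It assumes
# an older Flask where ``app.logger_name`` still exists, since setup_app()
# relies on it; the instance path and thresholds are illustrative only.
if __name__ == '__main__':
    from flask import Flask
    app = Flask(__name__, instance_path=os.path.abspath('instance'))
    app.config['LOGGING_FS_LEVEL'] = 'INFO'
    app.config['LOGGING_FS_MAXBYTES'] = 10 * 1024 * 1024  # rotate at 10 MB
    app.config['LOGGING_FS_BACKUPCOUNT'] = 3
    setup_app(app)
    app.logger.info('file logging configured')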
| gpl-2.0 |