gt stringclasses 1
value | context stringlengths 2.49k 119k |
|---|---|
import xmlrpclib
import ooop
from datetime import timedelta
from sact.epoch import Time, TzLocal, UTC
from kids.cache import cache
## SQL LIKE pattern matching a "{tag}" marker at the start of a record
## name ("%%" escapes the literal LIKE wildcard appended after the tag).
OOOP_NAME_TAG_LIKE_EXPR = "{%s}%%"
## Re-exported so callers can catch login failures from this module
## without importing ooop themselves.
LoginFailed = ooop.LoginFailed
## XXXvlab: should propose a modification to OOOP code to get this
## function accessible outside from an instanced object.
def ooop_normalize_model_name(name):
    """Turn an OpenERP model name into OOOP's CamelCase attribute name.

    For instance ``"res.partner"`` becomes ``"ResPartner"``.
    """
    camel = ""
    for chunk in name.split('.'):
        camel += chunk.capitalize()
    return camel
def xmlid2tuple(xmlid, default_module=None):
    """Split an XML id into a ``(module, local_id)`` pair.

    When *xmlid* carries no module part (no dot), *default_module* is
    used as the module component.
    """
    try:
        module, local_id = xmlid.split('.', 1)
    except ValueError:
        ## No "." in the id: fall back to the default module.
        return default_module, xmlid
    return module, local_id
def tuple2xmlid(module_id, default_module=None):
    """Join a ``(module, local_id)`` pair back into an XML id string.

    The module part is omitted when it is ``None`` or equal to
    *default_module*.

    The original signature used Python 2 tuple parameter unpacking,
    which was removed by PEP 3113 and is a syntax error on Python 3;
    the pair is now unpacked explicitly.  Callers still pass the same
    2-tuple, so the interface is unchanged.
    """
    module, local_id = module_id
    if module is None or (default_module and module == default_module):
        return local_id
    return "%s.%s" % (module, local_id)
def obj2dct(obj):
    """Gets simple displayable fields of obj in a dict.

    Only "simple" fields are kept: relational fields (whose ttype
    contains a "2", e.g. many2one, one2many, many2many) are skipped.
    The object reference is exposed under the "id" key.
    """
    ## ``items()`` instead of the Python-2-only ``iteritems()`` keeps
    ## this helper usable under both Python 2 and 3.
    dct = dict((k, getattr(obj, k)) for k, d in obj.fields.items()
               if "2" not in d['ttype'])
    dct["id"] = obj._ref
    return dct
def _date_from_find_spec(spec, lapse_type):
    """Translate a "+N"/"-N"/"N" age spec into a ``(start, end)`` pair.

    ``lapse_type`` is a ``timedelta`` keyword ("hours", "minutes" or
    "days").  "+N" selects anything older than N lapses ago, "-N"
    anything newer, and a bare "N" the one-lapse-wide window starting
    N lapses ago.  ``None`` stands for an open bound.
    """
    signed = spec.startswith("+") or spec.startswith("-")
    count = float(spec[1:]) if signed else float(spec)
    boundary = Time.now() - timedelta(**{lapse_type: int(count)})
    if spec.startswith("+"):
        return None, boundary
    if spec.startswith("-"):
        return boundary, None
    ## Unsigned spec: a window exactly one lapse wide.
    return boundary, boundary + timedelta(**{lapse_type: 1})
def _ooop_filter_from_date_bounds(field_name, start, end):
    """Build OOOP filter kwargs restricting ``field_name`` to a range.

    Either bound may be ``None`` (open interval).  Datetimes are
    converted to UTC and rendered in the OpenERP server date format.
    """
    fmt = '%Y-%m-%d %H:%M:%S'
    filters = {}
    for operator, bound in (('gt', start), ('lt', end)):
        if bound is None:
            continue
        filters['%s__%s' % (field_name, operator)] = \
            bound.astimezone(UTC()).strftime(fmt)
    return filters
def _read_string_date(str_date):
    """Parse ``str_date`` against the first matching known format.

    Formats lacking the date part are interpreted relatively to the
    current day (``relative=True``) in the local timezone.

    Raises ``ValueError`` when no format matches.

    Note: the original format list used ``%s`` (not a valid seconds
    directive for strptime) where ``%S`` was clearly intended, so the
    "HH:MM:SS" variants could never match; fixed to ``%S``.
    """
    formats = ["%Y-%m-%d",
               "%Y-%m-%d %H:%M",
               "%Y-%m-%d %H:%M:%S",
               "%m-%d",
               "%m/%d",
               "%m-%d %H:%M:%S",
               "%m-%d %H:%M",
               "%H:%M:%S",
               "%H:%M",
               ]
    for f in formats:
        try:
            return Time.strptime(str_date, f, TzLocal(), relative=True)
        except ValueError:
            pass
    raise ValueError("No format seems to know how to parse your string %r"
                     % (str_date))
def build_filters(opt_filters):
    """Translate CLI-style filter options into OOOP filter kwargs.

    Supported shortcuts:

    - ``mhours``/``mminutes``/``mdays`` (and the ``c...`` variants):
      records modified ("m", on ``write_date``) or created ("c", on
      ``create_date``) within the given "+N"/"-N"/"N" time spec (see
      ``_date_from_find_spec``).
    - ``model``: exact model name.
    - ``name``: name substring (LIKE) match.
    - ``tag``: name carrying the "{tag}" marker.
    - ``since``: modified after the given date string.
    - ``nid``: record id.

    Any other key is passed through unchanged; falsy values are skipped.
    """
    filters = {}
    ## ``items()`` instead of the Python-2-only ``iteritems()`` keeps
    ## this usable under both Python 2 and 3.
    for label, value in opt_filters.items():
        if not value:
            continue
        if (label.startswith('m') or label.startswith('c')) and \
               label[1:] in ["hours", "minutes", "days"]:
            date_from, date_to = _date_from_find_spec(value, label[1:])
            oe_date_field = "write_date" if label.startswith('m') \
                            else "create_date"
            filters.update(_ooop_filter_from_date_bounds(oe_date_field,
                                                         date_from, date_to))
        elif label == "model":
            filters["model"] = value
        elif label == "name":
            filters["name__like"] = value
        elif label == "tag":
            filters["name__like"] = OOOP_NAME_TAG_LIKE_EXPR % value
        elif label == "since":
            date = _read_string_date(value)
            filters.update(_ooop_filter_from_date_bounds(
                "write_date", date, None))
        elif label == "nid":
            filters["id"] = value
        else:
            filters[label] = value
    return filters
class OOOPExtended(object):
    """Adds some shortcuts to ooop.

    Wraps an ``ooop.OOOP`` connection and exposes helpers keyed on
    plain OpenERP model names (e.g. ``"ir.model.data"``) instead of
    OOOP's CamelCased attribute style.
    """

    def __init__(self, *args, **kwargs):
        ## All arguments are forwarded verbatim to ``ooop.OOOP``.
        self._ooop = ooop.OOOP(*args, **kwargs)

    def model_exists(self, model):
        """Return true if model exists in distant OOOP database"""
        ## XXXvlab: the previous implementation searched with an empty
        ## domain, so it returned True as soon as *any* model existed;
        ## restrict the search to the requested model name.
        return len(self._ooop.search("ir.model",
                                     [('model', '=', model)],
                                     limit=1)) != 0

    @cache
    def get_model(self, model):
        """Return OOOP Model object specified in the openerp style model

        It avoids using the CamelCased ooop style of referencing models.
        """
        return getattr(self._ooop, ooop_normalize_model_name(model))

    @cache
    def get_fields(self, model):
        """Return fields dict of current model"""
        if model in self._ooop.fields:
            return self._ooop.fields[model]
        odoo_fields = self.get_model(model).fields_get()
        fields = {}
        for field_name, field in odoo_fields.items():
            field['name'] = field_name
            field['relation'] = field.get('relation', False)
            ## OOOP expects the OpenERP-style 'ttype' key, not 'type'.
            field['ttype'] = field.pop('type')
            fields[field_name] = field
        self._ooop.fields[model] = fields
        return fields

    def get_object(self, model, object_id):
        """Return OOOP Instance object using OpenERP model name

        It avoids using the CamelCased ooop style of referencing models.
        """
        return self.get_model(model).get(int(object_id))

    def write(self, *args, **kwargs):
        """Proxy ``ooop.write``, re-raising faults with a readable message."""
        try:
            res = self._ooop.write(*args, **kwargs)
        ## ``except ... as e`` (not the Python-2-only comma form) is
        ## valid on both Python 2.6+ and Python 3.
        except xmlrpclib.Fault as e:
            raise Exception(
                "OpenERP write error:\n" +
                ('\n'.join(
                    [" | " + line for line in e.faultString.split('\n')])))
        return res

    def get_object_by_xmlid(self, xmlid):
        """Return OOOP Instance object using XMLid

        ``xmlid`` is a ``(module, xml_id)`` pair (the Python 2 tuple
        parameter of the original signature was removed by PEP 3113;
        callers still pass the same 2-tuple).  Returns None when the
        XML id is unknown.
        """
        module, xml_id = xmlid
        imd = self.get_model("ir.model.data")
        lookup = imd.filter(module=module, name=xml_id)
        if len(lookup) == 0:
            return None
        lookup = lookup[0]
        return self.get_object(lookup.model, lookup.res_id)

    ## XXXvlab: should be a method of an OOOP object instance
    def get_xml_id(self, model, object_id):
        """Return module, xml_id of given object specified by its model and id.

        It avoids using the CamelCased ooop style of referencing models.
        Returns None, if there are no xml_id associated to this object.
        """
        imd = self.get_model("ir.model.data")
        lookup = imd.filter(model=model, res_id=int(object_id))
        if len(lookup) == 0:
            return None
        lookup = lookup[0]
        return lookup.module, lookup.name

    ## XXXvlab: should be a method of an OOOP object instance
    def set_xml_id(self, model, object_id, xmlid):
        """Attach an XML id (a ``(module, xml_id)`` pair) to an object."""
        module, xml_id = xmlid
        imd = self.get_model("ir.model.data")
        ir_model_data = imd.new(res_id=object_id, module=module, name=xml_id)
        ir_model_data.model = model
        ir_model_data.save()

    def simple_filters(self, model, **kwargs):
        """Alternative syntax to OOOP filter

        These are simpler and dumber than the OOOP syntax. They exists
        to draw a line with CLI arguments for instances.
        They introduce some shortcuts as 'since'...
        """
        filters = build_filters(kwargs)
        return self.get_model(model).filter(**filters)

    def get_all_d(self, model, domain, order=None, limit=None, offset=0,
                  fields=None):
        """Search ``model`` with a raw OpenERP domain and read fields.

        ``fields=None`` (the new default) behaves like the previous
        mutable-default ``[]``: all fields are read.
        """
        fields = [] if fields is None else fields
        ids = self._ooop.search(model, domain, order=order,
                                limit=limit, offset=offset)
        return self._ooop.read(model, ids, fields=fields)

    def version(self):
        """Return the distant server version tuple."""
        return tuple(self._ooop.commonsock.version()['server_version_info'])
| |
# -*- coding: utf-8 -*-
#
# SelfTest/Signature/test_pkcs1_pss.py: Self-test for PKCS#1 PSS signatures
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
from __future__ import nested_scopes
__revision__ = "$Id$"
import unittest
from Crypto.PublicKey import RSA
from Crypto import Random
from Crypto.SelfTest.st_common import list_test_cases, a2b_hex, b2a_hex
from Crypto.Hash import *
from Crypto.Signature import PKCS1_PSS as PKCS
from Crypto.Util.py3compat import *
def isStr(s):
    """Return 1 when *s* can be concatenated to a text string, else 0."""
    probe = ''
    try:
        probe = probe + s
    except TypeError:
        ## Not string-like: concatenation with '' is rejected.
        return 0
    return 1
def rws(t):
    """Remove white spaces, tabs, and new lines from a string"""
    return t.replace('\t', '').replace('\n', '').replace(' ', '')
def t2b(t):
    """Convert a text string with bytes in hex form to a byte string"""
    clean = b(rws(t))
    ## Hex needs two characters per byte.
    if len(clean) % 2 != 0:
        raise ValueError("Even number of characters expected")
    return a2b_hex(clean)
# Helper class to count how many bytes have been requested
# from the key's private RNG, w/o counting those used for blinding
class MyKey:
    """Thin wrapper around an RSA key that counts RNG byte requests."""

    def __init__(self, key):
        self._key = key
        self.n = key.n
        ## Running total of bytes requested through _randfunc.
        self.asked = 0

    def _randfunc(self, N):
        # Record the request size before delegating to the real RNG.
        self.asked = self.asked + N
        return self._key._randfunc(N)

    def sign(self, m):
        return self._key.sign(m)

    def has_private(self):
        return self._key.has_private()

    def decrypt(self, m):
        return self._key.decrypt(m)

    def verify(self, m, p):
        return self._key.verify(m, p)

    def encrypt(self, m, p):
        return self._key.encrypt(m, p)
class PKCS1_PSS_Tests(unittest.TestCase):
    """Known-answer and behavioral self-tests for PKCS#1 PSS signatures."""

    # List of tuples with test data for PKCS#1 PSS
    # Each tuple is made up by:
    # Item #0: dictionary with RSA key component, or key to import
    # Item #1: data to hash and sign
    # Item #2: signature of the data #1, done with the key #0,
    # and salt #3 after hashing it with #4
    # Item #3: salt
    # Item #4: hash object generator
    _testData = (
        #
        # From in pss-vect.txt to be found in
        # ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-1/pkcs-1v2-1-vec.zip
        #
        (
        # Private key
        {
        'n':'''a2 ba 40 ee 07 e3 b2 bd 2f 02 ce 22 7f 36 a1 95
        02 44 86 e4 9c 19 cb 41 bb bd fb ba 98 b2 2b 0e
        57 7c 2e ea ff a2 0d 88 3a 76 e6 5e 39 4c 69 d4
        b3 c0 5a 1e 8f ad da 27 ed b2 a4 2b c0 00 fe 88
        8b 9b 32 c2 2d 15 ad d0 cd 76 b3 e7 93 6e 19 95
        5b 22 0d d1 7d 4e a9 04 b1 ec 10 2b 2e 4d e7 75
        12 22 aa 99 15 10 24 c7 cb 41 cc 5e a2 1d 00 ee
        b4 1f 7c 80 08 34 d2 c6 e0 6b ce 3b ce 7e a9 a5''',
        'e':'''01 00 01''',
        # In the test vector, only p and q were given...
        # d is computed offline as e^{-1} mod (p-1)(q-1)
        'd':'''50e2c3e38d886110288dfc68a9533e7e12e27d2aa56
        d2cdb3fb6efa990bcff29e1d2987fb711962860e7391b1ce01
        ebadb9e812d2fbdfaf25df4ae26110a6d7a26f0b810f54875e
        17dd5c9fb6d641761245b81e79f8c88f0e55a6dcd5f133abd3
        5f8f4ec80adf1bf86277a582894cb6ebcd2162f1c7534f1f49
        47b129151b71'''
        },
        # Data to sign
        '''85 9e ef 2f d7 8a ca 00 30 8b dc 47 11 93 bf 55
        bf 9d 78 db 8f 8a 67 2b 48 46 34 f3 c9 c2 6e 64
        78 ae 10 26 0f e0 dd 8c 08 2e 53 a5 29 3a f2 17
        3c d5 0c 6d 5d 35 4f eb f7 8b 26 02 1c 25 c0 27
        12 e7 8c d4 69 4c 9f 46 97 77 e4 51 e7 f8 e9 e0
        4c d3 73 9c 6b bf ed ae 48 7f b5 56 44 e9 ca 74
        ff 77 a5 3c b7 29 80 2f 6e d4 a5 ff a8 ba 15 98
        90 fc''',
        # Signature
        '''8d aa 62 7d 3d e7 59 5d 63 05 6c 7e c6 59 e5 44
        06 f1 06 10 12 8b aa e8 21 c8 b2 a0 f3 93 6d 54
        dc 3b dc e4 66 89 f6 b7 95 1b b1 8e 84 05 42 76
        97 18 d5 71 5d 21 0d 85 ef bb 59 61 92 03 2c 42
        be 4c 29 97 2c 85 62 75 eb 6d 5a 45 f0 5f 51 87
        6f c6 74 3d ed dd 28 ca ec 9b b3 0e a9 9e 02 c3
        48 82 69 60 4f e4 97 f7 4c cd 7c 7f ca 16 71 89
        71 23 cb d3 0d ef 5d 54 a2 b5 53 6a d9 0a 74 7e''',
        # Salt
        '''e3 b5 d5 d0 02 c1 bc e5 0c 2b 65 ef 88 a1 88 d8
        3b ce 7e 61''',
        # Hash algorithm
        SHA
        ),
        #
        # Example 1.1 to be found in
        # ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-1/pkcs-1v2-1-vec.zip
        #
        (
        # Private key
        {
        'n':'''a5 6e 4a 0e 70 10 17 58 9a 51 87 dc 7e a8 41 d1
        56 f2 ec 0e 36 ad 52 a4 4d fe b1 e6 1f 7a d9 91
        d8 c5 10 56 ff ed b1 62 b4 c0 f2 83 a1 2a 88 a3
        94 df f5 26 ab 72 91 cb b3 07 ce ab fc e0 b1 df
        d5 cd 95 08 09 6d 5b 2b 8b 6d f5 d6 71 ef 63 77
        c0 92 1c b2 3c 27 0a 70 e2 59 8e 6f f8 9d 19 f1
        05 ac c2 d3 f0 cb 35 f2 92 80 e1 38 6b 6f 64 c4
        ef 22 e1 e1 f2 0d 0c e8 cf fb 22 49 bd 9a 21 37''',
        'e':'''01 00 01''',
        'd':'''33 a5 04 2a 90 b2 7d 4f 54 51 ca 9b bb d0 b4 47
        71 a1 01 af 88 43 40 ae f9 88 5f 2a 4b be 92 e8
        94 a7 24 ac 3c 56 8c 8f 97 85 3a d0 7c 02 66 c8
        c6 a3 ca 09 29 f1 e8 f1 12 31 88 44 29 fc 4d 9a
        e5 5f ee 89 6a 10 ce 70 7c 3e d7 e7 34 e4 47 27
        a3 95 74 50 1a 53 26 83 10 9c 2a ba ca ba 28 3c
        31 b4 bd 2f 53 c3 ee 37 e3 52 ce e3 4f 9e 50 3b
        d8 0c 06 22 ad 79 c6 dc ee 88 35 47 c6 a3 b3 25'''
        },
        # Message
        '''cd c8 7d a2 23 d7 86 df 3b 45 e0 bb bc 72 13 26
        d1 ee 2a f8 06 cc 31 54 75 cc 6f 0d 9c 66 e1 b6
        23 71 d4 5c e2 39 2e 1a c9 28 44 c3 10 10 2f 15
        6a 0d 8d 52 c1 f4 c4 0b a3 aa 65 09 57 86 cb 76
        97 57 a6 56 3b a9 58 fe d0 bc c9 84 e8 b5 17 a3
        d5 f5 15 b2 3b 8a 41 e7 4a a8 67 69 3f 90 df b0
        61 a6 e8 6d fa ae e6 44 72 c0 0e 5f 20 94 57 29
        cb eb e7 7f 06 ce 78 e0 8f 40 98 fb a4 1f 9d 61
        93 c0 31 7e 8b 60 d4 b6 08 4a cb 42 d2 9e 38 08
        a3 bc 37 2d 85 e3 31 17 0f cb f7 cc 72 d0 b7 1c
        29 66 48 b3 a4 d1 0f 41 62 95 d0 80 7a a6 25 ca
        b2 74 4f d9 ea 8f d2 23 c4 25 37 02 98 28 bd 16
        be 02 54 6f 13 0f d2 e3 3b 93 6d 26 76 e0 8a ed
        1b 73 31 8b 75 0a 01 67 d0''',
        # Signature
        '''90 74 30 8f b5 98 e9 70 1b 22 94 38 8e 52 f9 71
        fa ac 2b 60 a5 14 5a f1 85 df 52 87 b5 ed 28 87
        e5 7c e7 fd 44 dc 86 34 e4 07 c8 e0 e4 36 0b c2
        26 f3 ec 22 7f 9d 9e 54 63 8e 8d 31 f5 05 12 15
        df 6e bb 9c 2f 95 79 aa 77 59 8a 38 f9 14 b5 b9
        c1 bd 83 c4 e2 f9 f3 82 a0 d0 aa 35 42 ff ee 65
        98 4a 60 1b c6 9e b2 8d eb 27 dc a1 2c 82 c2 d4
        c3 f6 6c d5 00 f1 ff 2b 99 4d 8a 4e 30 cb b3 3c''',
        # Salt
        '''de e9 59 c7 e0 64 11 36 14 20 ff 80 18 5e d5 7f
        3e 67 76 af''',
        # Hash
        SHA
        ),
        #
        # Example 1.2 to be found in
        # ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-1/pkcs-1v2-1-vec.zip
        #
        (
        # Private key
        {
        'n':'''a5 6e 4a 0e 70 10 17 58 9a 51 87 dc 7e a8 41 d1
        56 f2 ec 0e 36 ad 52 a4 4d fe b1 e6 1f 7a d9 91
        d8 c5 10 56 ff ed b1 62 b4 c0 f2 83 a1 2a 88 a3
        94 df f5 26 ab 72 91 cb b3 07 ce ab fc e0 b1 df
        d5 cd 95 08 09 6d 5b 2b 8b 6d f5 d6 71 ef 63 77
        c0 92 1c b2 3c 27 0a 70 e2 59 8e 6f f8 9d 19 f1
        05 ac c2 d3 f0 cb 35 f2 92 80 e1 38 6b 6f 64 c4
        ef 22 e1 e1 f2 0d 0c e8 cf fb 22 49 bd 9a 21 37''',
        'e':'''01 00 01''',
        'd':'''33 a5 04 2a 90 b2 7d 4f 54 51 ca 9b bb d0 b4 47
        71 a1 01 af 88 43 40 ae f9 88 5f 2a 4b be 92 e8
        94 a7 24 ac 3c 56 8c 8f 97 85 3a d0 7c 02 66 c8
        c6 a3 ca 09 29 f1 e8 f1 12 31 88 44 29 fc 4d 9a
        e5 5f ee 89 6a 10 ce 70 7c 3e d7 e7 34 e4 47 27
        a3 95 74 50 1a 53 26 83 10 9c 2a ba ca ba 28 3c
        31 b4 bd 2f 53 c3 ee 37 e3 52 ce e3 4f 9e 50 3b
        d8 0c 06 22 ad 79 c6 dc ee 88 35 47 c6 a3 b3 25'''
        },
        # Message
        '''85 13 84 cd fe 81 9c 22 ed 6c 4c cb 30 da eb 5c
        f0 59 bc 8e 11 66 b7 e3 53 0c 4c 23 3e 2b 5f 8f
        71 a1 cc a5 82 d4 3e cc 72 b1 bc a1 6d fc 70 13
        22 6b 9e''',
        # Signature
        '''3e f7 f4 6e 83 1b f9 2b 32 27 41 42 a5 85 ff ce
        fb dc a7 b3 2a e9 0d 10 fb 0f 0c 72 99 84 f0 4e
        f2 9a 9d f0 78 07 75 ce 43 73 9b 97 83 83 90 db
        0a 55 05 e6 3d e9 27 02 8d 9d 29 b2 19 ca 2c 45
        17 83 25 58 a5 5d 69 4a 6d 25 b9 da b6 60 03 c4
        cc cd 90 78 02 19 3b e5 17 0d 26 14 7d 37 b9 35
        90 24 1b e5 1c 25 05 5f 47 ef 62 75 2c fb e2 14
        18 fa fe 98 c2 2c 4d 4d 47 72 4f db 56 69 e8 43''',
        # Salt
        '''ef 28 69 fa 40 c3 46 cb 18 3d ab 3d 7b ff c9 8f
        d5 6d f4 2d''',
        # Hash
        SHA
        ),
        #
        # Example 2.1 to be found in
        # ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-1/pkcs-1v2-1-vec.zip
        #
        (
        # Private key
        {
        'n':'''01 d4 0c 1b cf 97 a6 8a e7 cd bd 8a 7b f3 e3 4f
        a1 9d cc a4 ef 75 a4 74 54 37 5f 94 51 4d 88 fe
        d0 06 fb 82 9f 84 19 ff 87 d6 31 5d a6 8a 1f f3
        a0 93 8e 9a bb 34 64 01 1c 30 3a d9 91 99 cf 0c
        7c 7a 8b 47 7d ce 82 9e 88 44 f6 25 b1 15 e5 e9
        c4 a5 9c f8 f8 11 3b 68 34 33 6a 2f d2 68 9b 47
        2c bb 5e 5c ab e6 74 35 0c 59 b6 c1 7e 17 68 74
        fb 42 f8 fc 3d 17 6a 01 7e dc 61 fd 32 6c 4b 33
        c9''',
        'e':'''01 00 01''',
        'd':'''02 7d 14 7e 46 73 05 73 77 fd 1e a2 01 56 57 72
        17 6a 7d c3 83 58 d3 76 04 56 85 a2 e7 87 c2 3c
        15 57 6b c1 6b 9f 44 44 02 d6 bf c5 d9 8a 3e 88
        ea 13 ef 67 c3 53 ec a0 c0 dd ba 92 55 bd 7b 8b
        b5 0a 64 4a fd fd 1d d5 16 95 b2 52 d2 2e 73 18
        d1 b6 68 7a 1c 10 ff 75 54 5f 3d b0 fe 60 2d 5f
        2b 7f 29 4e 36 01 ea b7 b9 d1 ce cd 76 7f 64 69
        2e 3e 53 6c a2 84 6c b0 c2 dd 48 6a 39 fa 75 b1'''
        },
        # Message
        '''da ba 03 20 66 26 3f ae db 65 98 48 11 52 78 a5
        2c 44 fa a3 a7 6f 37 51 5e d3 36 32 10 72 c4 0a
        9d 9b 53 bc 05 01 40 78 ad f5 20 87 51 46 aa e7
        0f f0 60 22 6d cb 7b 1f 1f c2 7e 93 60''',
        # Signature
        '''01 4c 5b a5 33 83 28 cc c6 e7 a9 0b f1 c0 ab 3f
        d6 06 ff 47 96 d3 c1 2e 4b 63 9e d9 13 6a 5f ec
        6c 16 d8 88 4b dd 99 cf dc 52 14 56 b0 74 2b 73
        68 68 cf 90 de 09 9a db 8d 5f fd 1d ef f3 9b a4
        00 7a b7 46 ce fd b2 2d 7d f0 e2 25 f5 46 27 dc
        65 46 61 31 72 1b 90 af 44 53 63 a8 35 8b 9f 60
        76 42 f7 8f ab 0a b0 f4 3b 71 68 d6 4b ae 70 d8
        82 78 48 d8 ef 1e 42 1c 57 54 dd f4 2c 25 89 b5
        b3''',
        # Salt
        '''57 bf 16 0b cb 02 bb 1d c7 28 0c f0 45 85 30 b7
        d2 83 2f f7''',
        SHA
        ),
        #
        # Example 8.1 to be found in
        # ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-1/pkcs-1v2-1-vec.zip
        #
        (
        # Private key
        {
        'n':'''49 53 70 a1 fb 18 54 3c 16 d3 63 1e 31 63 25 5d
        f6 2b e6 ee e8 90 d5 f2 55 09 e4 f7 78 a8 ea 6f
        bb bc df 85 df f6 4e 0d 97 20 03 ab 36 81 fb ba
        6d d4 1f d5 41 82 9b 2e 58 2d e9 f2 a4 a4 e0 a2
        d0 90 0b ef 47 53 db 3c ee 0e e0 6c 7d fa e8 b1
        d5 3b 59 53 21 8f 9c ce ea 69 5b 08 66 8e de aa
        dc ed 94 63 b1 d7 90 d5 eb f2 7e 91 15 b4 6c ad
        4d 9a 2b 8e fa b0 56 1b 08 10 34 47 39 ad a0 73
        3f''',
        'e':'''01 00 01''',
        'd':'''6c 66 ff e9 89 80 c3 8f cd ea b5 15 98 98 83 61
        65 f4 b4 b8 17 c4 f6 a8 d4 86 ee 4e a9 13 0f e9
        b9 09 2b d1 36 d1 84 f9 5f 50 4a 60 7e ac 56 58
        46 d2 fd d6 59 7a 89 67 c7 39 6e f9 5a 6e ee bb
        45 78 a6 43 96 6d ca 4d 8e e3 de 84 2d e6 32 79
        c6 18 15 9c 1a b5 4a 89 43 7b 6a 61 20 e4 93 0a
        fb 52 a4 ba 6c ed 8a 49 47 ac 64 b3 0a 34 97 cb
        e7 01 c2 d6 26 6d 51 72 19 ad 0e c6 d3 47 db e9'''
        },
        # Message
        '''81 33 2f 4b e6 29 48 41 5e a1 d8 99 79 2e ea cf
        6c 6e 1d b1 da 8b e1 3b 5c ea 41 db 2f ed 46 70
        92 e1 ff 39 89 14 c7 14 25 97 75 f5 95 f8 54 7f
        73 56 92 a5 75 e6 92 3a f7 8f 22 c6 99 7d db 90
        fb 6f 72 d7 bb 0d d5 74 4a 31 de cd 3d c3 68 58
        49 83 6e d3 4a ec 59 63 04 ad 11 84 3c 4f 88 48
        9f 20 97 35 f5 fb 7f da f7 ce c8 ad dc 58 18 16
        8f 88 0a cb f4 90 d5 10 05 b7 a8 e8 4e 43 e5 42
        87 97 75 71 dd 99 ee a4 b1 61 eb 2d f1 f5 10 8f
        12 a4 14 2a 83 32 2e db 05 a7 54 87 a3 43 5c 9a
        78 ce 53 ed 93 bc 55 08 57 d7 a9 fb''',
        # Signature
        '''02 62 ac 25 4b fa 77 f3 c1 ac a2 2c 51 79 f8 f0
        40 42 2b 3c 5b af d4 0a 8f 21 cf 0f a5 a6 67 cc
        d5 99 3d 42 db af b4 09 c5 20 e2 5f ce 2b 1e e1
        e7 16 57 7f 1e fa 17 f3 da 28 05 2f 40 f0 41 9b
        23 10 6d 78 45 aa f0 11 25 b6 98 e7 a4 df e9 2d
        39 67 bb 00 c4 d0 d3 5b a3 55 2a b9 a8 b3 ee f0
        7c 7f ec db c5 42 4a c4 db 1e 20 cb 37 d0 b2 74
        47 69 94 0e a9 07 e1 7f bb ca 67 3b 20 52 23 80
        c5''',
        # Salt
        '''1d 65 49 1d 79 c8 64 b3 73 00 9b e6 f6 f2 46 7b
        ac 4c 78 fa''',
        SHA
        )
    )

    def testSign1(self):
        for i in range(len(self._testData)):
            # Build the key.
            # ``int`` replaces the Python-2-only ``long`` (a NameError on
            # Python 3); Python ints are unbounded, so behavior is identical.
            comps = [ int(rws(self._testData[i][0][x]),16) for x in ('n','e','d') ]
            key = MyKey(RSA.construct(comps))
            # Hash function
            h = self._testData[i][4].new()
            # Data to sign
            h.update(t2b(self._testData[i][1]))
            # Salt: force the key's RNG to return the known salt so the
            # signature is deterministic and comparable to the vector.
            test_salt = t2b(self._testData[i][3])
            key._randfunc = lambda N: test_salt
            # The real test
            signer = PKCS.new(key)
            self.assertTrue(signer.can_sign())
            s = signer.sign(h)
            self.assertEqual(s, t2b(self._testData[i][2]))

    def testVerify1(self):
        for i in range(len(self._testData)):
            # Build the (public-only) key.
            # ``int`` replaces the Python-2-only ``long``; see testSign1.
            comps = [ int(rws(self._testData[i][0][x]),16) for x in ('n','e') ]
            key = MyKey(RSA.construct(comps))
            # Hash function
            h = self._testData[i][4].new()
            # Data to sign
            h.update(t2b(self._testData[i][1]))
            # Salt
            test_salt = t2b(self._testData[i][3])
            # The real test
            key._randfunc = lambda N: test_salt
            verifier = PKCS.new(key)
            self.assertFalse(verifier.can_sign())
            result = verifier.verify(h, t2b(self._testData[i][2]))
            self.assertTrue(result)

    def testSignVerify(self):
        h = SHA.new()
        h.update(b('blah blah blah'))
        rng = Random.new().read
        key = MyKey(RSA.generate(1024,rng))
        # Helper function to monitor what's request from MGF
        global mgfcalls
        def newMGF(seed,maskLen):
            global mgfcalls
            mgfcalls += 1
            return bchr(0x00)*maskLen
        # Verify that PSS is friendly to all ciphers
        for hashmod in (MD2,MD5,SHA,SHA224,SHA256,SHA384,RIPEMD):
            h = hashmod.new()
            h.update(b('blah blah blah'))
            # Verify that sign() asks for as many random bytes
            # as the hash output size
            key.asked = 0
            signer = PKCS.new(key)
            s = signer.sign(h)
            self.assertTrue(signer.verify(h, s))
            self.assertEqual(key.asked, h.digest_size)
        h = SHA.new()
        h.update(b('blah blah blah'))
        # Verify that sign() uses a different salt length
        for sLen in (0,3,21):
            key.asked = 0
            signer = PKCS.new(key, saltLen=sLen)
            s = signer.sign(h)
            self.assertEqual(key.asked, sLen)
            self.assertTrue(signer.verify(h, s))
        # Verify that sign() uses the custom MGF
        mgfcalls = 0
        signer = PKCS.new(key, newMGF)
        s = signer.sign(h)
        self.assertEqual(mgfcalls, 1)
        self.assertTrue(signer.verify(h, s))
        # Verify that sign() does not call the RNG
        # when salt length is 0, even when a new MGF is provided
        key.asked = 0
        mgfcalls = 0
        signer = PKCS.new(key, newMGF, 0)
        s = signer.sign(h)
        self.assertEqual(key.asked,0)
        self.assertEqual(mgfcalls, 1)
        self.assertTrue(signer.verify(h, s))
def get_tests(config={}):
    """Return the list of test cases defined in this module.

    ``config`` is accepted for interface compatibility with the other
    self-test modules and is ignored here.
    """
    return [] + list_test_cases(PKCS1_PSS_Tests)
if __name__ == '__main__':
    # ``defaultTest`` is resolved by name against this module, so the
    # suite factory only needs to exist at module level.
    def suite():
        return unittest.TestSuite(get_tests())
    unittest.main(defaultTest='suite')

# vim:set ts=4 sw=4 sts=4
| |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v8.resources.types import campaign_audience_view
from google.ads.googleads.v8.services.types import (
campaign_audience_view_service,
)
from .transports.base import (
CampaignAudienceViewServiceTransport,
DEFAULT_CLIENT_INFO,
)
from .transports.grpc import CampaignAudienceViewServiceGrpcTransport
class CampaignAudienceViewServiceClientMeta(type):
    """Metaclass for the CampaignAudienceViewService client.

    This provides class-level methods for building and retrieving
    support objects (e.g. transport) without polluting the client instance
    objects.
    """

    _transport_registry = (
        OrderedDict()
    )  # type: Dict[str, Type[CampaignAudienceViewServiceTransport]]
    _transport_registry["grpc"] = CampaignAudienceViewServiceGrpcTransport

    def get_transport_class(
        cls, label: str = None,
    ) -> Type[CampaignAudienceViewServiceTransport]:
        """Return an appropriate transport class.

        Args:
            label: The name of the desired transport. If none is
                provided, then the first transport in the registry is used.
        Returns:
            The transport class to use.
        """
        registry = cls._transport_registry
        # No label: fall back to the first registered transport (the
        # registry is ordered, so this is deterministic).
        if not label:
            return next(iter(registry.values()))
        # A specific transport was requested; KeyError on unknown labels.
        return registry[label]
class CampaignAudienceViewServiceClient(
metaclass=CampaignAudienceViewServiceClientMeta
):
"""Service to manage campaign audience views."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
CampaignAudienceViewServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
CampaignAudienceViewServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
    @property
    def transport(self) -> CampaignAudienceViewServiceTransport:
        """Return the transport used by the client instance.

        Returns:
            CampaignAudienceViewServiceTransport: The transport used by the client instance.
        """
        # Set once in ``__init__``; never rebuilt afterwards.
        return self._transport
@staticmethod
def campaign_audience_view_path(
customer_id: str, campaign_id: str, criterion_id: str,
) -> str:
"""Return a fully-qualified campaign_audience_view string."""
return "customers/{customer_id}/campaignAudienceViews/{campaign_id}~{criterion_id}".format(
customer_id=customer_id,
campaign_id=campaign_id,
criterion_id=criterion_id,
)
@staticmethod
def parse_campaign_audience_view_path(path: str) -> Dict[str, str]:
"""Parse a campaign_audience_view path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/campaignAudienceViews/(?P<campaign_id>.+?)~(?P<criterion_id>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
    def __init__(
        self,
        *,
        credentials: Optional[ga_credentials.Credentials] = None,
        transport: Union[
            str, CampaignAudienceViewServiceTransport, None
        ] = None,
        client_options: Optional[client_options_lib.ClientOptions] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiate the campaign audience view service client.

        Args:
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            transport (Union[str, ~.CampaignAudienceViewServiceTransport]): The
                transport to use. If set to None, a transport is chosen
                automatically.
            client_options (google.api_core.client_options.ClientOptions): Custom options for the
                client. It won't take effect if a ``transport`` instance is provided.
                (1) The ``api_endpoint`` property can be used to override the
                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
                environment variable can also be used to override the endpoint:
                "always" (always use the default mTLS endpoint), "never" (always
                use the default regular endpoint) and "auto" (auto switch to the
                default mTLS endpoint if client certificate is present, this is
                the default value). However, the ``api_endpoint`` property takes
                precedence if provided.
                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
                is "true", then the ``client_cert_source`` property can be used
                to provide client certificate for mutual TLS transport. If
                not provided, the default SSL client certificate will be used if
                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
                set, no client certificate will be used.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        # Normalize client_options: accept a plain dict or None.
        if isinstance(client_options, dict):
            client_options = client_options_lib.from_dict(client_options)
        if client_options is None:
            client_options = client_options_lib.ClientOptions()

        # Create SSL credentials for mutual TLS if needed.
        use_client_cert = bool(
            util.strtobool(
                os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
            )
        )

        ssl_credentials = None
        is_mtls = False
        if use_client_cert:
            if client_options.client_cert_source:
                # Lazy import: grpc is only needed when a client
                # certificate source is actually supplied.
                import grpc  # type: ignore

                cert, key = client_options.client_cert_source()
                ssl_credentials = grpc.ssl_channel_credentials(
                    certificate_chain=cert, private_key=key
                )
                is_mtls = True
            else:
                # Fall back to the default SSL client certificate, if any.
                creds = SslCredentials()
                is_mtls = creds.is_mtls
                ssl_credentials = creds.ssl_credentials if is_mtls else None

        # Figure out which api endpoint to use.
        # Explicit api_endpoint always wins over the environment variable.
        if client_options.api_endpoint is not None:
            api_endpoint = client_options.api_endpoint
        else:
            use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
            if use_mtls_env == "never":
                api_endpoint = self.DEFAULT_ENDPOINT
            elif use_mtls_env == "always":
                api_endpoint = self.DEFAULT_MTLS_ENDPOINT
            elif use_mtls_env == "auto":
                # "auto": use mTLS endpoint only when a client cert is in play.
                api_endpoint = (
                    self.DEFAULT_MTLS_ENDPOINT
                    if is_mtls
                    else self.DEFAULT_ENDPOINT
                )
            else:
                raise MutualTLSChannelError(
                    "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
                )

        # Save or instantiate the transport.
        # Ordinarily, we provide the transport, but allowing a custom transport
        # instance provides an extensibility point for unusual situations.
        if isinstance(transport, CampaignAudienceViewServiceTransport):
            # transport is a CampaignAudienceViewServiceTransport instance.
            # A ready-made transport carries its own credentials.
            if credentials:
                raise ValueError(
                    "When providing a transport instance, "
                    "provide its credentials directly."
                )
            self._transport = transport
        elif isinstance(transport, str):
            # A transport name: resolve it through the metaclass registry.
            Transport = type(self).get_transport_class(transport)
            self._transport = Transport(
                credentials=credentials, host=self.DEFAULT_ENDPOINT
            )
        else:
            # Default: build the gRPC transport with the endpoint and SSL
            # settings computed above.
            self._transport = CampaignAudienceViewServiceGrpcTransport(
                credentials=credentials,
                host=api_endpoint,
                ssl_channel_credentials=ssl_credentials,
                client_info=client_info,
            )
def get_campaign_audience_view(
self,
request: campaign_audience_view_service.GetCampaignAudienceViewRequest = None,
*,
resource_name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> campaign_audience_view.CampaignAudienceView:
r"""Returns the requested campaign audience view in full detail.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Args:
request (:class:`google.ads.googleads.v8.services.types.GetCampaignAudienceViewRequest`):
The request object. Request message for
[CampaignAudienceViewService.GetCampaignAudienceView][google.ads.googleads.v8.services.CampaignAudienceViewService.GetCampaignAudienceView].
resource_name (:class:`str`):
Required. The resource name of the
campaign audience view to fetch.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v8.resources.types.CampaignAudienceView:
A campaign audience view.
Includes performance data from interests
and remarketing lists for Display
Network and YouTube Network ads, and
remarketing lists for search ads (RLSA),
aggregated by campaign and audience
criterion. This view only includes
audiences attached at the campaign
level.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([resource_name]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a campaign_audience_view_service.GetCampaignAudienceViewRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request,
campaign_audience_view_service.GetCampaignAudienceViewRequest,
):
request = campaign_audience_view_service.GetCampaignAudienceViewRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if resource_name is not None:
request.resource_name = resource_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.get_campaign_audience_view
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("resource_name", request.resource_name),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
__all__ = ("CampaignAudienceViewServiceClient",)
| |
#!/usr/bin/python
# get data from net
import urllib.request
# parse data
import re
# hash texts to check if it's new or already parsed
import hashlib
# logging
import logging
import sys
# file system: write .txt files to folder
import os.path
# command line
import argparse
import webbrowser
# logging
LOG = logging.getLogger(__name__)
LOG.addHandler(logging.StreamHandler(sys.stdout))
#LOG.setLevel(logging.DEBUG)
# statics
# find a link to a text content page
TEXT_LINK_RE = re.compile(r'<a class="fancybox" data-fancybox-type="iframe" data-fancybox-autosize="true" onclick=".*" href="(.*)"></a>')
# find content of a text page -- stuff nested in the body
TEXT_CONTENT_RE = re.compile(r'<body>(.*)</body>')
# find a headline in the body of text content (h1 or h2)
TEXT_CONTENT_HEADLINE_RE = re.compile(r'<h([12])>(.*?)</h\1>')
ENCODING = 'utf-8'
WEB_BASE = 'https://bc-v2.pressmatrix.com'
SELECTION_PAGE = WEB_BASE + '/de/profiles/b3b32e362f93/editions'
def get_web_text(link):
    '''
    Fetch the given link over HTTP and return the decoded response body.

    :return str: request answer in one big string
    '''
    # Send a browser-like User-Agent header with the request.
    headers = {'User-Agent': "Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0"}
    request = urllib.request.Request(link, headers=headers)
    with urllib.request.urlopen(request) as response:
        raw = response.read()
    return raw.decode(ENCODING)
class Text():
    '''
    Represent one Text in a MPF magazine.

    Downloads the page at ``text_link`` and extracts the raw inner-body
    HTML plus the first h1/h2 headline, if present.
    '''
    def __init__(self, text_link):
        self._content = None     # raw inner-body HTML, or None if not found
        self._headline = None    # text of first h1/h2, or None
        self._hash_value = None  # cache value for get_hash()
        html = get_web_text(text_link)
        match = TEXT_CONTENT_RE.search(html)
        if match:
            self._content = match.group(1)
            h_match = TEXT_CONTENT_HEADLINE_RE.search(self._content)
            if h_match:
                self._headline = h_match.group(2)
    def get_hash(self):
        '''
        Return md5 hex hash of content (without html tags or spaces).

        BUG FIX: missing content previously caused re.sub(None) to raise
        TypeError; it now hashes as the empty string.
        '''
        if not self._hash_value:
            checksum = hashlib.md5()
            text = re.sub(r'<.+?>', '', self._content or '')
            text = re.sub(r'\s', '', text)
            checksum.update(text.encode())
            self._hash_value = checksum.hexdigest()
        return self._hash_value
    def get_headline(self):
        '''
        Return the headline (None if no h1/h2 was found).
        '''
        return self._headline
    def get_content_txt(self):
        '''
        Return the content of the article as one big string with newlines.

        BUG FIX: returns an empty string instead of crashing when no
        content could be parsed.
        '''
        if self._content is None:
            return ''
        cont = self._content
        # newlines after heading
        cont = Text._subst_tag('h[12]', '', cont)
        cont = Text._subst_tag('/h[12]', '\n\n\n', cont)
        # substitute newlines
        cont = Text._subst_tag('br', '\n\n', cont)
        cont = Text._subst_tag('/p', '\n\n', cont)
        cont = Text._subst_tag('p', '', cont)
        # remove all other html tags
        cont = Text._subst_tag('.*?', '', cont)
        return cont
    @staticmethod
    def _subst_tag(tag, repl, string):
        '''
        Replace tag (with surrounding spaces) in string by repl.
        '''
        return re.sub(r' *<{}> *'.format(tag), repl, string)
    def is_article(self):
        '''
        Return True iff the text is a `proper' article (i.e. not a caption etc.)

        Heuristic: a headline longer than 2 chars and content that is more
        than 30 chars longer than the headline.
        '''
        return (self._headline is not None and len(self._headline) > 2) and ((len(self._content) - len(self._headline)) > 30)
    def __str__(self):
        if self._headline:
            return '{} ({})'.format(self._headline, self.get_hash()[0:6])
        else:
            if self._content:
                return 'Unknown headline; content begins with {}'.format(self._content[0:50])
            else:
                return 'Empty text'
class Magazine():
    '''
    Represent one MPF magazine.

    Downloads every page of the magazine and collects all distinct
    articles, deduplicated by content hash.
    '''
    def __init__(self, base_link):
        # all texts in this magazine
        self._texts = []
        # the hashes of the texts -- use this set to check, whether a certain
        # text is already in _texts.
        self._text_hashes = set()
        self._parse_magazine(base_link)
    def _parse_magazine(self, base_link):
        '''
        Iterate over pages in the magazine and get all texts.

        The server answers HTTP 500 for the page behind the last one; that
        response is used as the end-of-magazine marker.
        '''
        LOG.info('Going to parse all pages of magazine at {}'.format(base_link))
        for page_num in range(1, 500):
            try:
                self._parse_page(base_link + '/pages/page/{}'.format(page_num))
                LOG.info(' Page {:3} done, currently {:3} articles'.format(page_num, len(self._texts)))
            except urllib.error.HTTPError as ex:
                # BUG FIX (robustness): compare the HTTP status code
                # instead of the formatted message string.
                if ex.code == 500:
                    LOG.debug('page behind last page is number {}'.format(page_num))
                    break
                # Other HTTP errors: report them instead of silently
                # swallowing, but keep going with the next page.
                LOG.warning('Skipping page {} after error: {}'.format(page_num, ex))
        LOG.info('Found {} articles in the magazine'.format(len(self._texts)))
    def _parse_page(self, base_link):
        '''
        Parse one page: fetch it, follow all text links, add new articles.
        '''
        LOG.debug('Going to look for page {}'.format(base_link))
        html = get_web_text(base_link)
        # parse answer
        # Note: unfortunately, it's not possible to use xml.etree.ElementTree
        # because the html is not xml well-formed :-(
        # parse using regexp
        for line in html.split('\n'):
            match = TEXT_LINK_RE.search(line.strip())
            if match:
                text_link = WEB_BASE + match.group(1)
                text = Text(text_link)
                if text.is_article():
                    text_hash = text.get_hash()
                    LOG.debug('Found text {}'.format(text))
                    if text_hash not in self._text_hashes:
                        LOG.debug('Text is new; adding it to the magazine.')
                        self._texts.append(text)
                        self._text_hashes.add(text_hash)
    def to_txt(self, folder):
        '''
        Generate .txt files from each article to the given folder.

        :param folder str: path to a folder where to put the .txt files.
        '''
        abspath_dir = os.path.abspath(folder)
        if os.path.isdir(abspath_dir):
            for counter, text in enumerate(self._texts, start=1):
                # is_article() guarantees a headline, so get_headline()
                # is safe to use for the file name here.
                title_clean = re.sub(r'[^\w]', '', re.sub(r'\s+', '_', text.get_headline()))
                filename = '{:03}_{}.txt'.format(counter, title_clean)
                path = os.path.join(abspath_dir, filename)
                LOG.debug('Writing text {} to file {}'.format(text, path))
                with open(path, 'w') as txt_file:
                    txt_file.write(text.get_content_txt())
        else:
            # BUG FIX: the error path referenced the undefined name
            # ``abspath`` (NameError); use the computed abspath_dir.
            LOG.error('Unable to generate .txt files to folder {} ({}) which was not found.'.format(folder, abspath_dir))
def magazine_to_txt_files(base_address, directory):
    '''
    Get all texts from the given base address and generate text files under directory.

    :raises SystemExit: code 1 if the output directory cannot be created,
        code 2 if the address does not belong to the supported web base.
    '''
    if not os.path.isdir(directory):
        LOG.info('Creating directory {}'.format(directory))
        os.makedirs(directory)
    if not os.path.isdir(directory):
        LOG.fatal('Unable to create directory {} (target for txt files).'.format(directory))
        raise SystemExit(1)
    # BUG FIX: WEB_BASE contains regex metacharacters ('.', etc.), so it
    # must be escaped before being used as a pattern.
    if not re.search(re.escape(WEB_BASE), base_address):
        LOG.fatal('Web base address is hard coded to {} and differs from {}'.format(
            WEB_BASE, base_address))
        raise SystemExit(2)
    mag = Magazine(base_address)
    mag.to_txt(directory)
def open_browser_selection():
    '''
    Point the default web browser at the issue-selection page.
    '''
    selection_url = SELECTION_PAGE
    webbrowser.open(selection_url)
# main entry point: parse CLI arguments, then either open the selection
# page (base_address == 'help') or download the issue as .txt files.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Download MaxPlanckForschung texts')
    parser.add_argument('base_address', type=str,
                        help='Base address of the MPF issue. You get this by selecting an issue on {} and then copy the address. Type "help" here to open firefox to go to the selection page.'.format(SELECTION_PAGE))
    parser.add_argument('--out-type', '-t', type=str, required=False,
                        dest='out_type', default='txt',
                        help='Type in which to store the MPF issue.')
    parser.add_argument('--out-dir', '-o', type=str, required=False,
                        dest='out_dir', default='/tmp/MPF',
                        help='Directory where to store the output files.')
    parser.add_argument('--debug',
                        action='store_const', dest='debug', const=True, default=False,
                        help='Give verbose (debug) output.')
    args = parser.parse_args()
    # Verbosity: --debug switches the module logger to DEBUG.
    if args.debug:
        LOG.setLevel(logging.DEBUG)
    else:
        LOG.setLevel(logging.INFO)
    # Special positional value 'help' just opens the selection page.
    if args.base_address == 'help':
        open_browser_selection()
        raise SystemExit
    # Dispatch on output type; only 'txt' is implemented.
    if args.out_type == 'txt':
        magazine_to_txt_files(args.base_address, args.out_dir)
    else:
        LOG.fatal('Unknown output format {}'.format(args.out_type))
        raise SystemExit(3)
| |
"""Strategies for reading from & writing to config files."""
import logging
import os
from abc import ABCMeta, abstractmethod
from configparser import NoSectionError, RawConfigParser
from jsun import Decoder, DecodeError, Encoder
from .exc import SettingsFileNotFoundError, SettingsFileSectionNotFoundError
from .util import parse_file_name_and_section
__all__ = [
"Strategy",
"INIStrategy",
"INIJSONStrategy",
]
log = logging.getLogger(__name__)
class RawValue(str):
    """Marker for values that couldn't be decoded when reading.

    Behaves exactly like ``str``; the subclass only tags the value so
    callers can tell raw (undecodable) values apart from decoded ones.
    """
class Strategy(metaclass=ABCMeta):
    """Abstract base for config-file reading/writing strategies."""

    # File extensions (without dot) handled by this strategy; subclasses
    # override this.
    file_types = ()

    @abstractmethod
    def get_defaults(self, file_name):
        """Get default settings from file.

        Returns:
            - Settings from the default section.

        """

    @abstractmethod
    def read_section(self, file_name, section=None):
        """Read settings from specified ``section`` of config file.

        This is where the strategy-specific file-reading logic goes.

        Returns:
            - Settings from the specified section or an empty dict if
              the section isn't present.
            - Whether the section is present.

        """

    @abstractmethod
    def write_settings(self, settings, file_name, section=None):
        """Write settings to file."""

    def parse_file_name_and_section(
        self,
        file_name,
        section=None,
        extender=None,
        extender_section=None,
    ):
        """Parse file name and (maybe) section.

        Delegates to :func:`.util.parse_file_name_and_section` to parse
        the file name and section. If that function doesn't find a
        section, this method should return the default section for the
        strategy via :meth:`get_default_section` (if applicable).
        """
        file_name, section = parse_file_name_and_section(
            file_name,
            section,
            extender,
            extender_section,
        )
        if section is None:
            # Fall back to the strategy's default section (None here;
            # subclasses may override get_default_section).
            section = self.get_default_section(file_name)
        return file_name, section

    def read_file(self, file_name, section=None, _finalize=True):
        """Read settings from config file.

        Follows the ``extends`` chain: settings from extended files are
        read first and then overridden by the extending file.

        ``_finalize`` is True only for the outermost call; recursive
        calls receive already-parsed names and return extra bookkeeping
        (the section-present flag) instead of raising.
        """
        if _finalize:
            file_name, section = self.parse_file_name_and_section(file_name, section)
        if not os.path.isfile(file_name):
            raise SettingsFileNotFoundError(file_name)
        settings = {}
        result = self._read_one_file(file_name, section)
        file_settings, section_present, extends, extends_section = result
        if extends:
            # A file extending itself (another section of the same file)
            # is already handled inside _read_one_file; only recurse for
            # genuinely different files.
            if extends != file_name:
                result = self.read_file(extends, extends_section, _finalize=False)
                extends_settings, extends_section_present = result
                # The section counts as present if found anywhere in the
                # extends chain.
                section_present = section_present or extends_section_present
                settings.update(extends_settings)
        # The extending file's own settings win over extended ones.
        settings.update(file_settings)
        if _finalize:
            if not section_present:
                raise SettingsFileSectionNotFoundError(section)
        else:
            return settings, section_present
        return settings

    def _read_one_file(self, file_name, section, settings=None):
        """Read settings from a single config file."""
        if settings is None:
            settings = self.get_defaults(file_name)
        items, section_present = self.read_section(file_name, section)
        # ``extends`` may come from the section itself or from defaults.
        default_extends = settings.get("extends", None)
        extends = items.pop("extends", default_extends)
        if extends:
            result = self.parse_file_name_and_section(
                extends,
                extender=file_name,
                extender_section=section,
            )
            extends, extends_section = result
            if extends == file_name:
                # Self-extension: another section of the same file; read
                # it directly instead of recursing through read_file().
                result = self._read_one_file(file_name, extends_section, settings)
                extends_items, extends_section_present, *rest = result
                settings.update(extends_items)
                section_present = section_present or extends_section_present
        else:
            extends_section = None
        settings.update(items)
        # ``extends`` is chain bookkeeping, not a real setting.
        settings.pop("extends", None)
        return settings, section_present, extends, extends_section

    def get_default_section(self, file_name):
        # The base strategy has no notion of a default section.
        return None

    def decode_items(self, items):
        """Bulk decode items read from file to Python objects.

        Args:
            items: A sequence of items as would be returned from
                ``dict.items()``.

        Returns:
            dict: A new dict with item values decoded where possible.
                Values that can't be decoded will be wrapped with
                :class:`RawValue`.

        """
        decoded_items = {}
        for k, v in items:
            try:
                v = self.decode_value(v)
            except ValueError:
                # Keep the raw text, tagged so callers can detect it.
                v = RawValue(v)
            decoded_items[k] = v
        return decoded_items

    def decode_value(self, value):
        """Decode value read from file to Python object."""
        return value

    def encode_value(self, value):
        """Encode Python object as value that can be written to file."""
        return value
class LocalSettingsConfigParser(RawConfigParser):

    def options(self, section):
        """Return option names with ``[DEFAULT]`` options listed first.

        The stock implementation orders section options before defaults;
        this parser reverses that.
        """
        try:
            section_dict = self._sections[section]
        except KeyError:
            raise NoSectionError(section) from None
        names = list(self._defaults.keys())
        names.extend(k for k in section_dict.keys() if k not in self._defaults)
        return names

    def optionxform(self, option):
        """Return option names unchanged (no lower-casing)."""
        return option

    def get_section(self, section):
        """Get section without defaults.

        When retrieving a section from a :class:`RawConfigParser` using
        ``parser[section]``, defaults are included. To get a section's
        items without defaults, we have to access the semi-private
        ``_sections`` instance variable, which is encapsulated here.
        """
        return self._sections[section]
class INIStrategy(Strategy):
    """Strategy for plain ``.ini`` files (string values, no decoding)."""

    file_types = ("ini",)

    def __init__(self):
        # Cache parsers by file name
        self._parser_cache = {}

    def get_defaults(self, file_name):
        """Get default settings from ``[DEFAULT]`` section of file."""
        parser = self.get_parser(file_name)
        defaults = parser.defaults()
        return self.decode_items(defaults.items())

    # CONSISTENCY FIX: ``section=None`` matches the abstract base's
    # signature (Strategy.read_section).
    def read_section(self, file_name, section=None):
        """Read ``section`` of ``file_name``.

        Returns:
            - Decoded items (empty dict when the section is absent).
            - Whether the section is present.
        """
        parser = self.get_parser(file_name)
        if parser.has_section(section):
            items, section_present = parser.get_section(section), True
        else:
            items, section_present = {}, False
        decoded_items = self.decode_items(items.items())
        return decoded_items, section_present

    def write_settings(self, settings, file_name, section=None):
        """Write ``settings`` into ``section`` of ``file_name``.

        Creates the file and/or section when missing; keys are written in
        sorted order.
        """
        file_name, section = self.parse_file_name_and_section(file_name, section)
        parser = self.make_parser()
        if os.path.exists(file_name):
            with open(file_name) as fp:
                parser.read_file(fp)
        else:
            log.info("Creating new local settings file: %s", file_name)
        if section not in parser:
            log.info("Adding new section to %s: %s", file_name, section)
            parser.add_section(section)
        sorted_keys = sorted(settings.keys())
        for name in sorted_keys:
            value = settings[name]
            parser[section][name] = value
        with open(file_name, "w") as fp:
            parser.write(fp)
        # BUG FIX: the file on disk just changed, so any cached parser for
        # it is stale; drop it so the next get_parser() re-reads the file.
        self._parser_cache.pop(file_name, None)
        for name in sorted_keys:
            value = settings[name]
            log.info("Saved %s to %s as: %s", name, file_name, value)

    def get_default_section(self, file_name):
        """Returns first non-DEFAULT section; falls back to DEFAULT."""
        if not os.path.isfile(file_name):
            return "DEFAULT"
        parser = self.get_parser(file_name)
        sections = parser.sections()
        section = sections[0] if len(sections) > 0 else "DEFAULT"
        return section

    def get_parser(self, file_name):
        # Parse each file at most once per cache lifetime.
        if file_name not in self._parser_cache:
            parser = self.make_parser()
            with open(file_name) as fp:
                parser.read_file(fp)
            self._parser_cache[file_name] = parser
        return self._parser_cache[file_name]

    def make_parser(self):
        return LocalSettingsConfigParser()
class INIJSONStrategy(INIStrategy):
    """INI strategy whose values are encoded/decoded with jsun."""

    file_types = ("cfg",)

    def __init__(self):
        super().__init__()
        decoder = Decoder(strict=True, object_converter=None)
        encoder = Encoder()
        self._decoder = decoder
        self._decode = decoder.decode
        self._encoder = encoder
        self._encode = encoder.encode

    def decode_value(self, value):
        """Decode *value* with jsun; raise ValueError on failure."""
        try:
            return self._decode(value)
        except DecodeError as exc:
            message = (
                f"Could not parse `{value}` as JSON, number, or "
                f"datetime (issue at line {exc.line} column "
                f"{exc.column} position {exc.position})"
            )
            raise ValueError(message)

    def encode_value(self, value):
        """Encode *value* with jsun for writing back to file."""
        return self._encode(value)
def get_strategy_types():
    """Get a list of all :class:`Strategy` subclasses.

    The list will be ordered by file type extension.

    """

    def collect(base):
        # Depth-first walk of the subclass tree rooted at ``base``.
        found = []
        for sub in base.__subclasses__():
            found.append(sub)
            found.extend(collect(sub))
        return found

    return sorted(collect(Strategy), key=lambda t: t.file_types[0])
def get_file_type_map():
    """Map file types (extensions) to strategy types."""
    mapping = {}
    for strategy_type in get_strategy_types():
        for ext in strategy_type.file_types:
            # Two strategies claiming the same extension is a
            # programming error; fail loudly.
            if ext in mapping:
                raise KeyError(
                    f"File type {ext} already registered to {mapping[ext]}"
                )
            mapping[ext] = strategy_type
    return mapping
def guess_strategy_type(file_name_or_ext):
    """Guess strategy type to use for file by extension.

    Args:
        file_name_or_ext: Either a file name with an extension or just
            an extension

    Returns:
        Strategy: Type corresponding to extension or None if there's no
            corresponding strategy type

    """
    if "." in file_name_or_ext:
        # A file name: take (and strip) the last extension.
        ext = os.path.splitext(file_name_or_ext)[1].lstrip(".")
    else:
        # Already a bare extension.
        ext = file_name_or_ext
    return get_file_type_map().get(ext, None)
| |
#!/usr/bin/python
import os
import glob
import time
from datetime import datetime
import datetime
import traceback
import decimal
from models import Probe, PowerSwitchTail2, Heater
class Pump(PowerSwitchTail2):
    # A pump is just a PowerSwitchTail2-controlled outlet; the alias
    # exists for readability at call sites.
    pass
class Heating(object):
    """A group of Heater elements that are switched on/off together."""

    def __init__(self, heaters=None):
        """:param heaters: list of Heater instances (defaults to empty)."""
        # BUG FIX: ``heaters=[]`` was a mutable default argument shared
        # between instances; use None as the sentinel instead.
        if heaters is None:
            heaters = []
        assert isinstance(heaters, list)
        for heater in heaters:
            assert isinstance(heater, Heater)
        self.heaters = heaters
        self.is_on = False

    def turn_on(self):
        """Switch every heater on and record the state."""
        for heater in self.heaters:
            heater.turn_on()
        self.is_on = True

    def turn_off(self):
        """Switch every heater off and record the state."""
        for heater in self.heaters:
            heater.turn_off()
        self.is_on = False

    def __str__(self):
        return "\n".join([str(heater) for heater in self.heaters])

    def __repr__(self):
        return "Heating(heaters=[" + ",".join([repr(heater) for heater in self.heaters]) + "])"
class Step(object):
    """One temperature rest in the mash schedule.

    Public attributes: ``name``, ``duration`` (minutes), ``temp`` plus
    the derived ``min_temp``/``max_temp`` band (+/- 0.5 degrees).
    """

    def __init__(self, name, duration, temp):
        self.name = name
        self.duration = duration
        target = float(temp)
        self.temp = target
        self.min_temp = target - 0.5
        self.max_temp = target + 0.5

    @property
    def time_elapsed(self):
        """Minutes since the step started; 0 if it has not started yet."""
        if not hasattr(self, 'start_time'):
            return 0
        return (time.time() - self.start_time) / 60.0

    def __str__(self):
        return f"name = {self.name}, duration = {self.duration}, temp = {self.temp}"

    def __repr__(self):
        return f"Step(name='{self.name}', duration={self.duration}, temp={self.temp})"
class Herms(object):
    """Controller for a HERMS brewing rig.

    Coordinates the HLT/mash-tun temperature probes, the heating elements
    and the recirculation pump through the strike phase (initialize_strike_water)
    and the mash steps (run).
    """

    def __init__(self, hlt_probe, mashtun_probe, heating, pump, steps=None, room_temp=70, water_grist_ratio=1.25):
        # BUG FIX: ``steps=[]`` was a mutable default argument shared
        # between instances; use None as the sentinel instead.
        if steps is None:
            steps = []
        assert isinstance(hlt_probe, Probe)
        assert isinstance(mashtun_probe, Probe)
        assert isinstance(heating, Heating)
        assert isinstance(steps, list)
        self.hlt_probe = hlt_probe
        self.mashtun_probe = mashtun_probe
        self.heating = heating
        self.pump = pump
        self.steps = steps
        self.current_step_index = -1  # Use _next_step() to go to first step
        # NOTE(review): this raises IndexError when ``steps`` is empty --
        # a non-empty step list is effectively required.
        self.current_step = steps[0]
        self.temp_history = {}
        self.initialized_strike_water = False
        # There may be a period of time between the strike water being
        # ready and the user having completed the strike.
        self.strike_completed = False
        # Strike Ready is intermediate step before strike completed which just stops the pump so user can strike
        self.strike_ready = False
        self.room_temp = room_temp
        self.water_grist_ratio = water_grist_ratio
        self.strike_water_temp = self._calculate_strike_water_temp()
        self.mash_complete = False
        self.data = []  # Data for mash temps
        self.strike_data = []  # Data for warming up strike water only

    def __str__(self):
        return "{}\n{}\n{}\n{}\n".format(self.hlt_probe, self.mashtun_probe, self.heating, self.steps)

    def __repr__(self):
        return "Herms(hlt_probe={}, mashtun_probe={}, heating={}".format(repr(self.hlt_probe), repr(self.mashtun_probe),
                                                                         repr(self.heating)) + \
            ", steps=[" + ",".join([repr(step) for step in self.steps]) + '])'

    def _calculate_strike_water_temp(self):
        # Strike temperature estimate from the first step's target temp,
        # the grain/room temp and the water-to-grist ratio.
        return (0.2/self.water_grist_ratio) * (self.steps[0].temp - self.room_temp) + (self.steps[0].temp)

    @property
    def time_elapsed(self):
        """Minutes since run() started (requires ``start_time``)."""
        return (time.time() - self.start_time) / 60.0

    @property
    def strike_time_elapsed(self):
        """Minutes since strike-water heating started."""
        return (time.time() - self.strike_start_time) / 60.0

    @property
    def hlt_temp(self):
        return self.hlt_probe.temp

    @property
    def mashtun_temp(self):
        return self.mashtun_probe.temp

    def initialize_strike_water(self):
        """Heat the HLT water up to strike temperature.

        Loops until ``self.strike_ready`` is set (externally); every pass
        samples both probes and switches pump/heating accordingly.
        """
        if self.initialized_strike_water:
            # Already done; don't restart the strike phase.
            return
        self.strike_start_time = time.time()
        self.pump.turn_on()
        while not self.strike_ready:
            # Read temps
            hlt_temp = self.hlt_probe.temp
            mashtun_temp = self.mashtun_probe.temp
            # Add it to data set.
            # BUG FIX: was ``self.stirke_data`` (typo -> AttributeError)
            # and used ``self.time_elapsed``, which depends on
            # ``start_time`` -- only set later by run(). The strike phase
            # has its own timer.
            self.strike_data.append([self.strike_time_elapsed, hlt_temp, mashtun_temp])
            # Adjust temperature if necessary
            if hlt_temp < self.strike_water_temp:
                if not self.pump.is_on:
                    self.pump.turn_on()
                self.heating.turn_on()
                self.initialized_strike_water = False
            elif hlt_temp >= self.strike_water_temp:
                self.heating.turn_off()
                # BUG FIX: ``is_on`` is read as an attribute elsewhere in
                # this file (Heating, and the branch above); calling it
                # would raise TypeError. NOTE(review): confirm against the
                # PowerSwitchTail2 model.
                if self.pump.is_on:
                    self.pump.turn_off()
                self.initialized_strike_water = True
            time.sleep(5)
        # BUG FIX: ``del self.initialize_strike_water`` raised
        # AttributeError (the method lives on the class, not the
        # instance); re-entry is now guarded by initialized_strike_water.
        return

    def _next_step(self):
        """Advance to the next mash step and start its timer."""
        self.current_step_index += 1
        self.current_step = self.steps[self.current_step_index]
        # Start individual Step's duration timer
        self.current_step.start_time = time.time()
        # Start pump in the event it was off during the Strike/first step
        self.pump.turn_on()

    def run(self):
        """Run the mash schedule until all steps are complete."""
        if not self.strike_completed:
            raise RuntimeError("Don't execute brew.run() unless strike is completed.")
        # Start Total Duration
        self.start_time = time.time()
        # Advance to first step (sets current_step_index to 0)
        self._next_step()
        while True:
            # Check to see if next step is ready
            if self.current_step.time_elapsed > self.current_step.duration:
                if len(self.steps) == self.current_step_index + 1:
                    # This is the last step. Time to Sparge
                    self.mash_complete = True
                    return
                else:
                    # Move to next step
                    self._next_step()
            # Read temps
            hlt_temp = self.hlt_probe.temp
            mashtun_temp = self.mashtun_probe.temp
            # Add it to data set
            self.data.append([self.time_elapsed, hlt_temp, mashtun_temp])
            # Adjust temperature if necessary
            if hlt_temp >= self.current_step.max_temp:
                self.heating.turn_off()
                # For first step, HLT water will be at strike water temp, so turn off pump while HLT is cooling down
                # This could also do the same for all steps, perhaps?
                if self.current_step_index == 0:
                    # BUG FIX: attribute access, not a call (see above).
                    if self.pump.is_on:
                        self.pump.turn_off()
            elif hlt_temp < self.current_step.min_temp:
                self.heating.turn_on()
                # For first step, HLT water will be at strike water temp, so turn off pump while HLT is cooling down
                # This could also do the same for all steps, perhaps?
                if self.current_step_index == 0:
                    if not self.pump.is_on:
                        self.pump.turn_on()
            # Sleep
            time.sleep(5)
| |
import itertools
import traceback
from eventlet import event
from eventlet import greenthread
from eventlet import queue
from eventlet import semaphore
from eventlet.support import greenlets as greenlet
__all__ = ['GreenPool', 'GreenPile']
DEBUG = True
class GreenPool(object):
    """The GreenPool class is a pool of green threads.
    """
    def __init__(self, size=1000):
        # Maximum number of greenthreads allowed to run at once.
        self.size = size
        # GreenThreads currently executing pool work; used to detect
        # re-entrant spawns from inside the pool.
        self.coroutines_running = set()
        # Counting semaphore gating spawns; its counter is the free count.
        self.sem = semaphore.Semaphore(size)
        # Fired (send) when the pool drains; waitall() blocks on this.
        self.no_coros_running = event.Event()
    def resize(self, new_size):
        """ Change the max number of greenthreads doing work at any given time.

        If resize is called when there are more than *new_size* greenthreads
        already working on tasks, they will be allowed to complete but no new
        tasks will be allowed to get launched until enough greenthreads finish
        their tasks to drop the overall quantity below *new_size*. Until
        then, the return value of free() will be negative.
        """
        size_delta = new_size - self.size
        # Adjusting the semaphore counter directly may drive it negative,
        # which is how "shrunk below current load" is represented.
        self.sem.counter += size_delta
        self.size = new_size
    def running(self):
        """ Returns the number of greenthreads that are currently executing
        functions in the GreenPool."""
        return len(self.coroutines_running)
    def free(self):
        """ Returns the number of greenthreads available for use.

        If zero or less, the next call to :meth:`spawn` or :meth:`spawn_n` will
        block the calling greenthread until a slot becomes available."""
        return self.sem.counter
    def spawn(self, function, *args, **kwargs):
        """Run the *function* with its arguments in its own green thread.
        Returns the :class:`GreenThread <eventlet.greenthread.GreenThread>`
        object that is running the function, which can be used to retrieve the
        results.

        If the pool is currently at capacity, ``spawn`` will block until one of
        the running greenthreads completes its task and frees up a slot.

        This function is reentrant; *function* can call ``spawn`` on the same
        pool without risk of deadlocking the whole thing.
        """
        # if reentering an empty pool, don't try to wait on a coroutine freeing
        # itself -- instead, just execute in the current coroutine
        current = greenthread.getcurrent()
        if self.sem.locked() and current in self.coroutines_running:
            # a bit hacky to use the GT without switching to it
            gt = greenthread.GreenThread(current)
            gt.main(function, args, kwargs)
            return gt
        else:
            self.sem.acquire()
            gt = greenthread.spawn(function, *args, **kwargs)
            if not self.coroutines_running:
                # Pool was idle: arm a fresh Event for the next waitall().
                self.no_coros_running = event.Event()
            self.coroutines_running.add(gt)
            # _spawn_done releases the slot when the thread finishes.
            gt.link(self._spawn_done)
        return gt
    def _spawn_n_impl(self, func, args, kwargs, coro):
        # Wrapper used by spawn_n: run *func*, swallow ordinary exceptions
        # (optionally printing them) and perform pool bookkeeping.
        try:
            try:
                func(*args, **kwargs)
            except (KeyboardInterrupt, SystemExit, greenlet.GreenletExit):
                raise
            except:
                if DEBUG:
                    traceback.print_exc()
        finally:
            # coro is None when running inline (re-entrant case): no pool
            # slot was taken, so there is nothing to release.
            if coro is None:
                return
            else:
                coro = greenthread.getcurrent()
                self._spawn_done(coro)
    def spawn_n(self, function, *args, **kwargs):
        """Create a greenthread to run the *function*, the same as
        :meth:`spawn`. The difference is that :meth:`spawn_n` returns
        None; the results of *function* are not retrievable.
        """
        # if reentering an empty pool, don't try to wait on a coroutine freeing
        # itself -- instead, just execute in the current coroutine
        current = greenthread.getcurrent()
        if self.sem.locked() and current in self.coroutines_running:
            self._spawn_n_impl(function, args, kwargs, None)
        else:
            self.sem.acquire()
            g = greenthread.spawn_n(self._spawn_n_impl,
                                    function, args, kwargs, True)
            if not self.coroutines_running:
                # Pool was idle: arm a fresh Event for the next waitall().
                self.no_coros_running = event.Event()
            self.coroutines_running.add(g)
    def waitall(self):
        """Waits until all greenthreads in the pool are finished working."""
        assert greenthread.getcurrent() not in self.coroutines_running, \
            "Calling waitall() from within one of the "\
            "GreenPool's greenthreads will never terminate."
        if self.running():
            self.no_coros_running.wait()
    def _spawn_done(self, coro):
        # Called when a pooled greenthread finishes: free its slot and, if
        # the pool drained completely, wake any pending waitall() callers.
        self.sem.release()
        if coro is not None:
            self.coroutines_running.remove(coro)
        # if done processing (no more work is waiting for processing),
        # we can finish off any waitall() calls that might be pending
        if self.sem.balance == self.size:
            self.no_coros_running.send(None)
    def waiting(self):
        """Return the number of greenthreads waiting to spawn.
        """
        # A negative semaphore balance means that many callers are
        # currently blocked in acquire().
        if self.sem.balance < 0:
            return -self.sem.balance
        else:
            return 0
    def _do_map(self, func, it, gi):
        # Producer for starmap/imap: spawn one task per input item, then a
        # sentinel task whose result marks the end of input (see GreenMap).
        for args in it:
            gi.spawn(func, *args)
        gi.spawn(return_stop_iteration)
    def starmap(self, function, iterable):
        """This is the same as :func:`itertools.starmap`, except that *func* is
        executed in a separate green thread for each item, with the concurrency
        limited by the pool's size. In operation, starmap consumes a constant
        amount of memory, proportional to the size of the pool, and is thus
        suited for iterating over extremely long input lists.
        """
        if function is None:
            # Mirror itertools: a None function yields the argument tuples.
            function = lambda *a: a
        gi = GreenMap(self.size)
        # Feed the map from a separate greenthread so results can be
        # consumed while input is still being produced.
        greenthread.spawn_n(self._do_map, function, iterable, gi)
        return gi
    def imap(self, function, *iterables):
        """This is the same as :func:`itertools.imap`, and has the same
        concurrency and memory behavior as :meth:`starmap`.

        It's quite convenient for, e.g., farming out jobs from a file::

            def worker(line):
                return do_something(line)
            pool = GreenPool()
            for result in pool.imap(worker, open("filename", 'r')):
                print result
        """
        # NOTE: itertools.izip is Python 2 only; this module targets py2.
        return self.starmap(function, itertools.izip(*iterables))
def return_stop_iteration():
    """Sentinel task: its return value (a StopIteration instance) tells a
    GreenMap consumer that the input is exhausted (see GreenPool._do_map)."""
    sentinel = StopIteration()
    return sentinel
class GreenPile(object):
    """GreenPile is an abstraction representing a bunch of I/O-related tasks.

    Construct a GreenPile with an existing GreenPool object. The GreenPile will
    then use that pool's concurrency as it processes its jobs. There can be
    many GreenPiles associated with a single GreenPool.

    A GreenPile can also be constructed standalone, not associated with any
    GreenPool. To do this, construct it with an integer size parameter instead
    of a GreenPool.

    It is not advisable to iterate over a GreenPile in a different greenthread
    than the one which is calling spawn. The iterator will exit early in that
    situation.
    """
    def __init__(self, size_or_pool=1000):
        # Accept a shared GreenPool or build a private one of the given size.
        if isinstance(size_or_pool, GreenPool):
            self.pool = size_or_pool
        else:
            self.pool = GreenPool(size_or_pool)
        # FIFO of GreenThreads whose results have not been consumed yet.
        self.waiters = queue.LightQueue()
        # Set to True on first spawn; lets next() distinguish "never used"
        # from "drained".
        self.used = False
        # Number of spawned-but-not-yet-consumed results.
        self.counter = 0
    def spawn(self, func, *args, **kw):
        """Runs *func* in its own green thread, with the result available by
        iterating over the GreenPile object."""
        self.used = True
        self.counter += 1
        try:
            gt = self.pool.spawn(func, *args, **kw)
            self.waiters.put(gt)
        except:
            # Spawn failed: roll back the pending-result count before
            # propagating the error.
            self.counter -= 1
            raise
    def __iter__(self):
        return self
    def next(self):
        """Wait for the next result, suspending the current greenthread until it
        is available. Raises StopIteration when there are no more results."""
        if self.counter == 0 and self.used:
            raise StopIteration()
        try:
            return self.waiters.get().wait()
        finally:
            # Decrement even when wait() raises, keeping counter accurate.
            self.counter -= 1
# this is identical to GreenPile but it blocks on spawn if the results
# aren't consumed, and it doesn't generate its own StopIteration exception,
# instead relying on the spawning process to send one in when it's done
class GreenMap(GreenPile):
    """Like GreenPile, but with a bounded result queue (spawn blocks when
    results are not consumed) and no self-generated StopIteration: the
    producer sends a StopIteration sentinel to mark the end of input."""

    def __init__(self, size_or_pool):
        super(GreenMap, self).__init__(size_or_pool)
        self.waiters = queue.LightQueue(maxsize=self.pool.size)

    def next(self):
        try:
            result = self.waiters.get().wait()
        finally:
            # Keep the pending-result count accurate on every exit path.
            self.counter -= 1
        if isinstance(result, StopIteration):
            # Producer's end-of-input sentinel (see return_stop_iteration).
            raise result
        return result
# ---- (file-concatenation artifact: a new, unrelated module begins below) ----
##########################################################################
#
# Copyright 2010 Dr D Studios Pty Limited (ACN 127 184 954) (Dr. D Studios),
# its affiliates and/or its licensors.
#
# Copyright (c) 2010-2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import hou
import imath
import IECore
import IECoreScene
import IECoreHoudini
import unittest
import os
import math
class TestFromHoudiniPointsConverter( IECoreHoudini.TestCase ) :
def createBox( self ) :
    """Create a box SOP inside a fresh geo container under /obj and return it."""
    container = hou.node("/obj").createNode("geo", run_init_scripts=False)
    return container.createNode( "box" )
def createTorus( self ) :
    """Create a 10x10 torus SOP inside a fresh geo container and return it."""
    container = hou.node("/obj").createNode("geo", run_init_scripts=False)
    torus = container.createNode( "torus" )
    for parmName in ( "rows", "cols" ) :
        torus.parm( parmName ).set( 10 )
    return torus
def createPoints( self ) :
    """Scatter 5000 points over a faceted box (with point normals added)
    and return the scatter SOP."""
    container = hou.node("/obj").createNode("geo", run_init_scripts=False)
    boxNode = container.createNode( "box" )
    facetNode = container.createNode( "facet" )
    facetNode.parm("postnml").set(True)
    facetNode.setInput( 0, boxNode )
    scatterNode = container.createNode( "scatter" )
    scatterNode.parm( "npts" ).set( 5000 )
    scatterNode.setInput( 0, facetNode )
    return scatterNode
def createPopNet( self ):
    """Create a minimal particle dopnet (popobject -> popsolver -> output)
    inside a fresh geo container and return the dopnet node."""
    container = hou.node( '/obj' ).createNode("geo", run_init_scripts=False)
    popNet = container.createNode( "dopnet", "popnet" )
    outputNode = popNet.createNode( "popobject" ).createOutputNode( "popsolver" ).createOutputNode( "output" )
    outputNode.setDisplayFlag( True )
    return popNet
# creates a converter
# creates a converter
def testCreateConverter( self ) :
    """A FromHoudiniPointsConverter can be constructed directly from a SOP.

    Returns the converter so other tests can reuse it.
    """
    box = self.createBox()
    converter = IECoreHoudini.FromHoudiniPointsConverter( box )
    # assertTrue replaces assert_, which is deprecated and removed in Python 3.12
    self.assertTrue( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) )
    return converter
# creates a converter
def testFactory( self ) :
    """The converter factory picks the right converter type, honours the
    resultType argument, and advertises PointsPrimitive support."""
    box = self.createBox()
    points = self.createPoints()
    # a box is polygonal, so the default converter is the polygons one
    converter = IECoreHoudini.FromHoudiniGeometryConverter.create( box )
    self.assertTrue( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPolygonsConverter ) ) )
    converter = IECoreHoudini.FromHoudiniGeometryConverter.create( points, resultType = IECoreScene.TypeId.PointsPrimitive )
    self.assertTrue( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) )
    # an unachievable resultType yields no converter at all
    converter = IECoreHoudini.FromHoudiniGeometryConverter.create( box, resultType = IECore.TypeId.Parameter )
    self.assertEqual( converter, None )
    # assertTrue replaces failUnless/assert_, deprecated and removed in Python 3.12
    self.assertTrue( IECoreScene.TypeId.PointsPrimitive in IECoreHoudini.FromHoudiniGeometryConverter.supportedTypes() )
    # createDummy accepts either a single TypeId or a list of them
    converter = IECoreHoudini.FromHoudiniGeometryConverter.createDummy( IECoreScene.TypeId.PointsPrimitive )
    self.assertTrue( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) )
    converter = IECoreHoudini.FromHoudiniGeometryConverter.createDummy( [ IECoreScene.TypeId.PointsPrimitive ] )
    self.assertTrue( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) )
# performs geometry conversion
# performs geometry conversion
def testDoConversion( self ) :
    """convert() on a box SOP produces a PointsPrimitive."""
    converter = self.testCreateConverter()
    result = converter.convert()
    # assertTrue replaces assert_, which is deprecated and removed in Python 3.12
    self.assertTrue( result.isInstanceOf( IECoreScene.TypeId.PointsPrimitive ) )
def testConvertFromHOMGeo( self ) :
    """Converters can be created directly from a hou.Geometry object,
    with or without an explicit result type."""
    geo = self.createPoints().geometry()
    converter = IECoreHoudini.FromHoudiniGeometryConverter.createFromGeo( geo )
    # assertTrue replaces failUnless, which is deprecated and removed in Python 3.12
    self.assertTrue( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) )
    result = converter.convert()
    self.assertTrue( result.isInstanceOf( IECoreScene.TypeId.PointsPrimitive ) )
    converter2 = IECoreHoudini.FromHoudiniGeometryConverter.createFromGeo( geo, IECoreScene.TypeId.PointsPrimitive )
    self.assertTrue( converter2.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) )
# convert a mesh
# convert a mesh
def testConvertMesh( self ) :
    """A torus mesh converts to a PointsPrimitive whose points stay
    inside the expected bounds."""
    torus = self.createTorus()
    converter = IECoreHoudini.FromHoudiniPointsConverter( torus )
    result = converter.convert()
    self.assertEqual( result.typeId(), IECoreScene.PointsPrimitive.staticTypeId() )
    bbox = result.bound()
    self.assertEqual( bbox.min().x, -2.0 )
    self.assertEqual( bbox.max().x, 2.0 )
    # 10 rows x 10 cols torus yields 100 points
    self.assertEqual( result.numPoints, 100 )
    for i in range( result.numPoints ) :
        # assertTrue replaces assert_, deprecated and removed in Python 3.12
        self.assertTrue( result["P"].data[i].x >= bbox.min().x )
        self.assertTrue( result["P"].data[i].x <= bbox.max().x )
# test prim/vertex attributes
# test prim/vertex attributes
def testConvertPrimVertAttributes( self ) :
    """For a multi-primitive torus, point and detail attributes convert,
    while primitive and vertex attributes are dropped from the result.

    Uses assertTrue throughout in place of the assert_ alias, which is
    deprecated and removed in Python 3.12.
    """
    torus = self.createTorus()
    geo = torus.parent()
    # add vertex normals
    facet = geo.createNode( "facet", node_name = "add_point_normals" )
    facet.parm("postnml").set(True)
    facet.setInput( 0, torus )
    # add a primitive colour attributes
    primcol = geo.createNode( "primitive", node_name = "prim_colour" )
    primcol.parm("doclr").set(1)
    primcol.parm("diffr").setExpression("rand($PR)")
    primcol.parm("diffg").setExpression("rand($PR+1)")
    primcol.parm("diffb").setExpression("rand($PR+2)")
    primcol.setInput( 0, facet )
    # add a load of different vertex attributes
    vert_f1 = geo.createNode( "attribcreate", node_name = "vert_f1", exact_type_name=True )
    vert_f1.parm("name").set("vert_f1")
    vert_f1.parm("class").set(3)
    vert_f1.parm("value1").setExpression("$VTX*0.1")
    vert_f1.setInput( 0, primcol )
    vert_f2 = geo.createNode( "attribcreate", node_name = "vert_f2", exact_type_name=True )
    vert_f2.parm("name").set("vert_f2")
    vert_f2.parm("class").set(3)
    vert_f2.parm("size").set(2)
    vert_f2.parm("value1").setExpression("$VTX*0.1")
    vert_f2.parm("value2").setExpression("$VTX*0.1")
    vert_f2.setInput( 0, vert_f1 )
    vert_f3 = geo.createNode( "attribcreate", node_name = "vert_f3", exact_type_name=True )
    vert_f3.parm("name").set("vert_f3")
    vert_f3.parm("class").set(3)
    vert_f3.parm("size").set(3)
    vert_f3.parm("value1").setExpression("$VTX*0.1")
    vert_f3.parm("value2").setExpression("$VTX*0.1")
    vert_f3.parm("value3").setExpression("$VTX*0.1")
    vert_f3.setInput( 0, vert_f2 )
    vert_i1 = geo.createNode( "attribcreate", node_name = "vert_i1", exact_type_name=True )
    vert_i1.parm("name").set("vert_i1")
    vert_i1.parm("class").set(3)
    vert_i1.parm("type").set(1)
    vert_i1.parm("value1").setExpression("$VTX*0.1")
    vert_i1.setInput( 0, vert_f3 )
    vert_i2 = geo.createNode( "attribcreate", node_name = "vert_i2", exact_type_name=True )
    vert_i2.parm("name").set("vert_i2")
    vert_i2.parm("class").set(3)
    vert_i2.parm("type").set(1)
    vert_i2.parm("size").set(2)
    vert_i2.parm("value1").setExpression("$VTX*0.1")
    vert_i2.parm("value2").setExpression("$VTX*0.1")
    vert_i2.setInput( 0, vert_i1 )
    vert_i3 = geo.createNode( "attribcreate", node_name = "vert_i3", exact_type_name=True )
    vert_i3.parm("name").set("vert_i3")
    vert_i3.parm("class").set(3)
    vert_i3.parm("type").set(1)
    vert_i3.parm("size").set(3)
    vert_i3.parm("value1").setExpression("$VTX*0.1")
    vert_i3.parm("value2").setExpression("$VTX*0.1")
    vert_i3.parm("value3").setExpression("$VTX*0.1")
    vert_i3.setInput( 0, vert_i2 )
    vert_v3f = geo.createNode( "attribcreate", node_name = "vert_v3f", exact_type_name=True )
    vert_v3f.parm("name").set("vert_v3f")
    vert_v3f.parm("class").set(3)
    vert_v3f.parm("type").set(2)
    vert_v3f.parm("value1").setExpression("$VTX*0.1")
    vert_v3f.parm("value2").setExpression("$VTX*0.1")
    vert_v3f.parm("value3").setExpression("$VTX*0.1")
    vert_v3f.setInput( 0, vert_i3 )
    detail_i3 = geo.createNode( "attribcreate", node_name = "detail_i3", exact_type_name=True )
    detail_i3.parm("name").set("detail_i3")
    detail_i3.parm("class").set(0)
    detail_i3.parm("type").set(1)
    detail_i3.parm("size").set(3)
    detail_i3.parm("value1").set(123)
    detail_i3.parm("value2").set(456.789) # can we catch it out with a float?
    detail_i3.parm("value3").set(789)
    detail_i3.setInput( 0, vert_v3f )
    out = geo.createNode( "null", node_name="OUT" )
    out.setInput( 0, detail_i3 )
    # convert it all
    converter = IECoreHoudini.FromHoudiniPointsConverter( out )
    self.assertTrue( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) )
    result = converter.convert()
    self.assertTrue( result.isInstanceOf( IECoreScene.TypeId.PointsPrimitive ) )
    bbox = result.bound()
    self.assertEqual( bbox.min().x, -2.0 )
    self.assertEqual( bbox.max().x, 2.0 )
    self.assertEqual( result.numPoints, 100 )
    for i in range( result.numPoints ) :
        self.assertTrue( result["P"].data[i].x >= bbox.min().x )
        self.assertTrue( result["P"].data[i].x <= bbox.max().x )
    # test point attributes
    self.assertTrue( "P" in result )
    self.assertEqual( result['P'].data.typeId(), IECore.TypeId.V3fVectorData )
    self.assertEqual( result['P'].interpolation, IECoreScene.PrimitiveVariable.Interpolation.Vertex )
    self.assertEqual( result['P'].data.size(), result.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex ) )
    self.assertEqual( result["P"].data.getInterpretation(), IECore.GeometricData.Interpretation.Point )
    self.assertTrue( "N" in result )
    self.assertEqual( result['N'].data.typeId(), IECore.TypeId.V3fVectorData )
    self.assertEqual( result['N'].interpolation, IECoreScene.PrimitiveVariable.Interpolation.Vertex )
    self.assertEqual( result['N'].data.size(), result.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex ) )
    self.assertEqual( result["N"].data.getInterpretation(), IECore.GeometricData.Interpretation.Normal )
    # test detail attributes
    self.assertTrue( "detail_i3" in result )
    self.assertEqual( result['detail_i3'].data.typeId(), IECore.TypeId.V3iData )
    self.assertEqual( result['detail_i3'].interpolation, IECoreScene.PrimitiveVariable.Interpolation.Constant )
    self.assertEqual( result['detail_i3'].data.value.x, 123 )
    # the float 456.789 is truncated on conversion to an int attribute
    self.assertEqual( result['detail_i3'].data.value.y, 456 )
    self.assertEqual( result['detail_i3'].data.value.z, 789 )
    # test primitive attributes
    self.assertTrue( "Cd" not in result )
    # test vertex attributes
    attrs = [ "vert_f1", "vert_f2", "vert_f3", "vert_i1", "vert_i2", "vert_i3", "vert_v3f" ]
    for a in attrs :
        self.assertTrue( a not in result )
    self.assertTrue( result.arePrimitiveVariablesValid() )
# test prim/vertex attributes on a single primitive (mesh)
# test prim/vertex attributes on a single primitive (mesh)
def testConvertMeshPrimVertAttributes( self ) :
    """For a single-primitive torus, primitive and vertex attributes DO
    convert (as Uniform / Vertex variables), alongside point and detail
    attributes, quaternions, matrices and indexed strings.

    Uses assertTrue throughout in place of the assert_ alias, which is
    deprecated and removed in Python 3.12.
    """
    torus = self.createTorus()
    # type 1 makes the torus a single polygon-mesh primitive
    torus.parm( "type" ).set( 1 )
    geo = torus.parent()
    # add vertex normals
    facet = geo.createNode( "facet", node_name = "add_point_normals" )
    facet.parm("postnml").set(True)
    facet.setInput( 0, torus )
    # add a primitive colour attributes
    primcol = geo.createNode( "primitive", node_name = "prim_colour" )
    primcol.parm("doclr").set(1)
    primcol.parm("diffr").setExpression("rand($PR)")
    primcol.parm("diffg").setExpression("rand($PR+1)")
    primcol.parm("diffb").setExpression("rand($PR+2)")
    primcol.setInput( 0, facet )
    # add a load of different vertex attributes
    vert_f1 = geo.createNode( "attribcreate", node_name = "vert_f1", exact_type_name=True )
    vert_f1.parm("name").set("vert_f1")
    vert_f1.parm("class").set(3)
    vert_f1.parm("value1").setExpression("$VTX*0.1")
    vert_f1.setInput( 0, primcol )
    vert_f2 = geo.createNode( "attribcreate", node_name = "vert_f2", exact_type_name=True )
    vert_f2.parm("name").set("vert_f2")
    vert_f2.parm("class").set(3)
    vert_f2.parm("size").set(2)
    vert_f2.parm("value1").setExpression("$VTX*0.1")
    vert_f2.parm("value2").setExpression("$VTX*0.1")
    vert_f2.setInput( 0, vert_f1 )
    vert_f3 = geo.createNode( "attribcreate", node_name = "vert_f3", exact_type_name=True )
    vert_f3.parm("name").set("vert_f3")
    vert_f3.parm("class").set(3)
    vert_f3.parm("size").set(3)
    vert_f3.parm("value1").setExpression("$VTX*0.1")
    vert_f3.parm("value2").setExpression("$VTX*0.1")
    vert_f3.parm("value3").setExpression("$VTX*0.1")
    vert_f3.setInput( 0, vert_f2 )
    # a 4-float named "orient" is treated as a quaternion by convention
    vert_quat = geo.createNode( "attribcreate", node_name = "vert_quat", exact_type_name=True )
    vert_quat.parm("name").set("orient")
    vert_quat.parm("class").set(3)
    vert_quat.parm("size").set(4)
    vert_quat.parm("value1").setExpression("$VTX*0.1")
    vert_quat.parm("value2").setExpression("$VTX*0.2")
    vert_quat.parm("value3").setExpression("$VTX*0.3")
    vert_quat.parm("value4").setExpression("$VTX*0.4")
    vert_quat.setInput( 0, vert_f3 )
    vert_quat2 = geo.createNode( "attribcreate", node_name = "vert_quat2", exact_type_name=True )
    vert_quat2.parm("name").set("quat_2")
    vert_quat2.parm("class").set(3)
    vert_quat2.parm("size").set(4)
    vert_quat2.parm("typeinfo").set(6) # set type info to quaternion
    vert_quat2.parm("value1").setExpression("$VTX*0.2")
    vert_quat2.parm("value2").setExpression("$VTX*0.4")
    vert_quat2.parm("value3").setExpression("$VTX*0.6")
    vert_quat2.parm("value4").setExpression("$VTX*0.8")
    vert_quat2.setInput( 0, vert_quat )
    vert_m44create = geo.createNode( "attribcreate", node_name = "vert_m44create", exact_type_name=True )
    vert_m44create.parm("name").set("m44")
    vert_m44create.parm("class").set(3)
    vert_m44create.parm("size").set(16)
    vert_m44create.parm("typeinfo").set(7) # set type info to transformation matrix
    vert_m44create.setInput( 0, vert_quat2 )
    vert_m44 = geo.createNode( "attribwrangle", node_name = "vert_m44", exact_type_name=True )
    vert_m44.parm("snippet").set("4@m44 = maketransform(0,0,{ 10, 20, 30 },{ 30, 45, 60},{ 3, 4, 5 },{ 0, 0, 0 });")
    vert_m44.parm("class").set(3)
    vert_m44.setInput( 0, vert_m44create )
    vert_m33create = geo.createNode( "attribcreate", node_name = "vert_m33create", exact_type_name=True )
    vert_m33create.parm("name").set("m33")
    vert_m33create.parm("class").set(3)
    vert_m33create.parm("size").set(9)
    vert_m33create.setInput( 0, vert_m44 )
    vert_m33 = geo.createNode( "attribwrangle", node_name = "vert_m33", exact_type_name=True )
    vert_m33.parm("snippet").set("3@m33 = matrix3(maketransform(0,0,{ 0, 0, 0 },{ 30, 45, 60},{ 3, 4, 5 },{ 0, 0, 0 }));")
    vert_m33.parm("class").set(3)
    vert_m33.setInput( 0, vert_m33create )
    vert_i1 = geo.createNode( "attribcreate", node_name = "vert_i1", exact_type_name=True )
    vert_i1.parm("name").set("vert_i1")
    vert_i1.parm("class").set(3)
    vert_i1.parm("type").set(1)
    vert_i1.parm("value1").setExpression("$VTX*0.1")
    vert_i1.setInput( 0, vert_m33 )
    vert_i2 = geo.createNode( "attribcreate", node_name = "vert_i2", exact_type_name=True )
    vert_i2.parm("name").set("vert_i2")
    vert_i2.parm("class").set(3)
    vert_i2.parm("type").set(1)
    vert_i2.parm("size").set(2)
    vert_i2.parm("value1").setExpression("$VTX*0.1")
    vert_i2.parm("value2").setExpression("$VTX*0.1")
    vert_i2.setInput( 0, vert_i1 )
    vert_i3 = geo.createNode( "attribcreate", node_name = "vert_i3", exact_type_name=True )
    vert_i3.parm("name").set("vert_i3")
    vert_i3.parm("class").set(3)
    vert_i3.parm("type").set(1)
    vert_i3.parm("size").set(3)
    vert_i3.parm("value1").setExpression("$VTX*0.1")
    vert_i3.parm("value2").setExpression("$VTX*0.1")
    vert_i3.parm("value3").setExpression("$VTX*0.1")
    vert_i3.setInput( 0, vert_i2 )
    vert_v3f = geo.createNode( "attribcreate", node_name = "vert_v3f", exact_type_name=True )
    vert_v3f.parm("name").set("vert_v3f")
    vert_v3f.parm("class").set(3)
    vert_v3f.parm("type").set(2)
    vert_v3f.parm("value1").setExpression("$VTX*0.1")
    vert_v3f.parm("value2").setExpression("$VTX*0.1")
    vert_v3f.parm("value3").setExpression("$VTX*0.1")
    vert_v3f.setInput( 0, vert_i3 )
    vertString = geo.createNode( "attribcreate", node_name = "vertString", exact_type_name=True )
    vertString.parm("name").set("vertString")
    vertString.parm("class").set(3)
    vertString.parm("type").set(3)
    vertString.parm("string").setExpression("'string %06d!' % pwd().curPoint().number()", hou.exprLanguage.Python)
    vertString.setInput( 0, vert_v3f )
    # deliberately unsorted string values, to verify table sorting below
    vertString2 = geo.createNode( "attribcreate", node_name = "vertString2", exact_type_name=True )
    vertString2.parm("name").set("vertString2")
    vertString2.parm("class").set(3)
    vertString2.parm("type").set(3)
    vertString2.parm("string").setExpression("vals = [ 'd','c','e','a','g','f','b' ]\nreturn vals[ pwd().curPoint().number() % 7 ]", hou.exprLanguage.Python)
    vertString2.setInput( 0, vertString )
    # array (list) attributes, which the converter does not support
    vert_iList = geo.createNode( "attribwrangle", node_name = "vert_iList", exact_type_name=True )
    vert_iList.parm("snippet").set("int i[];\ni[]@vert_iList = i;")
    vert_iList.parm("class").set(3)
    vert_iList.setInput( 0, vertString2 )
    vert_fList = geo.createNode( "attribwrangle", node_name = "vert_fList", exact_type_name=True )
    vert_fList.parm("snippet").set("float f[];\nf[]@vert_fList = f;")
    vert_fList.parm("class").set(3)
    vert_fList.setInput( 0, vert_iList )
    detail_i3 = geo.createNode( "attribcreate", node_name = "detail_i3", exact_type_name=True )
    detail_i3.parm("name").set("detail_i3")
    detail_i3.parm("class").set(0)
    detail_i3.parm("type").set(1)
    detail_i3.parm("size").set(3)
    detail_i3.parm("value1").set(123)
    detail_i3.parm("value2").set(456.789) # can we catch it out with a float?
    detail_i3.parm("value3").set(789)
    detail_i3.setInput( 0, vert_fList )
    detail_m33create = geo.createNode( "attribcreate", node_name = "detail_m33create", exact_type_name=True )
    detail_m33create.parm("name").set("detail_m33")
    detail_m33create.parm("class").set(0)
    detail_m33create.parm("size").set(9)
    detail_m33create.setInput( 0, detail_i3 )
    detail_m33 = geo.createNode( "attribwrangle", node_name = "detail_m33", exact_type_name=True )
    detail_m33.parm("snippet").set("3@detail_m33 = matrix3( maketransform(0,0,{ 10, 20, 30 },{ 30, 45, 60},{ 3, 4, 5 },{ 0, 0, 0 }) );")
    detail_m33.parm("class").set(0)
    detail_m33.setInput( 0, detail_m33create )
    detail_m44create = geo.createNode( "attribcreate", node_name = "detail_m44create", exact_type_name=True )
    detail_m44create.parm("name").set("detail_m44")
    detail_m44create.parm("class").set(0)
    detail_m44create.parm("size").set(16)
    detail_m44create.setInput( 0, detail_m33 )
    detail_m44 = geo.createNode( "attribwrangle", node_name = "detail_m44", exact_type_name=True )
    detail_m44.parm("snippet").set("4@detail_m44 = maketransform(0,0,{ 10, 20, 30 },{ 30, 45, 60},{ 3, 4, 5 },{ 0, 0, 0 });")
    detail_m44.parm("class").set(0)
    detail_m44.setInput( 0, detail_m44create )
    detail_iList = geo.createNode( "attribwrangle", node_name = "detail_iList", exact_type_name=True )
    detail_iList.parm("snippet").set("int i[];\ni[]@detail_iList = i;")
    detail_iList.parm("class").set(0)
    detail_iList.setInput( 0, detail_m44 )
    detail_fList = geo.createNode( "attribwrangle", node_name = "detail_fList", exact_type_name=True )
    detail_fList.parm("snippet").set("float f[];\nf[]@detail_fList = f;")
    detail_fList.parm("class").set(0)
    detail_fList.setInput( 0, detail_iList )
    out = geo.createNode( "null", node_name="OUT" )
    out.setInput( 0, detail_fList )
    # convert it all
    converter = IECoreHoudini.FromHoudiniPointsConverter( out )
    self.assertTrue( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) )
    result = converter.convert()
    self.assertTrue( result.isInstanceOf( IECoreScene.TypeId.PointsPrimitive ) )
    bbox = result.bound()
    self.assertEqual( bbox.min().x, -2.0 )
    self.assertEqual( bbox.max().x, 2.0 )
    self.assertEqual( result.numPoints, 100 )
    for i in range( result.numPoints ) :
        self.assertTrue( result["P"].data[i].x >= bbox.min().x )
        self.assertTrue( result["P"].data[i].x <= bbox.max().x )
    # integer and float list attributes are not currently supported, so should not appear in the primitive variable lists:
    self.assertTrue( "vert_iList" not in result.keys() )
    self.assertTrue( "vert_fList" not in result.keys() )
    self.assertTrue( "detail_iList" not in result.keys() )
    self.assertTrue( "detail_fList" not in result.keys() )
    # test point attributes
    self.assertTrue( "P" in result )
    self.assertEqual( result['P'].data.typeId(), IECore.TypeId.V3fVectorData )
    self.assertEqual( result['P'].interpolation, IECoreScene.PrimitiveVariable.Interpolation.Vertex )
    self.assertEqual( result['P'].data.size(), result.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex ) )
    self.assertEqual( result["P"].data.getInterpretation(), IECore.GeometricData.Interpretation.Point )
    self.assertTrue( "N" in result )
    self.assertEqual( result['N'].data.typeId(), IECore.TypeId.V3fVectorData )
    self.assertEqual( result['N'].interpolation, IECoreScene.PrimitiveVariable.Interpolation.Vertex )
    self.assertEqual( result['N'].data.size(), result.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex ) )
    self.assertEqual( result["N"].data.getInterpretation(), IECore.GeometricData.Interpretation.Normal )
    # test detail attributes
    self.assertTrue( "detail_i3" in result )
    self.assertEqual( result['detail_i3'].data.typeId(), IECore.TypeId.V3iData )
    self.assertEqual( result['detail_i3'].interpolation, IECoreScene.PrimitiveVariable.Interpolation.Constant )
    self.assertEqual( result['detail_i3'].data.value.x, 123 )
    # the float 456.789 is truncated on conversion to an int attribute
    self.assertEqual( result['detail_i3'].data.value.y, 456 )
    self.assertEqual( result['detail_i3'].data.value.z, 789 )
    # test primitive attributes
    self.assertTrue( "Cs" in result )
    self.assertEqual( result["Cs"].data.typeId(), IECore.TypeId.Color3fVectorData )
    self.assertEqual( result["Cs"].interpolation, IECoreScene.PrimitiveVariable.Interpolation.Uniform )
    self.assertEqual( result["Cs"].data.size(), result.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Uniform ) )
    for i in range( 0, result.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Uniform ) ) :
        for j in range( 0, 3 ) :
            self.assertTrue( result["Cs"].data[i][j] >= 0.0 )
            self.assertTrue( result["Cs"].data[i][j] <= 1.0 )
    # test vertex attributes
    attrs = [ "vert_f1", "vert_f2", "vert_f3", "orient", "quat_2", "vert_i1", "vert_i2", "vert_i3", "vert_v3f" ]
    for a in attrs :
        self.assertTrue( a in result )
        self.assertEqual( result[a].interpolation, IECoreScene.PrimitiveVariable.Interpolation.Vertex )
        self.assertEqual( result[a].data.size(), result.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex ) )
    # test indexed vertex attributes
    for a in [ "vertString", "vertString2" ] :
        self.assertTrue( a in result )
        self.assertEqual( result[a].interpolation, IECoreScene.PrimitiveVariable.Interpolation.Vertex )
        self.assertEqual( result[a].indices.size(), result.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex ) )
    self.assertEqual( result["vert_f1"].data.typeId(), IECore.FloatVectorData.staticTypeId() )
    self.assertEqual( result["vert_f2"].data.typeId(), IECore.V2fVectorData.staticTypeId() )
    self.assertEqual( result["vert_f3"].data.typeId(), IECore.V3fVectorData.staticTypeId() )
    self.assertEqual( result["orient"].data.typeId(), IECore.QuatfVectorData.staticTypeId() )
    for i in range( 0, result.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex ) ) :
        for j in range( 0, 3 ) :
            self.assertTrue( result["vert_f3"].data[i][j] >= 0.0 )
            self.assertTrue( result["vert_f3"].data[i][j] < 400.1 )
        # Houdini stores quaternions (x,y,z,w); Cortex as (r,(x,y,z))
        self.assertAlmostEqual( result["orient"].data[i].r(), i * 0.4,5 )
        self.assertAlmostEqual( result["orient"].data[i].v()[0], i * 0.1,5 )
        self.assertAlmostEqual( result["orient"].data[i].v()[1], i * 0.2,5 )
        self.assertAlmostEqual( result["orient"].data[i].v()[2], i * 0.3,5 )
        self.assertAlmostEqual( result["quat_2"].data[i].r(), i * 0.8,5 )
        self.assertAlmostEqual( result["quat_2"].data[i].v()[0], i * 0.2,5 )
        self.assertAlmostEqual( result["quat_2"].data[i].v()[1], i * 0.4,5 )
        self.assertAlmostEqual( result["quat_2"].data[i].v()[2], i * 0.6,5 )
    self.assertEqual( result["vert_i1"].data.typeId(), IECore.IntVectorData.staticTypeId() )
    self.assertEqual( result["vert_i2"].data.typeId(), IECore.V2iVectorData.staticTypeId() )
    self.assertEqual( result["vert_i3"].data.typeId(), IECore.V3iVectorData.staticTypeId() )
    for i in range( 0, result.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex ) ) :
        for j in range( 0, 3 ) :
            self.assertTrue( result["vert_i3"].data[i][j] < 10 )
    self.assertEqual( result["vert_v3f"].data.typeId(), IECore.V3fVectorData.staticTypeId() )
    self.assertEqual( result["vertString"].data.typeId(), IECore.TypeId.StringVectorData )
    self.assertEqual( result["vertString"].interpolation, IECoreScene.PrimitiveVariable.Interpolation.Vertex )
    self.assertEqual( result["vertString"].indices.typeId(), IECore.TypeId.IntVectorData )
    for i in range( 0, result.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex ) ) :
        index = result["vertString"].indices[i]
        self.assertEqual( index, i )
        self.assertEqual( result["vertString"].data[index], "string %06d!" % index )
    # make sure the string tables are alphabetically sorted:
    self.assertEqual( result["vertString2"].data, IECore.StringVectorData( ['a','b','c','d','e','f','g'] ) )
    stringVals = [ 'd','c','e','a','g','f','b' ]
    for i in range( 0, result.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex ) ) :
        index = result["vertString2"].indices[i]
        self.assertEqual( result["vertString2"].data[ index ], stringVals[ i % 7 ] )
    # matrix attributes round-trip with their transform components intact
    self.assertEqual( result["m44"].interpolation, IECoreScene.PrimitiveVariable.Interpolation.Vertex )
    self.assertEqual( result["m44"].data.typeId(), IECore.M44fVectorData.staticTypeId() )
    matrixScale, matrixShear, matrixRot, matrixTranslation = imath.V3f(), imath.V3f(), imath.V3f(), imath.V3f()
    result["m44"].data[0].extractSHRT( matrixScale, matrixShear, matrixRot, matrixTranslation )
    self.assertEqual( matrixTranslation, imath.V3f( 10,20,30 ) )
    # 30/45/60 degrees in radians
    self.assertTrue( matrixRot.equalWithRelError( imath.V3f( math.pi / 6, math.pi / 4, math.pi / 3 ), 1.e-5 ) )
    self.assertTrue( matrixScale.equalWithRelError( imath.V3f( 3, 4, 5 ), 1.e-5 ) )
    self.assertEqual( result["detail_m44"].interpolation, IECoreScene.PrimitiveVariable.Interpolation.Constant )
    self.assertEqual( result["detail_m44"].data.typeId(), IECore.M44fData.staticTypeId() )
    result["detail_m44"].data.value.extractSHRT( matrixScale, matrixShear, matrixRot, matrixTranslation )
    self.assertEqual( matrixTranslation, imath.V3f( 10,20,30 ) )
    self.assertTrue( matrixRot.equalWithRelError( imath.V3f( math.pi / 6, math.pi / 4, math.pi / 3 ), 1.e-5 ) )
    self.assertTrue( matrixScale.equalWithRelError( imath.V3f( 3, 4, 5 ), 1.e-5 ) )
    self.assertEqual( result["m33"].interpolation, IECoreScene.PrimitiveVariable.Interpolation.Vertex )
    self.assertEqual( result["m33"].data.typeId(), IECore.M33fVectorData.staticTypeId() )
    # embed the 3x3 in a 4x4 so extractSHRT can decompose it
    m3 = result["m33"].data[0]
    m4 = imath.M44f(
        m3[0][0], m3[0][1], m3[0][2], 0.0,
        m3[1][0], m3[1][1], m3[1][2], 0.0,
        m3[2][0], m3[2][1], m3[2][2], 0.0,
        0.0, 0.0, 0.0, 1.0
    )
    m4.extractSHRT( matrixScale, matrixShear, matrixRot, matrixTranslation )
    self.assertTrue( matrixRot.equalWithRelError( imath.V3f( math.pi / 6, math.pi / 4, math.pi / 3 ), 1.e-5 ) )
    self.assertTrue( matrixScale.equalWithRelError( imath.V3f( 3, 4, 5 ), 1.e-5 ) )
    self.assertEqual( result["detail_m33"].interpolation, IECoreScene.PrimitiveVariable.Interpolation.Constant )
    self.assertEqual( result["detail_m33"].data.typeId(), IECore.M33fData.staticTypeId() )
    m3 = result["detail_m33"].data.value
    m4 = imath.M44f(
        m3[0][0], m3[0][1], m3[0][2], 0.0,
        m3[1][0], m3[1][1], m3[1][2], 0.0,
        m3[2][0], m3[2][1], m3[2][2], 0.0,
        0.0, 0.0, 0.0, 1.0
    )
    m4.extractSHRT( matrixScale, matrixShear, matrixRot, matrixTranslation )
    self.assertTrue( matrixRot.equalWithRelError( imath.V3f( math.pi / 6, math.pi / 4, math.pi / 3 ), 1.e-5 ) )
    self.assertTrue( matrixScale.equalWithRelError( imath.V3f( 3, 4, 5 ), 1.e-5 ) )
    self.assertTrue( result.arePrimitiveVariablesValid() )
# convert some points
# convert some points
def testConvertPoints( self ) :
    """Scattered points convert to a PointsPrimitive of the same point count
    carrying P and N variables."""
    points = self.createPoints()
    converter = IECoreHoudini.FromHoudiniPointsConverter( points )
    result = converter.convert()
    self.assertEqual( result.typeId(), IECoreScene.PointsPrimitive.staticTypeId() )
    self.assertEqual( points.parm('npts').eval(), result.numPoints )
    # assertTrue replaces assert_, deprecated and removed in Python 3.12
    self.assertTrue( "P" in result.keys() )
    self.assertTrue( "N" in result.keys() )
    self.assertTrue( result.arePrimitiveVariablesValid() )
# simple attribute conversion
# simple attribute conversion
def testSetupAttributes( self ) :
    """Attach a float point attribute named test_attribute to the scattered
    points and verify it converts; returns the attribcreate SOP so other
    tests can re-parameterise it."""
    points = self.createPoints()
    geo = points.parent()
    attr = geo.createNode( "attribcreate", exact_type_name=True )
    attr.setInput( 0, points )
    attr.parm("name").set( "test_attribute" )
    attr.parm("type").set(0) # float
    attr.parm("size").set(1) # 1 element
    attr.parm("value1").set(123.456)
    # value2 is unused while size is 1, but reusing tests bump size later
    attr.parm("value2").set(654.321)
    converter = IECoreHoudini.FromHoudiniPointsConverter( attr )
    result = converter.convert()
    # assertTrue replaces assert_, deprecated and removed in Python 3.12
    self.assertTrue( "test_attribute" in result.keys() )
    self.assertEqual( result["test_attribute"].data.size(), points.parm('npts').eval() )
    self.assertTrue( result.arePrimitiveVariablesValid() )
    return attr
# testing point attributes and types
# testing point attributes and types
def testPointAttributes( self ) :
    """Point attributes of every supported type/size convert to the
    matching VectorData, with strings becoming indexed data.

    Uses assertTrue in place of the assert_ alias, which is deprecated and
    removed in Python 3.12.
    """
    attr = self.testSetupAttributes()
    # float, size 1
    result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
    self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.FloatVectorData )
    self.assertTrue( result["test_attribute"].data[0] > 123.0 )
    self.assertEqual( result["test_attribute"].data.size(), 5000 )
    self.assertEqual( result["test_attribute"].interpolation, IECoreScene.PrimitiveVariable.Interpolation.Vertex )
    self.assertTrue( result.arePrimitiveVariablesValid() )
    attr.parm("type").set(1) # integer
    result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
    self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.IntVectorData )
    self.assertEqual( result["test_attribute"].data[0], 123 )
    self.assertEqual( result["test_attribute"].data.size(), 5000 )
    self.assertEqual( result["test_attribute"].interpolation, IECoreScene.PrimitiveVariable.Interpolation.Vertex )
    self.assertTrue( result.arePrimitiveVariablesValid() )
    attr.parm("type").set(0) # float
    attr.parm("size").set(2) # 2 elementS
    attr.parm("value2").set(456.789)
    result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
    self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.V2fVectorData )
    self.assertEqual( result["test_attribute"].data[0], imath.V2f( 123.456, 456.789 ) )
    self.assertEqual( result["test_attribute"].data.size(), 5000 )
    self.assertEqual( result["test_attribute"].interpolation, IECoreScene.PrimitiveVariable.Interpolation.Vertex )
    self.assertTrue( result.arePrimitiveVariablesValid() )
    attr.parm("type").set(1) # int
    result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
    self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.V2iVectorData )
    self.assertEqual( result["test_attribute"].data[0], imath.V2i( 123, 456 ) )
    self.assertEqual( result["test_attribute"].data.size(), 5000 )
    self.assertEqual( result["test_attribute"].interpolation, IECoreScene.PrimitiveVariable.Interpolation.Vertex )
    self.assertTrue( result.arePrimitiveVariablesValid() )
    attr.parm("type").set(0) # float
    attr.parm("size").set(3) # 3 elements
    attr.parm("value3").set(999.999)
    result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
    self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.V3fVectorData )
    self.assertEqual( result["test_attribute"].data[0],imath.V3f( 123.456, 456.789, 999.999 ) )
    self.assertEqual( result["test_attribute"].data.size(), 5000 )
    self.assertEqual( result["test_attribute"].interpolation, IECoreScene.PrimitiveVariable.Interpolation.Vertex )
    self.assertTrue( result.arePrimitiveVariablesValid() )
    attr.parm("type").set(1) # int
    result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
    self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.V3iVectorData )
    self.assertEqual( result["test_attribute"].data[0], imath.V3i( 123, 456, 999 ) )
    self.assertEqual( result["test_attribute"].data.size(), 5000 )
    self.assertEqual( result["test_attribute"].interpolation, IECoreScene.PrimitiveVariable.Interpolation.Vertex )
    self.assertTrue( result.arePrimitiveVariablesValid() )
    attr.parm("type").set( 3 ) # string
    attr.parm( "string" ).setExpression("'string %06d!' % pwd().curPoint().number()", hou.exprLanguage.Python)
    result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
    # strings convert to an indexed StringVectorData
    self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.StringVectorData )
    self.assertEqual( result["test_attribute"].data[10], "string 000010!" )
    self.assertEqual( result["test_attribute"].data.size(), 5000 )
    self.assertEqual( result["test_attribute"].interpolation, IECoreScene.PrimitiveVariable.Interpolation.Vertex )
    self.assertEqual( result["test_attribute"].indices[10], 10 )
    self.assertEqual( result["test_attribute"].indices.size(), 5000 )
    self.assertTrue( result.arePrimitiveVariablesValid() )
# testing detail attributes and types
def testDetailAttributes( self ) :
    """Detail-class attributes must convert to Constant-interpolation primitive
    variables, covering float/int scalars, 2- and 3-element vectors and strings."""
    attr = self.testSetupAttributes()
    attr.parm("class").set(0) # detail attribute
    result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
    # NOTE(review): value1 is assigned *after* convert(), which is presumably why
    # the float assertion below only checks a loose lower bound -- confirm this
    # ordering is intended.
    attr.parm("value1").set(123.456)
    # float scalar -> FloatData
    self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.FloatData )
    self.assert_( result["test_attribute"].data > IECore.FloatData( 123.0 ) )
    self.assertEqual( result["test_attribute"].interpolation, IECoreScene.PrimitiveVariable.Interpolation.Constant )
    self.assert_( result.arePrimitiveVariablesValid() )
    attr.parm("type").set(1) # integer
    result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
    # int scalar -> IntData (value truncated from 123.456)
    self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.IntData )
    self.assertEqual( result["test_attribute"].data, IECore.IntData( 123 ) )
    self.assertEqual( result["test_attribute"].interpolation, IECoreScene.PrimitiveVariable.Interpolation.Constant )
    self.assert_( result.arePrimitiveVariablesValid() )
    attr.parm("type").set(0) # float
    attr.parm("size").set(2) # 2 elementS
    attr.parm("value2").set(456.789)
    result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
    # float[2] -> V2fData
    self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.V2fData )
    self.assertEqual( result["test_attribute"].data.value, imath.V2f( 123.456, 456.789 ) )
    self.assertEqual( result["test_attribute"].interpolation, IECoreScene.PrimitiveVariable.Interpolation.Constant )
    self.assert_( result.arePrimitiveVariablesValid() )
    attr.parm("type").set(1) # int
    result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
    # int[2] -> V2iData
    self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.V2iData )
    self.assertEqual( result["test_attribute"].data.value, imath.V2i( 123, 456 ) )
    self.assertEqual( result["test_attribute"].interpolation, IECoreScene.PrimitiveVariable.Interpolation.Constant )
    self.assert_( result.arePrimitiveVariablesValid() )
    attr.parm("type").set(0) # float
    attr.parm("size").set(3) # 3 elements
    attr.parm("value3").set(999.999)
    result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
    # float[3] -> V3fData
    self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.V3fData )
    self.assertEqual( result["test_attribute"].data.value, imath.V3f( 123.456, 456.789, 999.999 ) )
    self.assertEqual( result["test_attribute"].interpolation, IECoreScene.PrimitiveVariable.Interpolation.Constant )
    self.assert_( result.arePrimitiveVariablesValid() )
    attr.parm("type").set(1) # int
    result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
    # int[3] -> V3iData
    self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.V3iData )
    self.assertEqual( result["test_attribute"].data.value, imath.V3i( 123, 456, 999 ) )
    self.assertEqual( result["test_attribute"].interpolation, IECoreScene.PrimitiveVariable.Interpolation.Constant )
    self.assert_( result.arePrimitiveVariablesValid() )
    attr.parm("type").set( 3 ) # string
    attr.parm( "string" ).set( "string!" )
    result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
    # string detail attribute -> plain StringData (no indices needed for Constant)
    self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.StringData )
    self.assertEqual( result["test_attribute"].data.value, "string!" )
    self.assertEqual( result["test_attribute"].interpolation, IECoreScene.PrimitiveVariable.Interpolation.Constant )
    self.assert_( result.arePrimitiveVariablesValid() )
# testing that float[4] doesn't work!
def testFloat4attr( self ) :
    """float[4] point attributes are unsupported and must be silently dropped
    from the conversion result."""
    attribNode = self.testSetupAttributes()
    attribNode.parm( "name" ).set( "test_attribute" )
    # request four elements per point attribute, which the converter cannot handle
    attribNode.parm( "size" ).set( 4 )
    converted = IECoreHoudini.FromHoudiniPointsConverter( attribNode ).convert()
    # the unsupported attribute is omitted rather than producing invalid data
    self.assert_( "test_attribute" not in converted.keys() )
    self.assert_( converted.arePrimitiveVariablesValid() )
# testing conversion of animating geometry
def testAnimatingGeometry( self ) :
    """Converting at two different frames of an animating network must yield
    different point positions."""
    sceneRoot = hou.node("/obj")
    geoNode = sceneRoot.createNode("geo", run_init_scripts=False)
    torusNode = geoNode.createNode( "torus" )
    facetNode = geoNode.createNode( "facet" )
    facetNode.parm("postnml").set(True)
    mountainNode = geoNode.createNode( "mountain" )
    # the offset parameter was renamed in Houdini 16
    offsetParmName = "offsetx" if hou.applicationVersion()[0] >= 16 else "offset1"
    mountainNode.parm( offsetParmName ).setExpression( "$FF" )
    scatterNode = geoNode.createNode( "scatter" )
    facetNode.setInput( 0, torusNode )
    mountainNode.setInput( 0, facetNode )
    scatterNode.setInput( 0, mountainNode )
    scatterConverter = IECoreHoudini.FromHoudiniPointsConverter( scatterNode )
    hou.setFrame(1)
    frameOnePoints = scatterConverter.convert()
    hou.setFrame(2)
    scatterConverter = IECoreHoudini.FromHoudiniPointsConverter( scatterNode )
    frameTwoPoints = scatterConverter.convert()
    self.assertNotEqual( frameOnePoints["P"].data, frameTwoPoints["P"].data )
# testing we can handle an object being deleted
def testObjectWasDeleted( self ) :
    """An existing converter must keep returning its last result after the node
    is destroyed, while constructing a new converter for the dead node raises."""
    sceneRoot = hou.node("/obj")
    geoNode = sceneRoot.createNode("geo", run_init_scripts=False)
    torusNode = geoNode.createNode( "torus" )
    converter = IECoreHoudini.FromHoudiniPointsConverter( torusNode )
    resultBefore = converter.convert()
    torusNode.destroy()
    resultAfter = converter.convert()
    # conversion result is cached, so it survives the node's destruction
    self.assertEqual( resultAfter, resultBefore )
    # but a fresh converter cannot be built on the destroyed node
    self.assertRaises( RuntimeError, IECore.curry( IECoreHoudini.FromHoudiniPointsConverter, torusNode ) )
# testing we can handle an object being deleted
def testObjectWasDeletedFactory( self ) :
    """Same deleted-node behaviour as above, but going through the
    FromHoudiniGeometryConverter factory instead of the class constructor."""
    sceneRoot = hou.node("/obj")
    geoNode = sceneRoot.createNode("geo", run_init_scripts=False)
    torusNode = geoNode.createNode( "torus" )
    converter = IECoreHoudini.FromHoudiniGeometryConverter.create( torusNode )
    resultBefore = converter.convert()
    torusNode.destroy()
    resultAfter = converter.convert()
    # conversion result is cached, so it survives the node's destruction
    self.assertEqual( resultAfter, resultBefore )
    # but the factory cannot create a converter for the destroyed node
    self.assertRaises( RuntimeError, IECore.curry( IECoreHoudini.FromHoudiniGeometryConverter.create, torusNode ) )
# testing converting a Houdini particle primitive with detail and point attribs
def testParticlePrimitive( self ) :
    """A Houdini particle (POP) primitive carrying both detail and point float[3]
    attribs converts to a PointsPrimitive with Constant and Vertex variables."""
    obj = hou.node("/obj")
    geo = obj.createNode( "geo", run_init_scripts=False )
    popnet = self.createPopNet()
    location = popnet.createNode( "poplocation" )
    popSolver = popnet.node( "popsolver1" )
    popSolver.setInput( 2 , location )
    # detail attribute: float[3] named "float3detail"
    detailAttr = popnet.createOutputNode( "attribcreate", exact_type_name=True )
    detailAttr.parm("name").set( "float3detail" )
    detailAttr.parm("class").set( 0 ) # detail
    detailAttr.parm("type").set( 0 ) # float
    detailAttr.parm("size").set( 3 ) # 3 elements
    detailAttr.parm("value1").set( 1 )
    detailAttr.parm("value2").set( 2 )
    detailAttr.parm("value3").set( 3 )
    # point attribute: float[3] named "float3point"
    pointAttr = detailAttr.createOutputNode( "attribcreate", exact_type_name=True )
    pointAttr.parm("name").set( "float3point" )
    pointAttr.parm("class").set( 2 ) # point
    pointAttr.parm("type").set( 0 ) # float
    pointAttr.parm("size").set( 3 ) # 3 elements
    pointAttr.parm("value1").set( 1 )
    pointAttr.parm("value2").set( 2 )
    pointAttr.parm("value3").set( 3 )
    # advance the simulation so particles exist
    hou.setFrame( 5 )
    converter = IECoreHoudini.FromHoudiniGeometryConverter.create( pointAttr )
    # the factory must select the points converter for a particle primitive
    self.assert_( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) )
    points = converter.convert()
    self.assertEqual( type(points), IECoreScene.PointsPrimitive )
    self.assertEqual( points.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex ), 1043 )
    self.assertEqual( points["float3detail"].interpolation, IECoreScene.PrimitiveVariable.Interpolation.Constant )
    self.assertEqual( type(points["float3detail"].data), IECore.V3fData )
    self.assert_( points["float3detail"].data.value.equalWithRelError( imath.V3f( 1, 2, 3 ), 1e-10 ) )
    self.assertEqual( type(points["float3point"].data), IECore.V3fVectorData )
    self.assertEqual( points["float3point"].interpolation, IECoreScene.PrimitiveVariable.Interpolation.Vertex )
    for p in points["float3point"].data :
        self.assert_( p.equalWithRelError( imath.V3f( 1, 2, 3 ), 1e-10 ) )
    self.assert_( points.arePrimitiveVariablesValid() )
    add = pointAttr.createOutputNode( "add" )
    add.parm( "keep" ).set( 1 ) # deletes primitive and leaves points
    # bare points (no particle primitive) must convert to the same result
    converter = IECoreHoudini.FromHoudiniPointsConverter( add )
    points2 = converter.convert()
    self.assertEqual( points2, points )
def testMultipleParticlePrimitives( self ) :
    """Multiple POP particle primitives merge into a single PointsPrimitive;
    detail attribs convert as Constant, point attribs as Vertex variables."""
    obj = hou.node("/obj")
    geo = obj.createNode( "geo", run_init_scripts=False )
    popnet = self.createPopNet()
    fireworks = popnet.createNode( "popfireworks" )
    popSolver = popnet.node("popsolver1")
    popSolver.setInput( 2, fireworks )
    # advance the simulation so particles exist
    hou.setFrame( 28 )
    converter = IECoreHoudini.FromHoudiniPointsConverter( popnet )
    points = converter.convert()
    self.assertEqual( type(points), IECoreScene.PointsPrimitive )
    self.assertEqual( points.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex ), 24 )
    self.assertEqual( points["v"].interpolation, IECoreScene.PrimitiveVariable.Interpolation.Vertex )
    self.assertEqual( type(points["v"].data), IECore.V3fVectorData )
    # velocity keeps its geometric interpretation
    self.assertEqual( points["v"].data.getInterpretation(), IECore.GeometricData.Interpretation.Vector )
    self.assertEqual( points["nextid"].interpolation, IECoreScene.PrimitiveVariable.Interpolation.Constant )
    self.assertEqual( points["nextid"].data, IECore.IntData( 24 ) )
    self.assertTrue( points.arePrimitiveVariablesValid() )
    add = popnet.createOutputNode( "add" )
    add.parm( "keep" ).set( 1 ) # deletes primitive and leaves points
    converter = IECoreHoudini.FromHoudiniPointsConverter( add )
    points2 = converter.convert()
    # showing that prim attribs don't get converted because the interpolation size doesn't match
    self.assertEqual( points2, points )
def testName( self ) :
    """The name SOP doesn't partition a PointsPrimitive conversion, but the
    factory can restrict conversion to a single named shape."""
    points = self.createPoints()
    particles = points.createOutputNode( "add" )
    particles.parm( "addparticlesystem" ).set( True )
    # name the particle shape "points" and a sibling box shape "box"
    name = particles.createOutputNode( "name" )
    name.parm( "name1" ).set( "points" )
    box = points.parent().createNode( "box" )
    name2 = box.createOutputNode( "name" )
    name2.parm( "name1" ).set( "box" )
    merge = name.createOutputNode( "merge" )
    merge.setInput( 1, name2 )
    converter = IECoreHoudini.FromHoudiniPointsConverter( merge )
    result = converter.convert()
    # names are not stored on the object at all
    self.assertEqual( result.blindData(), IECore.CompoundData() )
    self.assertFalse( "name" in result )
    # both shapes were converted as one PointsPrimitive
    self.assertEqual( result.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex ), 5008 )
    self.assertEqual( result.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Uniform ), 1 )
    self.assertTrue( result.arePrimitiveVariablesValid() )
    converter = IECoreHoudini.FromHoudiniGeometryConverter.create( merge, "points" )
    self.assertTrue( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) )
    result = converter.convert()
    # names are not stored on the object at all
    self.assertEqual( result.blindData(), IECore.CompoundData() )
    self.assertFalse( "name" in result )
    # only the named points were converted
    self.assertEqual( result.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex ), 5000 )
    self.assertTrue( result.arePrimitiveVariablesValid() )
    # requesting the "box" shape as a PointsPrimitive is not possible
    converter = IECoreHoudini.FromHoudiniGeometryConverter.create( merge, "box", IECoreScene.TypeId.PointsPrimitive )
    self.assertEqual( converter, None )
def testAttributeFilter( self ) :
    """The attributeFilter parameter limits which primitive variables convert,
    but P is always converted regardless of the filter."""
    points = self.createPoints()
    particles = points.createOutputNode( "add" )
    particles.parm( "addparticlesystem" ).set( True )
    # add vertex normals
    facet = particles.createOutputNode( "facet", node_name = "add_point_normals" )
    facet.parm("postnml").set(True)
    # add a primitive colour attributes
    primcol = facet.createOutputNode( "primitive", node_name = "prim_colour" )
    primcol.parm("doclr").set(1)
    primcol.parm("diffr").setExpression("rand($PR)")
    primcol.parm("diffg").setExpression("rand($PR+1)")
    primcol.parm("diffb").setExpression("rand($PR+2)")
    # add an int[3] detail attribute
    detail = primcol.createOutputNode( "attribcreate", node_name = "detail", exact_type_name=True )
    detail.parm("name").set("detailAttr")
    detail.parm("class").set(0)
    detail.parm("type").set(1)
    detail.parm("size").set(3)
    detail.parm("value1").set(123)
    detail.parm("value2").set(456.789) # can we catch it out with a float?
    detail.parm("value3").set(789)
    converter = IECoreHoudini.FromHoudiniPointsConverter( detail )
    # no filter: everything converts
    self.assertEqual( sorted(converter.convert().keys()), [ "Cs", "N", "P", "detailAttr", "varmap" ] )
    converter.parameters()["attributeFilter"].setTypedValue( "P" )
    self.assertEqual( sorted(converter.convert().keys()), [ "P" ] )
    # exclusion patterns
    converter.parameters()["attributeFilter"].setTypedValue( "* ^N ^varmap" )
    self.assertEqual( sorted(converter.convert().keys()), [ "Cs", "P", "detailAttr" ] )
    # P must be converted
    converter.parameters()["attributeFilter"].setTypedValue( "* ^P" )
    self.assertTrue( "P" in converter.convert().keys() )
def testStandardAttributeConversion( self ) :
    """With convertStandardAttributes on, Houdini names (Cd, rest, pscale) map
    to Cortex names (Cs, Pref, width); switching it off keeps the Houdini
    names. Geometric interpretations must survive either way."""
    points = self.createPoints()
    color = points.createOutputNode( "color" )
    color.parm( "colortype" ).set( 2 )
    rest = color.createOutputNode( "rest" )
    scale = rest.createOutputNode( "attribcreate" )
    scale.parm( "name1" ).set( "pscale" )
    scale.parm( "value1v1" ).setExpression( "$PT" )
    converter = IECoreHoudini.FromHoudiniPointsConverter( scale )
    result = converter.convert()
    # varmap is not produced from Houdini 15 onwards
    if hou.applicationVersion()[0] >= 15 :
        self.assertEqual( result.keys(), [ "Cs", "N", "P", "Pref", "width" ] )
    else :
        self.assertEqual( result.keys(), [ "Cs", "N", "P", "Pref", "varmap", "width" ] )
    self.assertTrue( result.arePrimitiveVariablesValid() )
    self.assertEqual( result["P"].data.getInterpretation(), IECore.GeometricData.Interpretation.Point )
    self.assertEqual( result["Pref"].data.getInterpretation(), IECore.GeometricData.Interpretation.Point )
    self.assertEqual( result["N"].data.getInterpretation(), IECore.GeometricData.Interpretation.Normal )
    # disable the standard renaming and reconvert
    converter["convertStandardAttributes"].setTypedValue( False )
    result = converter.convert()
    if hou.applicationVersion()[0] >= 15 :
        self.assertEqual( result.keys(), [ "Cd", "N", "P", "pscale", "rest" ] )
    else :
        self.assertEqual( result.keys(), [ "Cd", "N", "P", "pscale", "rest", "varmap" ] )
    self.assertTrue( result.arePrimitiveVariablesValid() )
    self.assertEqual( result["P"].data.getInterpretation(), IECore.GeometricData.Interpretation.Point )
    self.assertEqual( result["rest"].data.getInterpretation(), IECore.GeometricData.Interpretation.Point )
    self.assertEqual( result["N"].data.getInterpretation(), IECore.GeometricData.Interpretation.Normal )
# Allow running this test file directly.
if __name__ == "__main__":
    unittest.main()
| |
import numbers
import numpy as np
import torch
import torch.autograd
import _ext
import _extc
import error_checking as ec
from kernels import KERNELS, KERNEL_NAMES
MAX_FLOAT = float(np.finfo(np.float32).max)
class ImageProjection(torch.nn.Module):
    """Projects a set of 3D particle locations into a pinhole camera and samples
    the given image at each projected location, honouring an optional depth
    mask for occlusion testing.
    """
    def __init__(self, camera_fl):
        """ Initialize a ParticleProjection layer.

        Arguments:
            -camera_fl: The camera focal length in pixels (all pixels are
                        assumed to be square. This layer does not simulate
                        any image warping e.g. radial distortion).
        """
        super(ImageProjection, self).__init__()
        # camera_fl must be a positive real number (validated by error_checking).
        self.camera_fl = ec.check_conditions(camera_fl, "camera_fl",
            "%s > 0", "isinstance(%s, numbers.Real)")
        # Placeholder depth mask (everything "infinitely" far away) used when
        # the caller passes depth_mask=None; resized lazily in forward().
        self.register_buffer("empty_depth_mask",
            torch.ones(1, 1, 1)*MAX_FLOAT)
    def _rotationMatrixFromQuaternion(self, quat):
        """Build a batch of 3x3 rotation matrices from xyzw quaternions.

        Rotation matrix (row-major):
        1 - 2*qy2 - 2*qz2    2*qx*qy - 2*qz*qw    2*qx*qz + 2*qy*qw
        2*qx*qy + 2*qz*qw    1 - 2*qx2 - 2*qz2    2*qy*qz - 2*qx*qw
        2*qx*qz - 2*qy*qw    2*qy*qz + 2*qx*qw    1 - 2*qx2 - 2*qy2

        Note: the assignments below store this matrix transposed
        (ret[:, col, row]), so that the row-vector product `torch.bmm(locs, ret)`
        used in forward() applies the rotation shown above to each location.
        """
        quat = quat.data
        qx = quat[:, 0]
        qy = quat[:, 1]
        qz = quat[:, 2]
        qw = quat[:, 3]
        # Precompute the pairwise products used by every matrix entry.
        qx2 = qx*qx
        qxqy = qx*qy
        qxqz = qx*qz
        qxqw = qx*qw
        qy2 = qy*qy
        qyqz = qy*qz
        qyqw = qy*qw
        qz2 = qz*qz
        qzqw = qz*qw
        ret = quat.new(quat.size()[0], 3, 3)
        ret[:, 0, 0] = 1 - 2*qy2 - 2*qz2
        ret[:, 1, 0] = 2*qxqy - 2*qzqw
        ret[:, 2, 0] = 2*qxqz + 2*qyqw
        ret[:, 0, 1] = 2*qxqy + 2*qzqw
        ret[:, 1, 1] = 1 - 2*qx2 - 2*qz2
        ret[:, 2, 1] = 2*qyqz - 2*qxqw
        ret[:, 0, 2] = 2*qxqz - 2*qyqw
        ret[:, 1, 2] = 2*qyqz + 2*qxqw
        ret[:, 2, 2] = 1 - 2*qx2 - 2*qy2
        # Rotation matrices are constants w.r.t. autograd here.
        return torch.autograd.Variable(ret, requires_grad=False)
    def forward(self, locs, image, camera_pose, camera_rot, depth_mask=None):
        """ Forward pass for the particle projection. Takes in the set of
        particles and an image, and samples the image at each particle's
        projected location.

        Arguments:
            -locs: A BxNx3 tensor where B is the batch size, N is the number
                   of particles, and 3 is the dimensionality of the
                   particles' coordinate space (this layer currently only
                   supports 3D projections).
            -image: A BxCxHxW tensor containing the image to sample.
            -camera_pose: A Bx3 tensor containing the camera translation.
            -camera_rot: A Bx4 tensor containing the camera rotation as a
                         quaternion in xyzw format.
            -depth_mask: An optional BxHxW tensor where W and H are the
                         camera image width and height respectively. If not
                         None, then this is used to compute occlusions. The
                         value in each pixel in the depth_mask should be
                         the distance to the first object. Any particles
                         further away than that value will not be projected
                         onto the output image.
        Returns: A BxNxC tensor with the sampled image values for each
                 particle (see _ImageProjectionFunction.forward).
        """
        # Error checking.
        batch_size = locs.size()[0]
        N = locs.size()[1]
        width = image.size()[3]
        height = image.size()[2]
        channels = image.size()[1]
        ec.check_tensor_dims(locs, "locs", (batch_size, N, 3))
        ec.check_tensor_dims(
            image, "image", (batch_size, channels, height, width))
        ec.check_tensor_dims(camera_pose, "camera_pose", (batch_size, 3))
        ec.check_tensor_dims(camera_rot, "camera_rot", (batch_size, 4))
        ec.check_nans(locs, "locs")
        ec.check_nans(image, "image")
        ec.check_nans(camera_pose, "camera_pose")
        ec.check_nans(camera_rot, "camera_rot")
        if depth_mask is not None:
            ec.check_tensor_dims(depth_mask, "depth_mask", (batch_size,
                height, width))
            ec.check_nans(depth_mask, "depth_mask")
            depth_mask = depth_mask.contiguous()
        else:
            # No mask supplied: reuse the all-far buffer, resizing it when the
            # batch/image dimensions change.
            if (self.empty_depth_mask.size()[0] != batch_size or
                    self.empty_depth_mask.size()[1] != height or
                    self.empty_depth_mask.size()[2] != width):
                self.empty_depth_mask.resize_(batch_size, height, width)
                self.empty_depth_mask.fill_(MAX_FLOAT)
            depth_mask = torch.autograd.Variable(
                self.empty_depth_mask, requires_grad=False)
            if locs.is_cuda:
                depth_mask = depth_mask.cuda()
        # Let's transform the particles to camera space here.
        locs = locs - camera_pose.unsqueeze(1)
        # Ensure the rotation quaternion is normalized.
        camera_rot = camera_rot / \
            torch.sqrt(torch.sum(camera_rot**2, 1, keepdim=True))
        # Invert the rotation (conjugate quaternion: negate x, y, z).
        inv = camera_rot.data.new(1, 4)
        inv[0, 0] = -1
        inv[0, 1] = -1
        inv[0, 2] = -1
        inv[0, 3] = 1
        inv = torch.autograd.Variable(inv, requires_grad=False)
        camera_rot = camera_rot*inv
        rot = self._rotationMatrixFromQuaternion(camera_rot)
        if (rot != rot).data.any():
            # (x != x) is the NaN test.
            raise ValueError("No NaNs found in camera_rot argument, but NaNs created when"
                " constructing a rotation matrix from it.")
        # Rotate the locs into camera space.
        try:
            # There's a bug that causes this to fail on the first call when using cuda.
            # To fix that, just call it again.
            locs = torch.bmm(locs, rot)
        except RuntimeError:
            locs = torch.bmm(locs, rot)
        if (locs != locs).data.any():
            raise ValueError(
                "Rotating locs by rotation matrix resulted in NaNs.")
        # The C/CUDA kernels require contiguous memory.
        locs = locs.contiguous()
        image = image.contiguous()
        proj = _ImageProjectionFunction(self.camera_fl)
        ret = proj(locs, image, depth_mask)
        return ret
"""
INTERNAL FUNCTIONS
"""
class _ImageProjectionFunction(torch.autograd.Function):
    """Autograd wrapper around the C (_ext) and CUDA (_extc) image-projection
    kernels.

    NOTE(review): this uses the legacy stateful autograd.Function API
    (instantiated with a constructor argument); modern PyTorch requires static
    forward/backward with a ctx argument.
    """
    def __init__(self, camera_fl):
        super(_ImageProjectionFunction, self).__init__()
        # Camera focal length in pixels, forwarded to the kernels.
        self.camera_fl = camera_fl
    def forward(self, locs, image, depth_mask):
        # Keep the inputs for the backward pass.
        self.save_for_backward(locs, image, depth_mask)
        batch_size = locs.size()[0]
        N = locs.size()[1]
        channels = image.size()[1]
        # Output: one sampled value per particle per channel -> (B, N, C).
        ret = locs.new(batch_size, N, channels)
        ret.fill_(0)
        if locs.is_cuda:
            if not _extc.spnc_imageprojection_forward(locs, image,
                    self.camera_fl, depth_mask, ret):
                raise Exception("Cuda error")
        else:
            _ext.spn_imageprojection_forward(locs, image,
                self.camera_fl, depth_mask, ret)
        return ret
    def backward(self, grad_output):
        locs, image, depth_mask = self.saved_tensors
        ret_locs = grad_output.new(locs.size())
        ret_locs.fill_(0)
        ret_image = grad_output.new(image.size())
        ret_image.fill_(0)
        # The depth mask gets no gradient; a zero tensor is returned for it.
        ret_depth_mask = grad_output.new(depth_mask.size())
        ret_depth_mask.fill_(0)
        if grad_output.is_cuda:
            if not _extc.spnc_imageprojection_backward(locs, image,
                    self.camera_fl, depth_mask, grad_output, ret_locs, ret_image):
                raise Exception("Cuda error")
        else:
            _ext.spn_imageprojection_backward(locs, image,
                self.camera_fl, depth_mask, grad_output, ret_locs, ret_image)
        # One gradient per forward() input, in the same order.
        return (ret_locs,
            ret_image,
            ret_depth_mask,)
| |
import sys, os, time, logging, shlex, signal, subprocess, errno
import pyev
from ..config import config, get_boolean, get_env
from ..utils import parse_signals, expandvars, enable_nonblocking, disable_nonblocking, get_python_exec, get_signal_name
from ..cnscom import program_state_enum, svrcall_error
from .logmed import log_mediator
from .singleton import get_svrapp
if sys.platform == 'win32':
import msvcrt
import win32file, win32pipe, pywintypes, winerror # from Python Win32
#
try:
import resource
except ImportError:
resource = None
#
L = logging.getLogger("program")
Lmy = logging.getLogger("my") # Message yielding logger
#
class program(object):
    """One supervised '[program:ident]' section: holds the parsed configuration,
    the state machine value, the launched subprocess and its stdout/stderr log
    mediators."""

    # Per-option defaults; overridden by the values read from the config section.
    DEFAULTS = {
        'command': None,            # mandatory; '<httpfend>' launches the built-in HTTP frontend
        'directory': None,          # working directory for the subprocess
        'umask': None,              # octal umask string, parsed in __init__
        'starttimeout': 0.5,
        'stoptimeout': 3,
        'killby': 'TERM,INT,TERM,INT,TERM,INT,KILL',  # escalating sequence of stop signals
        'stdin': '<null>', # TODO: This can be very probably removed as there is no reasonable use
        'stdout': '<stderr>',
        'stderr': '<logdir>',
        'priority': 100,
        'disabled': False,
        'coredump': False,          # if True, lift RLIMIT_CORE so the child can dump core
        'autorestart': False,
        'processgroup': True,       # launch in a dedicated process group (killed as a group)
        'logscan_stdout': '',
        'logscan_stderr': '',
        'notify_fatal': '<global>', # notification target used on transition to FATAL
    }
def __init__(self, svrapp, config_section):
    """Construct a supervised program from its '[program:ident]' config section.

    All options are parsed and validated here; any invalid option logs an
    error, puts the program into the CFGERROR state and aborts construction
    early (no exception is raised).

    Arguments:
        svrapp -- server application object; provides the pyev event loop
        config_section -- configuration section name, e.g. 'program:foo'
    """
    _, self.ident = config_section.split(':', 2)
    self.state = program_state_enum.STOPPED
    self.subproc = None
    self.launch_cnt = 0
    self.autorestart_cnt = 0
    self.start_time = None
    self.stop_time = None
    self.exit_time = None
    self.exit_status = None
    self.coredump_enabled = None # If true, kill by SIGQUIT -> dump core
    if sys.platform != 'win32':
        # On Windows we are using periodic pipe check in win32_read_stdfd
        self.watchers = [
            pyev.Io(0, 0, svrapp.loop, self.__read_stdfd, 0),
            pyev.Io(0, 0, svrapp.loop, self.__read_stdfd, 1),
        ]
    # Build configuration
    self.config = self.DEFAULTS.copy()
    self.config.update(config.items(config_section))
    # Prepare program command line
    cmd = self.config.get('command')
    if cmd is None:
        L.error("Missing command option in {0} -> CFGERROR".format(config_section))
        self.state = program_state_enum.CFGERROR
        return
    if cmd == '<httpfend>':
        # Built-in HTTP frontend launched via the current Python interpreter
        cmd = get_python_exec(cmdline=["-u","-m","ramona.httpfend"])
    elif cmd[:1] == '<':
        # Any other '<...>' placeholder is unknown
        L.error("Unknown command option '{1}' in {0} -> CFGERROR".format(config_section, cmd))
        self.state = program_state_enum.CFGERROR
        return
    # Double backslashes so shlex (POSIX rules) preserves e.g. Windows paths
    cmd = cmd.replace('\\', '\\\\')
    self.cmdline = shlex.split(cmd)
    # Prepare stop signals
    if sys.platform != 'win32':
        self.stopsignals = parse_signals(self.config['killby'])
        if len(self.stopsignals) == 0: self.stopsignals = [signal.SIGTERM]
    self.act_stopsignals = None
    if self.config['stdin'] != '<null>':
        L.error("Unknown stdin option '{0}' in {1} -> CFGERROR".format(self.config['stdin'], config_section))
        self.state = program_state_enum.CFGERROR
        return
    try:
        self.priority = int(self.config.get('priority'))
    except:
        L.error("Invalid priority option '{0}' in {1} -> CFGERROR".format(self.config['priority'], config_section))
        self.state = program_state_enum.CFGERROR
        return
    try:
        dis = get_boolean(self.config.get('disabled'))
    except ValueError:
        L.error("Unknown/invalid 'disabled' option '{0}' in {1} -> CFGERROR".format(self.config.get('disabled'), config_section))
        self.state = program_state_enum.CFGERROR
        return
    if dis:
        self.state = program_state_enum.DISABLED
    self.ulimits = {}
    #TODO: Enable other ulimits..
    try:
        coredump = get_boolean(self.config.get('coredump',False))
    except ValueError:
        L.error("Unknown 'coredump' option '{0}' in {1} -> CFGERROR".format(self.config.get('coredump','?'), config_section))
        self.state = program_state_enum.CFGERROR
        return
    if coredump and resource is not None:
        # Unlimited core file size so the child can dump core
        self.ulimits[resource.RLIMIT_CORE] = (-1,-1)
    try:
        self.autorestart = get_boolean(self.config.get('autorestart',False))
    except ValueError:
        L.error("Unknown 'autorestart' option '{0}' in {1} -> CFGERROR".format(self.config.get('autorestart','?'), config_section))
        self.state = program_state_enum.CFGERROR
        return
    try:
        # Validation only; the value is re-read where it is actually used
        get_boolean(self.config.get('processgroup',True))
    except ValueError:
        L.error("Unknown 'processgroup' option '{0}' in {1} -> CFGERROR".format(self.config.get('processgroup','?'), config_section))
        self.state = program_state_enum.CFGERROR
        return
    umask = self.config.get('umask')
    if umask is not None:
        try:
            # umask is given as an octal string
            umask = int(umask, 8)
        except:
            L.error("Invalid umask option ({1}) in {0} -> CFGERROR".format(config_section, umask))
            self.state = program_state_enum.CFGERROR
            return
    self.config['umask'] = umask
    # Prepare log files
    stdout_cnf = self.config['stdout']
    stderr_cnf = self.config['stderr']
    # Mutual redirection would create a cycle
    if (stdout_cnf == '<stderr>') and (stderr_cnf == '<stdout>'):
        L.error("Invalid stdout and stderr combination in {0} -> CFGERROR".format(config_section))
        self.state = program_state_enum.CFGERROR
        return
    # Stdout settings
    if stdout_cnf == '<logdir>':
        if stderr_cnf in ('<stdout>','<null>') :
            fname = os.path.join(config.get('general','logdir'), self.ident + '.log')
        else:
            fname = os.path.join(config.get('general','logdir'), self.ident + '-out.log')
        self.log_out = log_mediator(self.ident, 'stdout', fname)
    elif stdout_cnf == '<stderr>':
        # Resolved after stderr is configured (see assignment below)
        pass
    elif stdout_cnf == '<null>':
        self.log_out = log_mediator(self.ident, 'stdout', None)
    elif stdout_cnf[:1] == '<':
        L.error("Unknown stdout option in {0} -> CFGERROR".format(config_section))
        self.state = program_state_enum.CFGERROR
        return
    else:
        self.log_out = log_mediator(self.ident, 'stdout', stdout_cnf)
    # Stderr settings
    if stderr_cnf == '<logdir>':
        if stdout_cnf in ('<stderr>','<null>') :
            fname = os.path.join(config.get('general','logdir'), self.ident + '.log')
        else:
            fname = os.path.join(config.get('general','logdir'), self.ident + '-err.log')
        self.log_err = log_mediator(self.ident, 'stderr', fname)
    elif stderr_cnf == '<stdout>':
        self.log_err = self.log_out
    elif stderr_cnf == '<null>':
        self.log_err = log_mediator(self.ident, 'stderr', None)
    elif stderr_cnf[:1] == '<':
        L.error("Unknown stderr option in {0} -> CFGERROR".format(config_section))
        self.state = program_state_enum.CFGERROR
        return
    else:
        self.log_err = log_mediator(self.ident, 'stderr', stderr_cnf)
    if stdout_cnf == '<stderr>':
        self.log_out = self.log_err
    # Log scans
    for stream, logmed in [('stdout', self.log_out),('stderr', self.log_err)]:
        # Per-program option first, then the per-stream global, then the global
        logscanval = self.config.get('logscan_{0}'.format(stream)).strip()
        if len(logscanval) == 0:
            logscanval = config.get('ramona:notify','logscan_{}'.format(stream))
        if len(logscanval) == 0:
            # NOTE(review): '.format(stream)' is a no-op here -- the key has no
            # placeholder; presumably a plain 'logscan' lookup was intended
            logscanval = config.get('ramona:notify','logscan'.format(stream))
        for logscanseg in logscanval.split(','):
            logscanseg = logscanseg.strip()
            if logscanseg == '': continue
            # Each segment is 'pattern>target'
            try:
                pattern, target = logscanseg.split('>',1)
            except ValueError:
                L.error("Unknown 'logscan_{2}' option '{0}' in {1} -> CFGERROR".format(logscanseg, config_section, stream))
                self.state = program_state_enum.CFGERROR
                return
            if not validate_notify_target(target):
                L.error("Unknown 'logscan_{2}' option '{0}' in {1} -> CFGERROR".format(target, config_section, stream))
                self.state = program_state_enum.CFGERROR
                return
            logmed.add_scanner(pattern, target)
    # Environment variables
    alt_env = self.config.get('env')
    self.env = get_env(alt_env)
    self.env['RAMONA_SECTION'] = config_section
    # Notification on state change to FATAL
    self.notify_fatal_target = self.config.get('notify_fatal', '<global>')
    if self.notify_fatal_target == '<global>':
        # NOTE(review): three-argument get() -- assumes the project config
        # object supports a default/fallback argument here; verify
        self.notify_fatal_target = config.get('ramona:notify','notify_fatal', 'now')
    if self.notify_fatal_target == '<none>':
        self.notify_fatal_target = None
    if (self.notify_fatal_target is not None) and not validate_notify_target(self.notify_fatal_target):
        L.warning("Invalid notify_fatal target: '{}'".format(self.notify_fatal_target))
        self.notify_fatal_target = None
def __repr__(self):
    """Debug representation: class name, ident, state and, when known, the
    subprocess pid and last exit status."""
    parts = ["<{0} {1} state={2}".format(self.__class__.__name__, self.ident, program_state_enum.labels.get(self.state, '?'))]
    if self.subproc is not None:
        parts.append(' pid={}'.format(self.subproc.pid))
    if self.exit_status is not None:
        parts.append(' exit_status={}'.format(self.exit_status))
    parts.append('>')
    return ''.join(parts)
def start(self, reset_autorestart_cnt=True):
    '''Transition to state STARTING: launch the subprocess, wire stdout/stderr
    watchers (POSIX) and open the log mediators.

    Arguments:
        reset_autorestart_cnt -- reset the auto-restart counter; pass False
                                 when the call is itself an automatic restart
    '''
    assert self.subproc is None
    assert self.state in (program_state_enum.STOPPED, program_state_enum.FATAL)
    L.debug("{0} ({1}) -> STARTING".format(self, self.cmdline))
    # Prepare working directory
    directory = self.config.get('directory')
    if directory is not None:
        directory = expandvars(directory, self.env)
    # Launch subprocess (environment variables expanded in each argument)
    cmdline = [expandvars(arg, self.env) for arg in self.cmdline]
    try:
        self.subproc = subprocess.Popen(
            cmdline,
            stdin=None,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            preexec_fn=self.__preexec_fn if sys.platform != 'win32' else None,
            close_fds=True if sys.platform != 'win32' else None,
            shell=False, #TODO: This can/should be configurable in [program:x] section
            cwd=directory,
            env=self.env
        )
    except Exception, e:
        # Launch failure (e.g. missing executable) is a direct FATAL transition
        self.state = program_state_enum.FATAL
        Lmy.error("{0} failed to start (now in FATAL state): {1}".format(self.ident, e))
        L.error("{0} failed to start: {1} -> FATAL".format(self, e))
        return
    if sys.platform != 'win32':
        # Non-blocking pipes + pyev watchers stream child output into the logs
        enable_nonblocking(self.subproc.stdout)
        self.watchers[0].set(self.subproc.stdout, pyev.EV_READ)
        self.watchers[0].start()
        enable_nonblocking(self.subproc.stderr)
        self.watchers[1].set(self.subproc.stderr, pyev.EV_READ)
        self.watchers[1].start()
    # Open log files
    #TODO: Following functions can fail - maybe termination of start sequence is proper reaction
    self.log_out.open()
    if self.log_out != self.log_err: self.log_err.open()
    self.log_err.write("\n-=[ {} STARTING by Ramona on {} ]=-\n".format(self.ident, time.strftime("%Y-%m-%d %H:%M:%S")))
    # Record the state transition and reset per-launch bookkeeping
    self.state = program_state_enum.STARTING
    self.start_time = time.time()
    self.stop_time = None
    self.exit_time = None
    self.exit_status = None
    self.coredump_enabled = None
    self.launch_cnt += 1
    if reset_autorestart_cnt: self.autorestart_cnt = 0
def __preexec_fn(self):
    """Runs in the child process between fork() and exec() (POSIX only):
    optionally creates a new session/process group, applies the configured
    umask and resource limits. Errors are reported on fd 2 because the logging
    machinery belongs to the parent process."""
    # Launch in dedicated process group (optionally)
    if get_boolean(self.config.get('processgroup',True)):
        os.setsid()
    # Set umask (already parsed to an int in __init__)
    umask = self.config.get('umask')
    if umask is not None:
        try:
            os.umask(umask)
        except Exception, e:
            os.write(2, "FATAL: Set umask {0} failed: {1}\n".format(umask, e))
            raise
    # Set ulimits (best effort; a failed limit only emits a warning)
    if resource is not None:
        for k,v in self.ulimits.iteritems():
            try:
                resource.setrlimit(k,v)
            except Exception, e:
                os.write(2, "WARNING: Setting ulimit '{1}' failed: {0}\n".format(e, k))
    #TODO: Load shell profile if configured (can be used e.g. for virtual-env bootstrap)
def stop(self):
    '''Transition to state STOPPING by sending the first configured stop
    signal (POSIX) or terminating the process (Windows).

    Must only be called while RUNNING or STARTING; a call in FATAL state is
    tolerated as a no-op.
    '''
    if self.state == program_state_enum.FATAL: return # This can happen and it is probably OK
    assert self.subproc is not None
    assert self.state in (program_state_enum.RUNNING, program_state_enum.STARTING)
    L.debug("{0} -> STOPPING".format(self))
    if sys.platform == 'win32':
        self.subproc.terminate()
    else:
        # Arm the escalation sequence; get_next_stopsignal() consumes it.
        self.act_stopsignals = self.stopsignals[:]
        # Fix: local was previously named 'signal', shadowing the signal module.
        sig = self.get_next_stopsignal()
        try:
            if get_boolean(self.config.get('processgroup',True)):
                os.kill(-self.subproc.pid, sig) # Killing whole process group
            else:
                os.kill(self.subproc.pid, sig)
        except OSError:
            # Best effort: the process may already be gone (ESRCH).
            pass
    self.state = program_state_enum.STOPPING
    self.stop_time = time.time()
def on_terminate(self, status):
    """Handle termination of the subprocess.

    Records the exit status, drains and closes the stdout/stderr pipes
    and log files, then drives the state transition:
    STARTING -> FATAL (exited too quickly), STOPPING -> STOPPED, or
    RUNNING -> FATAL (optionally followed by an automatic restart).

    `status` is the raw wait() status on POSIX, or the exit code on win32.
    """
    self.exit_time = time.time()
    # Evaluate exit status
    if sys.platform == 'win32':
        self.exit_status = status
    elif os.WIFSIGNALED(status):
        # Killed by a signal - record the signal name instead of a code
        self.exit_status = get_signal_name(os.WTERMSIG(status))
    elif os.WIFEXITED(status):
        self.exit_status = os.WEXITSTATUS(status)
    else:
        self.exit_status = "?"
    # Close process stdout and stderr pipes (including vacuum of actual content)
    if sys.platform != 'win32':
        self.watchers[0].stop()
        self.watchers[0].set(0, 0)
        disable_nonblocking(self.subproc.stdout)
        while True:
            signal.setitimer(signal.ITIMER_REAL, 0.5) # Set timeout for following operation
            try:
                data = os.read(self.subproc.stdout.fileno(), 4096)
            except OSError, e:
                if e.errno == errno.EINTR:
                    L.warning("We have stall recovery situation on stdout socket of {0}".format(self))
                    # This stall situation can happen when program shares stdout with its child
                    # e.g. command=bash -c "echo ahoj1; tail -f /dev/null"
                    break
                raise
            if len(data) == 0: break
            self.log_out.write(data)
        self.watchers[1].stop()
        self.watchers[1].set(0, 0)
        disable_nonblocking(self.subproc.stderr)
        while True:
            signal.setitimer(signal.ITIMER_REAL, 0.2) # Set timeout for following operation
            try:
                data = os.read(self.subproc.stderr.fileno(), 4096)
            except OSError, e:
                if e.errno == errno.EINTR:
                    L.warning("We have stall recovery situation on stderr socket of {0}".format(self))
                    # See comment above
                    break
                raise
            if len(data) == 0: break
            self.log_err.write(data)
    elif sys.platform == 'win32':
        self.win32_read_stdfd()
    # Explicitly destroy subprocess object
    if self.subproc is not None: pidtext = ', pid: {}'.format(self.subproc.pid)
    else: pidtext = ''
    self.subproc = None
    # Close log files
    self.log_err.write("\n-=[ {} EXITED on {} with status {}{} ]=-\n".format(self.ident, time.strftime("%Y-%m-%d %H:%M:%S"), self.exit_status, pidtext))
    self.log_out.close()
    self.log_err.close()
    # Handle state change properly
    if self.state == program_state_enum.STARTING:
        # Died before 'starttimeout' elapsed (see on_tick) - failed start
        Lmy.error("{0} exited too quickly (exit_status:{1}{2}, now in FATAL state)".format(self.ident, self.exit_status, pidtext))
        L.error("{0} exited too quickly -> FATAL".format(self))
        self.state = program_state_enum.FATAL
        self.notify_fatal_state(program_state_enum.STARTING)
    elif self.state == program_state_enum.STOPPING:
        # Expected termination, requested earlier via stop()
        Lmy.info("{0} is now STOPPED (exit_status:{1}{2})".format(self.ident, self.exit_status, pidtext))
        L.debug("{0} -> STOPPED".format(self))
        self.state = program_state_enum.STOPPED
    else:
        # Unexpected death (e.g. while RUNNING)
        orig_state = self.state
        if self.autorestart:
            Lmy.error("{0} exited unexpectedly and going to be restarted (exit_status:{1}{2})".format(self.ident, self.exit_status, pidtext))
            L.error("{0} exited unexpectedly -> FATAL -> autorestart".format(self))
            self.state = program_state_enum.FATAL
            self.autorestart_cnt += 1
            self.notify_fatal_state(orig_state, autorestart=True)
            self.start(reset_autorestart_cnt=False)
        else:
            Lmy.error("{0} exited unexpectedly (exit_status:{1}{2}, now in FATAL state)".format(self.ident, self.exit_status, pidtext))
            L.error("{0} exited unexpectedly -> FATAL".format(self))
            self.state = program_state_enum.FATAL
            self.notify_fatal_state(orig_state)
def on_tick(self, now):
    """Periodic timer callback.

    Promotes STARTING -> RUNNING once 'starttimeout' has elapsed, and
    escalates stop signals for a program stuck in STOPPING longer than
    'stoptimeout'.
    """
    # Switch starting programs into running state
    if self.state == program_state_enum.STARTING:
        if now - self.start_time >= self.config['starttimeout']:
            Lmy.info("{0} is now RUNNING".format(self.ident))
            L.debug("{0} -> RUNNING".format(self))
            self.state = program_state_enum.RUNNING
    elif self.state == program_state_enum.STOPPING:
        if now - self.stop_time >= self.config['stoptimeout']:
            L.warning("{0} is still terminating - sending another signal".format(self))
            # NOTE: this local name shadows the `signal` module from here on
            signal = self.get_next_stopsignal()
            try:
                if get_boolean(self.config.get('processgroup',True)):
                    os.kill(-self.subproc.pid, signal) # Killing whole process group
                else:
                    os.kill(self.subproc.pid, signal)
            except:
                # Best effort - the process may already be gone.
                pass
def get_next_stopsignal(self):
    '''Pick the signal to use for the next stop attempt.

    A pending one-shot core-dump request (see charge_coredump) wins and
    yields SIGQUIT.  Otherwise the per-stop escalation list is consumed
    in order, falling back to SIGKILL once it is exhausted.
    '''
    if self.coredump_enabled:
        # Consume the one-shot core dump flag.
        self.coredump_enabled = None
        L.debug("Core dump enabled for {0} - using SIGQUIT".format(self))
        return signal.SIGQUIT
    if self.act_stopsignals:
        return self.act_stopsignals.pop(0)
    return signal.SIGKILL
def __read_stdfd(self, watcher, revents):
    """pyev I/O watcher callback: drain everything currently readable from
    the watched (non-blocking) fd into the matching log.

    watcher.data selects the destination: 0 -> log_out, 1 -> log_err.
    """
    try:
        while 1:
            try:
                data = os.read(watcher.fd, 4096)
            except OSError, e:
                if e.errno == errno.EAGAIN: return # No more data to read (would block)
                raise
            if len(data) == 0: # File descriptor is closed
                watcher.stop()
                return
            if watcher.data == 0: self.log_out.write(data)
            elif watcher.data == 1: self.log_err.write(data)
    except:
        # Never let an exception propagate into the event loop; log it.
        L.exception("Error during __read_stdfd:")
def win32_read_stdfd(self):
'''Alternative implementation of stdout/stderr non-blocking read for Windows
For details see:
http://code.activestate.com/recipes/440554/
http://msdn.microsoft.com/en-us/library/windows/desktop/aa365779(v=vs.85).aspx
'''
assert self.subproc is not None
if self.subproc.stdout is not None:
while 1:
x = msvcrt.get_osfhandle(self.subproc.stdout.fileno())
try:
(read, nAvail, nMessage) = win32pipe.PeekNamedPipe(x, 0)
except pywintypes.error, e:
if e.winerror == winerror.ERROR_BROKEN_PIPE: break
raise
if nAvail > 4096: nAvail = 4096
if nAvail == 0: break
(errCode, data) = win32file.ReadFile(x, nAvail, None)
self.log_out.write(data)
if self.subproc.stderr is not None:
while 1:
x = msvcrt.get_osfhandle(self.subproc.stderr.fileno())
try:
(read, nAvail, nMessage) = win32pipe.PeekNamedPipe(x, 0)
except pywintypes.error, e:
if e.winerror == winerror.ERROR_BROKEN_PIPE: break
raise
if nAvail > 4096: nAvail = 4096
if nAvail == 0: break
(errCode, data) = win32file.ReadFile(x, nAvail, None)
self.log_err.write(data)
def tail(self, cnscon, stream, lines=80, tailf=False):
    '''Send the last `lines` lines of the program's stdout or stderr log
    to the console connection; with tailf=True keep following the log.

    Raises svrcall_error for a misconfigured program and ValueError for
    an unknown stream name.
    '''
    if self.state == program_state_enum.CFGERROR:
        raise svrcall_error("Program {0} is not correctly configured".format(self.ident))
    if stream == 'stdout':
        target = self.log_out
    elif stream == 'stderr':
        target = self.log_err
    else:
        raise ValueError("Unknown stream '{0}'".format(stream))
    return target.tail(cnscon, lines, tailf)
def tailf_stop(self, cnscon, stream):
    '''Cancel a previously started `tail -f` on the given stream
    ('stdout' or 'stderr') for the console connection `cnscon`.'''
    if stream == 'stdout':
        log = self.log_out
    elif stream == 'stderr':
        log = self.log_err
    else:
        raise ValueError("Unknown stream '{0}'".format(stream))
    return log.tailf_stop(cnscon)
def charge_coredump(self):
    '''Arm a one-shot core dump: the next stop signal sent to the program
    will be SIGQUIT (see get_next_stopsignal).

    Requires the `resource` module (POSIX only) and a non-zero
    RLIMIT_CORE ulimit configured for the program.
    '''
    if resource is None:
        L.warning("This platform doesn't support core dumps.")
        return
    l = self.ulimits.get(resource.RLIMIT_CORE, (0,0))
    if l == (0,0):
        # Fixed message typo: "dump code" -> "dump core".
        Lmy.warning("Program {0} is not configured to dump core".format(self.ident))
        return
    self.coredump_enabled = True
def notify_fatal_state(self, orig_state, autorestart=False):
    '''Publish a notification describing a transition into FATAL state.

    Builds a text report (state change, pid, exit status, auto-restart
    info, and the tail of both logs) and hands it to the server
    application's notificator.  No-op when no notification target is
    configured or no server application is available.
    '''
    if self.notify_fatal_target is None: return
    svrapp = get_svrapp()
    if svrapp is None: return
    def last_lines(tailbuf, limit=21):
        # Up to `limit` most recent lines of the tail buffer, oldest first.
        # (Extracted: this loop was previously duplicated for both logs.)
        log = []
        for i, line in enumerate(reversed(tailbuf)):
            if i >= limit: break
            log.insert(0, line)
        return ''.join(log)
    ntftext = 'Program: {}\n'.format(self.ident)
    ntftext += 'Changed status: {} -> {}\n'.format(
        program_state_enum.labels.get(orig_state, '?'),
        program_state_enum.labels.get(self.state, '?')
    )
    if self.subproc is not None:
        ntftext += 'Pid: {}\n'.format(self.subproc.pid)
    if self.exit_status is not None:
        ntftext += 'Exit status: {}\n'.format(self.exit_status)
    if self.state == program_state_enum.FATAL:
        if autorestart:
            ntftext += 'Auto-restart: YES (count={})\n'.format(self.autorestart_cnt)
        else:
            ntftext += 'Auto-restart: NO (count={})\n'.format(self.autorestart_cnt)
    ntftext += '\nStandard output:\n'+'-'*50+'\n'
    ntftext += last_lines(self.log_out.tailbuf)
    ntftext += '\n'+'-'*50+'\n'
    if self.log_err != self.log_out:
        ntftext += '\nStandard error:\n'+'-'*50+'\n'
        ntftext += last_lines(self.log_err.tailbuf)
        ntftext += '\n'+'-'*50+'\n'
    svrapp.notificator.publish(self.notify_fatal_target, ntftext, "{} / {}".format(self.ident, program_state_enum.labels.get(self.state, '?')))
def validate_notify_target(target):
    """Return True iff `target` is a valid notification target spec.

    A target is either "<when>" or "<when>:<email>", where <when> must be
    'now' or 'daily'.  Since str.split(':', 1) always yields one or two
    parts, the previous unreachable `else` branch was removed; behavior
    is unchanged.
    """
    when = target.split(':', 1)[0]
    return when in ('now', 'daily')
| |
#!/hint/python3
import io
import os
import re
import shutil
import string
import subprocess
import sys
from contextlib import contextmanager
from traceback import print_exc
from typing import Callable, Generator, List, Optional, TextIO, Tuple
from . import ansiterm
# run()/run_bincapture()/run_txtcapture() and capture_output() seem like they're
# re-implementing something that should already exist for us to use. And
# indeed, the `run*()` functions are essentially just stdlib `subprocess.run()`
# and `capture_output()` is essentially just stdlib
# `contextlib.redirect_stdout()`+`contextlib.redirect_stderr()`. But the big
# reason for them to exist here is: `contextlib.redirect_*` and `subprocess`
# don't play together! It's infuriating.
#
# So we define a global `_capturing` that is set while `capture_output()` is
# running, and have the `run*()` functions adjust their behavior if it's set.
# We could more generally do this by wrapping either `subprocess.run()` or
# `contextlib.redirect_*()`. If we only ever called the redirect/capture
# function on a real file with a real file descriptor, it would be hairy, but
# not _too_ hairy[1]. But we want to call the redirect/capture function with
# not-a-real-file things like Indent or LineTracker. So we'd have to get even
# hairier... we'd have to do a bunch of extra stuff when the output's
# `.fileno()` raises io.UnsupportedOperation; the same way that Go's
# `os/exec.Cmd` has to do extra stuff when the output isn't an `*os.File` (and
# that's one of the big reasons why I've said that Go's "os/exec" is superior to
# other languages' subprocess facilities). And it's my best judgment that just
# special-casing it with `_capturing` is the better choice than taking on all
# the complexity of mimicking Go's brilliance.
#
# [1]: https://eli.thegreenplace.net/2015/redirecting-all-kinds-of-stdout-in-python/
_capturing = False
def run(args: List[str]) -> None:
    """run is like "subprocess.run(args)", but with helpful settings and
    obeys "with capture_output(out)".
    """
    if not _capturing:
        subprocess.run(args, check=True)
        return
    # While capturing, fold the child's output into the captured stream
    # and surface it in the exception message on failure.
    try:
        subprocess.run(args, check=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)
    except subprocess.CalledProcessError as err:
        raise Exception(f"{err.stdout}{err}") from err
def run_bincapture(args: List[str]) -> bytes:
    """Run the command and return its stdout as raw bytes.

    Like "subprocess.run(args, capture_out=True, text=False)", but with
    helpful settings, and it obeys "with capture_output(out)".
    """
    if not _capturing:
        return subprocess.run(args, check=True, stdout=subprocess.PIPE).stdout
    # While capturing, also grab stderr so it can be shown on failure.
    try:
        return subprocess.run(args, check=True, capture_output=True).stdout
    except subprocess.CalledProcessError as err:
        raise Exception(f"{err.stderr.decode('UTF-8')}{err}") from err
def run_txtcapture(args: List[str]) -> str:
    """Run the command and return its stdout as text, with a single
    trailing newline (if any) removed.

    Like "subprocess.run(args, capture_out=True, text=True)", but with
    helpful settings, and it obeys "with capture_output(out)".
    """
    if _capturing:
        try:
            completed = subprocess.run(args, check=True, capture_output=True, text=True)
        except subprocess.CalledProcessError as err:
            raise Exception(f"{err.stderr}{err}") from err
        out = completed.stdout
    else:
        out = subprocess.run(args, check=True, stdout=subprocess.PIPE, text=True).stdout
    return out[:-1] if out.endswith("\n") else out
@contextmanager
def capture_output(log: io.StringIO) -> Generator[None, None, None]:
    """capture_output is like contextlib.redirect_stdout but also
    redirects stderr, and additionally flips the module-level
    `_capturing` flag so that run/run_bincapture/run_txtcapture know
    to capture their subprocess output too.
    """
    global _capturing
    saved = (_capturing, sys.stdout, sys.stderr)
    _capturing = True
    sys.stdout = sys.stderr = log
    try:
        yield
    finally:
        # Restore everything even if the body raised.
        _capturing, sys.stdout, sys.stderr = saved
def _lex_char_or_cs(text: str) -> Tuple[str, str]:
"""Look atthe beginning of the given text and trim either a byte, or
an ANSI control sequence from the beginning, returning a tuple
("char-or-cs", "remaining-text"). If it looks like the text is a
truncated control seqence, then it doesn't trim anything, and
returns ("", "original"); signaling that it needs to wait for more
input before successfully lexing anything.
"""
if text == '\033':
# wait to see if this is a control sequence
return '', text
i = 1
if text.startswith('\033['):
try:
i = len('\033[')
while text[i] not in string.ascii_letters:
i += 1
i += 1
except IndexError:
# wait for a complete control sequence
return '', text
return text[:i], text[i:]
class Indent(io.StringIO):
    """Indent() is like a io.StringIO(), will indent text with the given
    string.
    """
    def __init__(self, indent: str = "", output: Optional[TextIO] = None, columns: Optional[int] = None) -> None:
        """Arguments:
        indent: str: The string to indent with.
        output: Optional[TextIO]: A TextIO to write to, instead of
            building an in-memory buffer.
        columns: Optional[int]: How wide the terminal is; this is
            important because a line wrap needs to trigger an
            indent. If not given, then 'output.columns' is
            used if 'output' is set and has a 'columns'
            attribute, otherwise shutil.get_terminal_size() is
            used. Use a value <= 0 to explicitly disable
            wrapping.
        The 'columns' attribute on the resulting object is set to the
        number of usable columns; "arg_columns - len(indent)". This
        allows Indent objects to be nested.
        Indent understands "\r" and "\n", but not "\t" or ANSI control
        sequences that move the cursor; it assumes that all ANSI
        control sequences do not move the cursor.
        """
        super().__init__()
        self._indent = indent
        self._output = output
        if columns is None:
            if output and hasattr(output, 'columns'):
                columns = output.columns # type: ignore
            else:
                columns = shutil.get_terminal_size().columns
        # Usable width after accounting for the indent prefix.
        self.columns = columns - len(self._indent)
    # Input not yet processed (possibly a truncated ANSI control sequence).
    _rest = ""
    # Current column on the output line (0-based, excluding the indent).
    _cur_col = 0
    # 0: no indent has been printed for this line, and indent will need to be printed unless this is the final trailing NL
    # 1: an indent needs to be printed for this line IFF there is any more output on it
    # 2: no indent (currently) needs to be printed for this line
    _print_indent = 0
    def write(self, text: str) -> int:
        """Write `text`, inserting the indent at line starts and inserting
        hard line-wraps at self.columns."""
        # This algorithm is based on
        # https://git.parabola.nu/packages/libretools.git/tree/src/chroot-tools/indent
        self._rest += text
        while self._rest:
            c, self._rest = _lex_char_or_cs(self._rest)
            if c == "":
                # wait for more input
                break
            elif c == "\n":
                if self._print_indent < 1:
                    self._write(self._indent)
                self._write(c)
                self._print_indent = 0
                self._cur_col = 0
            elif c == "\r":
                self._write(c)
                self._print_indent = min(self._print_indent, 1)
                self._cur_col = 0
            elif c.startswith('\033['):
                # Control sequences occupy no columns but may still need
                # the indent printed first.
                if self._print_indent < 2:
                    self._write(self._indent)
                self._write(c)
                self._print_indent = 2
            elif self.columns > 0 and self._cur_col >= self.columns:
                # Hard-wrap: re-queue the character behind a synthetic newline.
                self._rest = "\n" + c + self._rest
            else:
                if self._print_indent < 2:
                    self._write(self._indent)
                self._write(c)
                self._print_indent = 2
                self._cur_col += len(c)
        return len(text)
    def _write(self, text: str) -> None:
        # Forward to the wrapped output, or to the in-memory buffer.
        if self._output:
            self._output.write(text)
        else:
            super().write(text)
    def flush(self) -> None:
        if self._output:
            self._output.flush()
        else:
            super().flush()
    def input(self) -> str:
        """Use "myindent.input()" instead of "input()" in order to nest well
        with LineTrackers.
        """
        if hasattr(self._output, 'input'):
            text: str = self._output.input() # type: ignore
        else:
            text = input()
        return text
class LineTracker(io.StringIO):
    """LineTracker() is like a io.StringIO(), but will keep track of which
    line you're on; starting on line "1".
    LineTracker understands "\n", and the "cursor-up" (CSI-A) control
    sequence. It does not detect wrapped lines; use Indent() to turn
    those in to hard-wraps that LineTracker understands.
    """
    def __init__(self, output: Optional[TextIO] = None) -> None:
        # BUGFIX: initialize the io.StringIO base class.  Without this,
        # write() with no `output` set raised
        # "ValueError: I/O operation on uninitialized object" when it
        # fell through to super().write().
        super().__init__()
        self._output = output
        if output and hasattr(output, 'columns'):
            self.columns = output.columns # type: ignore
    # The line the cursor is currently on (1-based).
    cur_line = 1
    # Input not yet processed (possibly a truncated ANSI control sequence).
    _rest = ""
    def _handle(self, text: str) -> None:
        """Feed `text` through the lexer and update cur_line accordingly."""
        self._rest += text
        while self._rest:
            c, self._rest = _lex_char_or_cs(self._rest)
            if c == "":
                # wait for more input
                break
            elif c == "\n":
                self.cur_line += 1
            elif c.startswith("\033[") and c.endswith('A'):
                # CSI <n> A = cursor up n lines; n defaults to 1.
                lines = int(c[len("\033["):-len('A')] or "1")
                self.cur_line -= lines
    def input(self) -> str:
        """Use "mylinetracker.input()" instead of "input()" to avoid the
        LineTracker not seeing any newlines input by the user.
        """
        if hasattr(self._output, 'input'):
            text: str = self._output.input() # type: ignore
        else:
            text = input()
        self._handle(text + "\n")
        return text
    def goto_line(self, line: int) -> None:
        """goto_line moves the cursor to the beginning of the given line;
        where line 1 is the line that the LineTracker started on, line
        0 is the line above that, and line 2 is the line below
        that.  (Docstring fixed: previously said "line 1" twice.)
        """
        self.write("\r")
        if line < self.cur_line:
            total_lines = shutil.get_terminal_size().lines
            if (self.cur_line - line) >= total_lines:
                raise Exception(f"cannot go back {self.cur_line - line} lines (limit={total_lines - 1})")
            self.write(ansiterm.cursor_up(self.cur_line - line))
        else:
            self.write("\n" * (line - self.cur_line))
    def write(self, text: str) -> int:
        self._handle(text)
        if self._output:
            return self._output.write(text)
        else:
            return super().write(text)
    def flush(self) -> None:
        if self._output:
            self._output.flush()
        else:
            super().flush()
class Checker:
    """Checker is a terminal UI widget for printing a series of '[....]'
    (running) / '[ OK ]' / '[FAIL]' checks where we can print diagnostic
    output while the check is running, and then go back and update the
    status, and nest checks.
    """
    # True while every check (and subcheck) so far has passed.
    ok: bool = True
    @contextmanager
    def check(self, name: str, clear_on_success: bool = True) -> Generator['CheckResult', None, None]:
        """check returns a context manager that handles printing a '[....]' /
        '[ OK ]' / '[FAIL]' check. While the check is running, it
        will stream whatever you write to stdout/stderr. If
        clear_on_success is True, then once the check finishes, if the
        check passed then it will erase that stdout/stderr output,
        since you probably only want diagnostic output if the check
        fails.
        You can provide a (1-line) textual check result that will be
        shown on both success and failure by writing to "mycheck.result".
        You may cause a check to fail by either raising an Exception,
        or by setting "mycheck.ok = False". If you do neither of these,
        then the check will be considered to pass.
        The mycheck.subcheck method returns a context manager for a
        nested child check.
        """
        def line(status: str, rest: Optional[str] = None) -> str:
            # Compose one status line: " <status> <name>[: <rest>]".
            txt = name
            if rest:
                txt = f'{txt}: {rest}'
            return f" {status}{ansiterm.sgr} {txt}"
        output = LineTracker(output=sys.stdout)
        output.write(line(status=f'{ansiterm.sgr.bold.fg_blu}[....]') + "\n")
        check = CheckResult()
        # Stream the body's stdout/stderr indented under the status line.
        with capture_output(Indent(output=output, indent=" > ")):
            try:
                yield check
            except Exception as err:
                if str(err).strip():
                    print(err)
                check.ok = False
        end = output.cur_line
        # Go back up and rewrite the status line with the final verdict.
        output.goto_line(1)
        if check.ok:
            output.write(line(status=f'{ansiterm.sgr.bold.fg_grn}[ OK ]', rest=check.result))
        else:
            output.write(line(status=f'{ansiterm.sgr.bold.fg_red}[FAIL]', rest=check.result))
        if check.ok and clear_on_success:
            output.write(ansiterm.clear_rest_of_screen + "\n")
        else:
            output.write(ansiterm.clear_rest_of_line)
            output.goto_line(end)
        # Propagate the result up to this (possibly parent) checker.
        self.ok &= check.ok
    # alias for readability
    subcheck = check
class CheckResult(Checker):
    """A CheckResult is the context manager type returned by
    "Checker.check".
    """
    # Optional one-line textual result shown next to the OK/FAIL status.
    result: Optional[str] = None
| |
from stomp.connect import StompConnection12
from stomp.exception import ConnectFailedException
import pyxb.utils.domutils as domutils
import darwinpush.xb.pushport as pp
from darwinpush.parser import Parser
from darwinpush import ftp, Source
import enum
import multiprocessing
import sys
import threading
import time
import zlib
import signal
import logging
log = logging.getLogger("darwinpush")
##### Code for STOMP debugging
#import logging
#console = logging.StreamHandler()
#console.setFormatter(logging.Formatter('[%(asctime)s] %(name)-12s %(levelname)-8s %(message)s'))
#logging.getLogger().addHandler(console)
#logging.getLogger().setLevel(logging.DEBUG)
#LOGGER = logging.getLogger('stomp')
#####
def listener_process(c, q, quit_event):
    """Child-process entry point: instantiate the listener class `c` with
    the message queue and quit event, then hand control to its _run()
    loop.  SIGINT is ignored so Ctrl-C is handled by the parent only."""
    instance = c(q, quit_event)
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    instance._run()
def parser_process(q_in, q_out, quit_event):
    """Child-process entry point: run a Parser that reads raw messages
    from q_in and pushes parsed ones onto q_out.  SIGINT is ignored so
    Ctrl-C is handled by the parent only."""
    worker = Parser(q_in, q_out, quit_event)
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    worker.run()
class ErrorType(enum.Enum):
    # Failure while gunzip'ing the raw STOMP message body.
    DecompressionError = 1
    # Failure while parsing/dispatching the decompressed XML payload.
    ParseError = 2
class Error:
    """Record of a message-processing failure: the stage at which it
    happened (an ErrorType), the offending payload, and the exception
    that was raised."""
    def __init__(self, error_type, payload, exception):
        self._error_type = error_type
        self._payload = payload
        self._exception = exception
    @property
    def payload(self):
        """The raw data that failed to process."""
        return self._payload
    @property
    def error_type(self):
        """The ErrorType stage of the failure."""
        return self._error_type
    @property
    def exception(self):
        """The original exception instance."""
        return self._exception
    def __str__(self):
        # Delegate to the wrapped exception's message.
        return str(self._exception)
    # repr intentionally matches str (same behavior as before).
    def __repr__(self):
        return self.__str__()
def has_method(_class, _method):
    """Return True iff `_class` exposes a callable attribute `_method`."""
    attr = getattr(_class, _method, None)
    return callable(attr)
class Client:
    """ The object that acts as the Client to the National Rail enquiries Darwin Push Port STOMP server.
    You should instantiate an instance of this object, with the required parameters to act as the
    client to the Darwin Push Port. Listeners registered with this object will be passed messages
    that are received from the server once they have been turned into the relevant python object.
    Args:
        stomp_user: Your STOMP user name taken from the National Rail Open Data portal.
        stomp_password: Your STOMP password taken from the National Rail Open Data portal.
        stomp_queue: Your STOMP queue name taken from the National Rail Open Data portal.
        listener: The class object (not an instance of it) for your Listener subclass.
    """
    def __init__(self, stomp_user, stomp_password, stomp_queue, listener,
                 ftp_user=None, ftp_passwd=None):
        self.stomp_user = stomp_user
        self.stomp_password = stomp_password
        self.stomp_queue = stomp_queue
        self.ftp_user = ftp_user
        self.ftp_passwd = ftp_passwd
        self.auto_reconnect = True
        # Shared flag telling both child processes to shut down.
        self._quit_event = multiprocessing.Event()
        # parser_queue: raw payloads -> parser; listener_queue: parsed -> listener.
        self.listener_queue = multiprocessing.Queue()
        self.parser_queue = multiprocessing.Queue()
        self.listener_process = multiprocessing.Process(
            target=listener_process,
            args=(listener, self.listener_queue, self._quit_event))
        self.parser_process = multiprocessing.Process(
            target=parser_process,
            args=(self.parser_queue, self.listener_queue, self._quit_event))
    def _start_processes(self):
        """Start the parser and listener processes."""
        self.listener_process.start()
        self.parser_process.start()
    def _stop_processes(self):
        """Signal both child processes to quit and wait for them to join."""
        # Signal processes to quit
        self._quit_event.set()
        # Parser process: dummy message and quit
        # (the dummy item unblocks the queue's blocking get so the
        # process notices the quit event)
        self.parser_queue.put((None, None, None))
        print ("Sent dummy parser message")
        self.parser_process.join()
        # Listener process: dummy message and quit
        self.listener_queue.put((None, None))
        print ("Sent dummy listener message")
        self.listener_process.join()
    def connect(self, downtime=None, stomp=True):
        """Connect to the Darwin Push Port and start receiving messages.
        Args:
            downtime: An int representing the number of seconds of downtime. It
                can also be a datetime.timedelta representing the downtime.
                If the number of seconds is:
                    <=0, then the snapshot for the day will be downloaded
                        and applied, and also all the logs.
                        NOT YET IMPLEMENTED.
                    >0, all the required logs are downloaded. This means no
                        logs if less than 5 min (300 s) downtime, as Darwin
                        holds 5 minutes of messages in the queue before it
                        pushes the log to the FTP server and removes the
                        messages from the waiting queue.
                Set downtime to None to disable FTP logs and snapshots.
                When the files from FTP are parsed, only the messages that
                are timestamped by darwin as being sent starting from
                `current_time - downtime` will be sent to the listener.
            stomp: Whether to connect to Darwin via stomp or not. Default is
                True. If False, connect() just fetches the relevant files over
                FTP, sends them to the listener, and quits; there is no need
                to disconnect() when stomp is False.
        """
        self._start_processes()
        if downtime is not None:
            self.ftp(downtime)
        if stomp is True:
            self._run()
        else:
            self._stop_processes()
    def disconnect(self):
        """Disconnect from STOMP and nicely terminate the listener and parser
        processes."""
        self.connected = False
        self._stop_processes()
    def ftp(self, downtime):
        """Parse the FTP logs."""
        ftp.fetchAll(self, downtime, user=self.ftp_user, passwd=self.ftp_passwd)
    def _run(self):
        # Connect synchronously (threaded variant kept below for reference).
        self._connect()
        # self.thread = threading.Thread(target=self._connect)
        # self.thread.daemon = True
        # self.thread.start()
    def _connect(self):
        self.client = StompClient()
        self.client.connect(self.stomp_user, self.stomp_password, self.stomp_queue, self)
        # while self.connected:
        #     time.sleep(1)
    def on_ftp_message(self, message, source="FTP"):
        # FTP-sourced messages reuse the normal message path (no headers).
        self._on_message(None, message, source)
    def _on_message(self, headers, message, source=None):
        """Decode an XML message, build the pyxb object and queue it for
        the parser process."""
        if type(message) == bytes:
            message = message.decode("utf-8")
        # Decode the message and parse it as an XML DOM.
        doc = domutils.StringToDOM(message)
        # Parse the record with pyXb.
        m = pp.CreateFromDOM(doc.documentElement)
        self.parser_queue.put((m, message, source))
    def _on_error(self, headers, message):
        # Error frame received from the STOMP server.
        print("Error: %s, %s" % (headers, message))
    def _on_local_error(self, error):
        # Error raised locally while handling a message (see StompClient).
        print("+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Caught Message Error in Client Thread +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+")
        print(str(error))
        print("+-+-+-+-+-+-+-+-+-+-+-+-+-++-+-+-+-+-+-+-+-+-+-+-+-+-++-+-+-+-+-+-+-+-+-+-+-+-+-+-++-+-+-+-+-+-+-+-")
    def _on_disconnected(self):
        print("Attempting to reconnect")
        if self.auto_reconnect:
            res = self.reconnect()
            # NOTE: reconnect() returns None (falsy) when retries are
            # exhausted, so the else-branch also covers that case.
            if res:
                self.on_reconnected()
            else:
                self.on_disconnected()
        else:
            self.on_disconnected()
    def _on_connected(self, headers, body):
        self.on_connected(headers, body)
    def on_disconnected(self):
        """Called when the STOMP connection was lost and was not
        re-established (auto_reconnect disabled, or reconnection failed)."""
        pass
    def on_connected(self, headers, body):
        """Called when the connection to STOMP was successful the first time."""
        pass
    def on_reconnected(self):
        """Called after a successful reconnection which was triggered by a
        previous connection problem."""
        pass
    def reconnect(self, retries=3, delay=5):
        """Attempt to reconnect to STOMP.
        Args:
            retries: Number of times to try again. Set <=0 for infinite retries.
            delay: Delay in seconds between retries.
        Return:
            True if everything went fine, false otherwise.
        """
        retry = 0
        while retry < retries or retries <= 0:
            log.debug("Trying to reconnect, %d..." % retry)
            try:
                self.client.connect(self.stomp_user, self.stomp_password, self.stomp_queue, self)
                log.debug("Reconnection successful at try %d." % retry)
                return True
            except ConnectFailedException as e:
                log.debug("(retry %d) STOMP Conneciton error: %s" % (retry, e))
                retry += 1
                time.sleep(delay)
        # NOTE(review): falls through returning None (not False) when
        # retries are exhausted; callers only test truthiness, so OK.
    def run(self):
        """Block forever; usable as a main-thread keep-alive loop."""
        while 1:
            time.sleep(1)
class StompClient:
    """Adapter between stomp.py connection events and the callback
    object's _on_* methods (forwarded only when they exist); also
    gunzips incoming message bodies before handing them on."""
    def connect(self, user, password, queue, callback_object):
        """Open a STOMP 1.2 connection to the Darwin feed and subscribe
        to the given queue."""
        log.debug("StompClient.connect()")
        self.cb = callback_object
        # auto_decode=False: message bodies are gzip'ed binary, not text.
        self.conn = StompConnection12([("datafeeds.nationalrail.co.uk", 61613)], auto_decode=False)
        self.conn.set_listener('', self)
        self.conn.start()
        self.conn.connect(user, password)
        self.conn.subscribe("/queue/"+queue, ack='auto', id='1')
    def on_error(self, headers, message):
        log.debug("StompClient.onError(headers={}, message={})".format(headers, message))
        if has_method(self.cb, "_on_error"):
            self.cb._on_error(headers, message)
    def on_connecting(self, host_and_port):
        log.debug("StompClient.onConnecting(host_and_port={})".format(host_and_port))
        if has_method(self.cb, "_on_connecting"):
            self.cb._on_connecting(host_and_port)
    def on_connected(self, headers, body):
        log.debug("StompClient.onConnected(headers={}, body={})".format(headers, body))
        if has_method(self.cb, "_on_connected"):
            self.cb._on_connected(headers, body)
    def on_disconnected(self):
        log.debug("StompClient.onDisconnected()")
        if has_method(self.cb, "_on_disconnected"):
            self.cb._on_disconnected()
    def on_local_error(self, error):
        # Errors raised while handling a message locally (not from STOMP).
        if has_method(self.cb, "_on_local_error"):
            self.cb._on_local_error(error)
    def on_message(self, headers, message):
        """Decompress the raw STOMP body and forward it; failures are
        wrapped in an Error and reported through on_local_error."""
        log.debug("StompClient.onMessage(headers={}, body=<truncated>)".format(headers))
        if has_method(self.cb, "_on_message"):
            try:
                # 16+MAX_WBITS tells zlib to expect a gzip header.
                decompressed_data = zlib.decompress(message, 16+zlib.MAX_WBITS)
                try:
                    self.cb._on_message(headers, decompressed_data, Source.stomp)
                except Exception as e:
                    log.exception("Exception occurred parsing DARWIN message: {}.".format(decompressed_data))
                    self.on_local_error(Error(ErrorType.ParseError, decompressed_data, e))
            except Exception as e:
                log.exception("Exception occurred decompressing the STOMP message.")
                self.on_local_error(Error(ErrorType.DecompressionError, (headers, message), e))
| |
# -*- coding: utf-8 -*-
"""
Download API.
"""
# Standard library imports
from collections import deque
import json
import os
import sys
# Third part imports
from qtpy.QtCore import QByteArray, QObject, QTimer, QThread, QUrl, Signal
from qtpy.QtNetwork import QNetworkAccessManager, QNetworkRequest
import requests
# Local imports
from conda_manager.api.conda_api import CondaAPI
from conda_manager.utils.logs import logger
# Use sys.version_info rather than indexing the version string:
# `sys.version[0] == '2'` breaks for double-digit major versions
# (e.g. a hypothetical Python 10) and compares text, not numbers.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
def to_binary_string(obj, encoding=None):
    """Convert `obj` to binary string (bytes in Python 3, str in Python 2)"""
    if not PY2:
        # Python 3: encode text, defaulting to UTF-8.
        return bytes(obj, 'utf-8' if encoding is None else encoding)
    # Python 2: str() when no encoding requested, else explicit encode.
    return str(obj) if encoding is None else obj.encode(encoding)
def to_text_string(obj, encoding=None):
    """Convert `obj` to (unicode) text string."""
    if PY2:
        # Python 2
        return unicode(obj) if encoding is None else unicode(obj, encoding)
    # Python 3
    if encoding is None:
        return str(obj)
    if isinstance(obj, str):
        # Already text (e.g. misuse of this helper) - pass through unchanged.
        return obj
    return str(obj, encoding)
def handle_qbytearray(obj, encoding):
    """
    Qt/Python3 compatibility helper.

    Unwrap a QByteArray to raw bytes (if needed), then decode to text.
    """
    data = obj.data() if isinstance(obj, QByteArray) else obj
    return to_text_string(data, encoding=encoding)
class DownloadWorker(QObject):
    """Passive record of one queued download; the owning _DownloadAPI
    emits the signals below as the download progresses."""
    # url, path
    sig_download_finished = Signal(str, str)
    # url, path, progress_size, total_size
    sig_download_progress = Signal(str, str, int, int)
    # worker, output, error
    sig_finished = Signal(object, object, object)
    def __init__(self, url, path):
        super(DownloadWorker, self).__init__()
        self.url = url
        self.path = path
        # Set to True by _DownloadAPI when the download completes.
        self.finished = False
    def is_finished(self):
        # Polled by _DownloadAPI._clean() to garbage-collect workers.
        return self.finished
class _DownloadAPI(QObject):
"""
Download API based on QNetworkAccessManager
"""
def __init__(self, chunk_size=1024):
super(_DownloadAPI, self).__init__()
self._chunk_size = chunk_size
self._head_requests = {}
self._get_requests = {}
self._paths = {}
self._workers = {}
self._manager = QNetworkAccessManager(self)
self._timer = QTimer()
# Setup
self._timer.setInterval(1000)
self._timer.timeout.connect(self._clean)
# Signals
self._manager.finished.connect(self._request_finished)
self._manager.sslErrors.connect(self._handle_ssl_errors)
def _handle_ssl_errors(self, reply, errors):
logger.error(str(('SSL Errors', errors)))
def _clean(self):
"""
Periodically check for inactive workers and remove their references.
"""
if self._workers:
for url in self._workers.copy():
w = self._workers[url]
if w.is_finished():
self._workers.pop(url)
self._paths.pop(url)
if url in self._get_requests:
self._get_requests.pop(url)
else:
self._timer.stop()
def _request_finished(self, reply):
url = to_text_string(reply.url().toEncoded(), encoding='utf-8')
if url in self._paths:
path = self._paths[url]
if url in self._workers:
worker = self._workers[url]
if url in self._head_requests:
self._head_requests.pop(url)
start_download = True
header_pairs = reply.rawHeaderPairs()
headers = {}
for hp in header_pairs:
headers[to_text_string(hp[0]).lower()] = to_text_string(hp[1])
total_size = int(headers.get('content-length', 0))
# Check if file exists
if os.path.isfile(path):
file_size = os.path.getsize(path)
# Check if existing file matches size of requested file
start_download = file_size != total_size
if start_download:
# File sizes dont match, hence download file
qurl = QUrl(url)
request = QNetworkRequest(qurl)
self._get_requests[url] = request
reply = self._manager.get(request)
error = reply.error()
if error:
logger.error(str(('Reply Error:', error)))
reply.downloadProgress.connect(
lambda r, t, w=worker: self._progress(r, t, w))
else:
# File sizes match, dont download file
worker.finished = True
worker.sig_download_finished.emit(url, path)
worker.sig_finished.emit(worker, path, None)
elif url in self._get_requests:
data = reply.readAll()
self._save(url, path, data)
    def _save(self, url, path, data):
        """Write the downloaded payload to disk and finish the worker.

        Emits the worker's finished signals and then drops all
        bookkeeping for `url`.
        """
        worker = self._workers[url]
        # NOTE(review): the `path` argument is immediately shadowed by the
        # stored path for this url -- presumably always equal; confirm.
        path = self._paths[url]
        if len(data):
            with open(path, 'wb') as f:
                f.write(data)
        # Clean up
        worker.finished = True
        worker.sig_download_finished.emit(url, path)
        worker.sig_finished.emit(worker, path, None)
        self._get_requests.pop(url)
        self._workers.pop(url)
        self._paths.pop(url)
def _progress(self, bytes_received, bytes_total, worker):
"""
"""
worker.sig_download_progress.emit(
worker.url, worker.path, bytes_received, bytes_total)
def download(self, url, path):
"""
"""
# original_url = url
qurl = QUrl(url)
url = to_text_string(qurl.toEncoded(), encoding='utf-8')
logger.debug(str((url, path)))
if url in self._workers:
while not self._workers[url].finished:
return self._workers[url]
worker = DownloadWorker(url, path)
# Check download folder exists
folder = os.path.dirname(os.path.abspath(path))
if not os.path.isdir(folder):
os.makedirs(folder)
request = QNetworkRequest(qurl)
self._head_requests[url] = request
self._paths[url] = path
self._workers[url] = worker
self._manager.head(request)
self._timer.start()
return worker
def terminate(self):
pass
class RequestsDownloadWorker(QObject):
    """Worker that runs a blocking callable and reports via Qt signals."""
    sig_finished = Signal(object, object, object)
    sig_download_finished = Signal(str, str)
    sig_download_progress = Signal(str, str, int, int)

    def __init__(self, method, args, kwargs):
        """Store the callable and its arguments for deferred execution."""
        super(RequestsDownloadWorker, self).__init__()
        self.method = method
        self.args = args
        self.kwargs = kwargs
        self._is_finished = False

    def is_finished(self):
        """Return True once start() has run to completion."""
        return self._is_finished

    def start(self):
        """Invoke the wrapped callable and emit sig_finished.

        Any exception is caught and forwarded as the error payload
        instead of propagating into the thread.
        """
        output = None
        error = None
        try:
            output = self.method(*self.args, **self.kwargs)
        except Exception as err:
            error = err
        logger.debug(str((self.method.__name__,
                          self.method.__module__,
                          error)))
        self.sig_finished.emit(self, output, error)
        self._is_finished = True
class _RequestsDownloadAPI(QObject):
    """Download API based on `requests`, executed on QThreads.

    Use the module-level ``RequestsDownloadAPI()`` accessor rather than
    instantiating this class directly.
    """
    _sig_download_finished = Signal(str, str)
    _sig_download_progress = Signal(str, str, int, int)

    def __init__(self):
        # FIX: the original called super(QObject, self).__init__(), which
        # skips QObject's own initializer in the MRO; name this class.
        super(_RequestsDownloadAPI, self).__init__()
        self._conda_api = CondaAPI()
        self._queue = deque()
        self._threads = []
        self._workers = []
        self._timer = QTimer()

        self._chunk_size = 1024
        self._timer.setInterval(1000)
        self._timer.timeout.connect(self._clean)

    def _clean(self):
        """
        Periodically check for inactive workers and remove their references.
        """
        if self._workers:
            # FIX: iterate over copies -- removing from a list while
            # iterating it skips the element after each removed one.
            for w in self._workers[:]:
                if w.is_finished():
                    self._workers.remove(w)

        if self._threads:
            for t in self._threads[:]:
                if t.isFinished():
                    self._threads.remove(t)
        else:
            self._timer.stop()

    def _start(self):
        """Start the next queued thread when it is the only one pending."""
        if len(self._queue) == 1:
            thread = self._queue.popleft()
            thread.start()
            self._timer.start()

    def _create_worker(self, method, *args, **kwargs):
        """Wrap `method` in a RequestsDownloadWorker on its own QThread."""
        # FIXME: this might be heavy...
        thread = QThread()
        worker = RequestsDownloadWorker(method, args, kwargs)
        worker.moveToThread(thread)
        worker.sig_finished.connect(self._start)
        self._sig_download_finished.connect(worker.sig_download_finished)
        self._sig_download_progress.connect(worker.sig_download_progress)
        worker.sig_finished.connect(thread.quit)
        thread.started.connect(worker.start)
        self._queue.append(thread)
        self._threads.append(thread)
        self._workers.append(worker)
        self._start()
        return worker

    def _download(self, url, path=None, force=False):
        """Download `url` to `path` (defaults to the url basename).

        The transfer is skipped when the destination file already has
        the advertised Content-Length, unless `force` is given.
        Returns the destination path.
        """
        if path is None:
            path = url.split('/')[-1]

        # Make dir if non existent
        folder = os.path.dirname(os.path.abspath(path))
        if not os.path.isdir(folder):
            os.makedirs(folder)

        # Start actual download
        try:
            r = requests.get(url, stream=True)
        except Exception as error:
            logger.error(str(error))
            # FIX: break if error found!  The original fell through here
            # and raised NameError on the undefined response object.
            self._sig_download_finished.emit(url, path)
            return path

        total_size = int(r.headers.get('Content-Length', 0))

        # Check if file exists
        if os.path.isfile(path) and not force:
            file_size = os.path.getsize(path)

            # Check if existing file matches size of requested file
            if file_size == total_size:
                self._sig_download_finished.emit(url, path)
                return path

        # File not found or file size did not match. Download file.
        progress_size = 0
        with open(path, 'wb') as f:
            for chunk in r.iter_content(chunk_size=self._chunk_size):
                if chunk:
                    f.write(chunk)
                    progress_size += len(chunk)
                    self._sig_download_progress.emit(url, path,
                                                     progress_size,
                                                     total_size)
            self._sig_download_finished.emit(url, path)

        return path

    def _is_valid_url(self, url):
        """HEAD `url` and report whether it answered 200."""
        try:
            r = requests.head(url)
            value = r.status_code in [200]
        except Exception as error:
            logger.error(str(error))
            value = False

        return value

    def _is_valid_channel(self, channel,
                          conda_url='https://conda.anaconda.org'):
        """Check that `channel` serves a repodata.json for this platform."""
        if channel.startswith('https://') or channel.startswith('http://'):
            url = channel
        else:
            url = "{0}/{1}".format(conda_url, channel)

        if url[-1] == '/':
            url = url[:-1]

        plat = self._conda_api.get_platform()
        repodata_url = "{0}/{1}/{2}".format(url, plat, 'repodata.json')

        try:
            r = requests.head(repodata_url)
            value = r.status_code in [200]
        except Exception as error:
            logger.error(str(error))
            value = False

        return value

    def _is_valid_api_url(self, url):
        """Check that `url` is an anaconda-style API endpoint."""
        # Check response is a JSON with ok: 1
        data = {}
        try:
            r = requests.get(url)
            content = to_text_string(r.content, encoding='utf-8')
            data = json.loads(content)
        except Exception as error:
            logger.error(str(error))

        return data.get('ok', 0) == 1

    def download(self, url, path=None, force=False):
        """Queue a download of `url`; returns the worker tracking it."""
        logger.debug(str((url, path, force)))
        method = self._download
        return self._create_worker(method, url, path=path, force=force)

    def terminate(self):
        """Ask all threads to quit and drop thread/worker references."""
        for t in self._threads:
            t.quit()
        # FIX: the original assigned to a misspelled `self._thread`,
        # leaving the real list of threads untouched.
        self._threads = []
        self._workers = []

    def is_valid_url(self, url, non_blocking=True):
        """Validate `url`; returns a worker when `non_blocking`, else a bool."""
        logger.debug(str((url)))
        if non_blocking:
            method = self._is_valid_url
            return self._create_worker(method, url)
        else:
            return self._is_valid_url(url)

    def is_valid_api_url(self, url, non_blocking=True):
        """Validate an API `url`; worker when `non_blocking`, else a bool."""
        logger.debug(str((url)))
        if non_blocking:
            method = self._is_valid_api_url
            return self._create_worker(method, url)
        else:
            return self._is_valid_api_url(url=url)

    def is_valid_channel(self, channel,
                         conda_url='https://conda.anaconda.org',
                         non_blocking=True):
        """Validate a conda `channel`; worker when `non_blocking`, else a bool."""
        logger.debug(str((channel, conda_url)))
        if non_blocking:
            method = self._is_valid_channel
            return self._create_worker(method, channel, conda_url)
        else:
            return self._is_valid_channel(channel, conda_url=conda_url)
# Lazily-created module-level singletons returned by the accessors below.
DOWNLOAD_API = None
REQUESTS_DOWNLOAD_API = None


def DownloadAPI():
    """Return the global _DownloadAPI instance, creating it on first use."""
    global DOWNLOAD_API

    if DOWNLOAD_API is None:
        DOWNLOAD_API = _DownloadAPI()

    return DOWNLOAD_API


def RequestsDownloadAPI():
    """Return the global _RequestsDownloadAPI instance, creating it on first use."""
    global REQUESTS_DOWNLOAD_API

    if REQUESTS_DOWNLOAD_API is None:
        REQUESTS_DOWNLOAD_API = _RequestsDownloadAPI()

    return REQUESTS_DOWNLOAD_API
def ready_print(worker, output, error):
    """Debug callback: print which method finished and with what result."""
    name = worker.method.__name__
    print(name, output, error)
def test():
    """Manual smoke test: exercise both download APIs against live URLs.

    Requires network access and a Qt application; downloads repodata
    files into the user's home directory.
    """
    from conda_manager.utils.qthelpers import qapplication
    urls = ['http://repo.continuum.io/pkgs/free/linux-64/repodata.json.bz2',
            'https://conda.anaconda.org/anaconda/linux-64/repodata.json.bz2',
            'https://conda.anaconda.org/asmeurer/linux-64/repodata.json.bz2',
            ]
    path = os.sep.join([os.path.expanduser('~'), 'testing-download'])
    app = qapplication()
    api = DownloadAPI()
    for i, url in enumerate(urls):
        filepath = os.path.join(path, str(i) + '.json.bz2')
        api.download(url, filepath)
        print('Downloading', url, filepath)
    path = os.sep.join([os.path.expanduser('~'), 'testing-download-requests'])
    api = RequestsDownloadAPI()
    urls += ['asdasdasdad']  # invalid url to exercise the failure path
    for i, url in enumerate(urls):
        worker = api.is_valid_url(url)
        worker.url = url
        worker.sig_finished.connect(ready_print)
        filepath = os.path.join(path, str(i) + '.json.bz2')
        worker = api.download(url, path=filepath, force=True)
        worker.sig_finished.connect(ready_print)
    api = RequestsDownloadAPI()
    print(api._is_valid_api_url('https://api.anaconda.org'))
    print(api._is_valid_api_url('https://conda.anaconda.org'))
    print(api._is_valid_channel('https://google.com'))
    print(api._is_valid_channel('https://conda.anaconda.org/continuumcrew'))
    app.exec_()


if __name__ == '__main__':
    test()
| |
# coding=utf-8
"""
Copyright 2013 LinkedIn Corp. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import datetime
import logging
import os
import re
import sys
import threading
from naarad.metrics.metric import Metric
import naarad.utils
from naarad.naarad_constants import important_sub_metrics_import
logger = logging.getLogger('naarad.metrics.GCMetric')
class GCMetric(Metric):
  """ Class for GC logs, deriving from class Metric """
  # Directory holding the bundled PrintGCStats awk script (next to the
  # executable that launched naarad).
  bin_path = os.path.dirname(sys.argv[0])
  # Timestamp format used by the JVM GC log datestamps.
  clock_format = '%Y-%m-%d %H:%M:%S'
  rate_types = ()
  # Every sub-metric PrintGCStats can produce (CMS/ParNew and G1 collectors).
  val_types = ('alloc', 'promo', 'used0', 'used1', 'used', 'commit0', 'commit1', 'commit', 'gen0', 'gen0t', 'gen0usr', 'gen0sys', 'gen0real',
               'cmsIM', 'cmsRM', 'cmsRS', 'GCPause', 'cmsCM', 'cmsCP', 'cmsCS', 'cmsCR', 'safept', 'apptime', 'used0AfterGC', 'used1AfterGC', 'usedAfterGC',
               'gen1t', 'g1-pause-young', 'g1-pause-mixed', 'g1-pause-remark', 'g1-pause-cleanup', 'g1-pause-remark.ref-proc', 'g1-pause-young.parallel',
               'g1-pause-young.parallel.gcworkers', 'g1-pause-young.parallel.ext-root-scanning.avg', 'g1-pause-young.parallel.ext-root-scanning.max',
               'g1-pause-young.parallel.update-rs.avg', 'g1-pause-young.parallel.update-rs.max', 'g1-pause-young.parallel.update-rs.processed-buffers.avg',
               'g1-pause-young.parallel.update-rs.processed-buffers.max', 'g1-pause-young.parallel.scan-rs.avg', 'g1-pause-young.parallel.scan-rs.max',
               'g1-pause-young.parallel.object-copy-rs.avg', 'g1-pause-young.parallel.object-copy-rs.max', 'g1-pause-young.parallel.termination.avg',
               'g1-pause-young.parallel.termination.max', 'g1-pause-young.parallel.gc-worker-other.avg', 'g1-pause-young.parallel.gc-worker-other.max',
               'g1-pause-young.parallel.gc-worker-total.avg', 'g1-pause-young.parallel.gc-worker-total.max', 'g1-pause-young.parallel.gc-worker-end.avg',
               'g1-pause-young.parallel.gc-worker-end.max', 'g1-pause-young.code-root-fixup', 'g1-pause-young.clear-ct', 'g1-pause-young.other',
               'g1-pause-young.other.choose-cset', 'g1-pause-young.other.ref-proc', 'g1-pause-young.other.reg-enq', 'g1-pause-young.other.free-cset',
               'g1-pause-mixed.parallel', 'g1-pause-mixed.parallel.gcworkers', 'g1-pause-mixed.parallel.ext-root-scanning.avg',
               'g1-pause-mixed.parallel.ext-root-scanning.max', 'g1-pause-mixed.parallel.update-rs.avg', 'g1-pause-mixed.parallel.update-rs.max',
               'g1-pause-mixed.parallel.update-rs.processed-buffers.avg', 'g1-pause-mixed.parallel.update-rs.processed-buffers.max',
               'g1-pause-mixed.parallel.scan-rs.avg', 'g1-pause-mixed.parallel.scan-rs.max', 'g1-pause-mixed.parallel.object-copy-rs.avg',
               'g1-pause-mixed.parallel.object-copy-rs.max', 'g1-pause-mixed.parallel.termination.avg', 'g1-pause-mixed.parallel.termination.max',
               'g1-pause-mixed.parallel.gc-worker-other.avg', 'g1-pause-mixed.parallel.gc-worker-other.max', 'g1-pause-mixed.parallel.gc-worker-total.avg',
               'g1-pause-mixed.parallel.gc-worker-total.max', 'g1-pause-mixed.parallel.gc-worker-end.avg', 'g1-pause-mixed.parallel.gc-worker-end.max',
               'g1-pause-mixed.code-root-fixup', 'g1-pause-mixed.clear-ct', 'g1-pause-mixed.other', 'g1-pause-mixed.other.choose-cset',
               'g1-pause-mixed.other.ref-proc', 'g1-pause-mixed.other.reg-enq', 'g1-pause-mixed.other.free-cset', 'g1-pause-young.parallel.gc-worker-start.avg',
               'g1-pause-young.parallel.gc-worker-start.max', 'g1-pause-mixed.parallel.gc-worker-start.avg', 'g1-pause-mixed.parallel.gc-worker-start.max',
               'g1-eden-occupancy-before-gc', 'g1-eden-capacity-before-gc', 'g1-eden-occupancy-after-gc', 'g1-eden-capacity-after-gc', 'g1-survivor-before-gc',
               'g1-survivor-after-gc', 'g1-heap-occupancy-before-gc', 'g1-heap-capacity-before-gc', 'g1-heap-occupancy-after-gc', 'g1-heap-capacity-after-gc',
               'g1-young-cpu.sys', 'g1-young-cpu.usr', 'g1-young-cpu.real', 'g1-mixed-cpu.usr', 'g1-mixed-cpu.sys', 'g1-mixed-cpu.real')

  def __init__(self, metric_type, infile_list, hostname, aggr_metrics, outdir, resource_path, label, ts_start, ts_end, rule_strings,
               important_sub_metrics, anomaly_detection_metrics, **other_options):
    """Initialize the GC metric and record which sub-metrics to extract."""
    Metric.__init__(self, metric_type, infile_list, hostname, aggr_metrics, outdir, resource_path, label, ts_start, ts_end, rule_strings,
                    important_sub_metrics, anomaly_detection_metrics)
    if not self.important_sub_metrics:
      self.important_sub_metrics = important_sub_metrics_import['GC']
    self.sub_metrics = self.val_types
    self.beginning_ts = None
    self.beginning_date = None
    # 'gc-options'/'sub_metrics' narrow the extracted set; any other
    # option key is simply set as an attribute on this metric.
    for (key, val) in other_options.iteritems():
      if key == 'gc-options' or key == 'sub_metrics':
        self.sub_metrics = val.split()
      else:
        setattr(self, key, val)
    # Human-readable labels for each sub-metric, used in reports.
    self.sub_metric_description = {
        'gen0': 'young gen collection time, excluding gc_prologue & gc_epilogue',
        'gen0t': 'young gen collection time, including gc_prologue & gc_epilogue',
        'gen0usr': 'young gen collection time in cpu user secs',
        'gen0sys': 'young gen collection time in cpu sys secs',
        'gen0real': 'young gen collection time in elapsed secs',
        'gen1i': 'train generation incremental collection',
        'gen1t': 'old generation collection or full GC',
        'cmsIM': 'CMS initial mark pause',
        'cmsRM': 'CMS remark pause',
        'cmsRS': 'CMS resize pause',
        'GCPause': 'all stop-the-world GC pauses',
        'cmsCM': 'CMS concurrent mark phase',
        'cmsCP': 'CMS concurrent preclean phase',
        'cmsCS': 'CMS concurrent sweep phase',
        'cmsCR': 'CMS concurrent reset phase',
        'alloc': 'object allocation in MB (approximate***)',
        'promo': 'object promotion in MB (approximate***)',
        'used0': 'young gen used memory size (before gc)',
        'used1': 'old gen used memory size (before gc)',
        'used': 'heap space used memory size (before gc) (excludes perm gen)',
        'commit0': 'young gen committed memory size (after gc)',
        'commit1': 'old gen committed memory size (after gc)',
        'commit': 'heap committed memory size (after gc) (excludes perm gen)',
        'apptime': 'amount of time application threads were running',
        'safept': 'amount of time the VM spent at safepoints (app threads stopped)',
        'used0AfterGC': 'young gen used memory size (after gc)',
        'used1AfterGC': 'old gen used memory size (after gc)',
        'usedAfterGC': 'heap space used memory size (after gc)',
        'g1-pause-young': 'G1 Young GC Pause (seconds)',
        'g1-pause-mixed': 'G1 Mixed GC Pause (seconds)',
        'g1-pause-remark': 'G1 Remark Pause (seconds)',
        'g1-pause-cleanup': 'G1 Cleanup Pause (seconds)',
        'g1-pause-remark.ref-proc': 'G1 Remark: Reference Processing (seconds)',
        'g1-pause-young.parallel': 'G1 Young GC Pause: Parallel Operations (ms)',
        'g1-pause-young.parallel.gcworkers': 'G1 Young GC Pause: Number of Parallel GC Workers',
        'g1-pause-young.parallel.gc-worker-start.avg': 'G1 Young GC Pause : Parallel : Avg Time spent in GC worker start (ms)',
        'g1-pause-young.parallel.gc-worker-start.max': 'G1 Young GC Pause : Parallel : Max Time spent in GC worker start (ms)',
        'g1-pause-young.parallel.ext-root-scanning.avg': 'G1 Young GC Pause: Avg Time spent in ext-root-scanning',
        'g1-pause-young.parallel.ext-root-scanning.max': 'G1 Young GC Pause: Max Time spent in ext-root-scanning',
        'g1-pause-young.parallel.update-rs.avg': 'G1 Young GC Pause: Parallel : Avg Time spent in updating Rsets',
        'g1-pause-young.parallel.update-rs.max': 'G1 Young GC Pause: Parallel : Max Time spent in updating Rsets',
        'g1-pause-young.parallel.update-rs.processed-buffers.avg': 'G1 Young GC Pause : Parallel : Update Rset : Avg number of processed buffers',
        'g1-pause-young.parallel.update-rs.processed-buffers.max': 'G1 Young GC Pause : Parallel : Update Rset : Max number of processed buffers',
        'g1-pause-young.parallel.scan-rs.avg': 'G1 Young GC Pause: Parallel : Avg Time spent in scanning Rsets',
        'g1-pause-young.parallel.scan-rs.max': 'G1 Young GC Pause: Parallel : Max Time spent in scannning Rsets',
        'g1-pause-young.parallel.object-copy-rs.avg': 'G1 Young GC Pause : Parallel : Avg Time spent in Object Copy',
        'g1-pause-young.parallel.object-copy-rs.max': 'G1 Young GC Pause : Parallel : Max Time spent in Object Copy',
        'g1-pause-young.parallel.termination.avg': 'G1 Young GC Pause : Parallel : Avg Time spent in termination',
        'g1-pause-young.parallel.termination.max': 'G1 Young GC Pause : Parallel : Max Time spent in termination',
        'g1-pause-young.parallel.gc-worker-other.avg': 'G1 Young GC Pause : Parallel : Avg Time spent in other',
        'g1-pause-young.parallel.gc-worker-other.max': 'G1 Young GC Pause : Parallel : Max Time spent in other',
        'g1-pause-young.parallel.gc-worker-total.avg': 'G1 Young GC Pause : Parallel : Avg Total time for GC worker',
        'g1-pause-young.parallel.gc-worker-total.max': 'G1 Young GC Pause : Parallel : Max Total time for GC worker',
        'g1-pause-young.parallel.gc-worker-end.avg': 'G1 Young GC Pause : Parallel : Avg Time for GC worker end',
        'g1-pause-young.parallel.gc-worker-end.max': 'G1 Young GC Pause : Parallel : Max Time for GC worker end',
        'g1-pause-young.code-root-fixup': 'G1 Young GC Pause : Time spent in code root fixup (ms)',
        'g1-pause-young.clear-ct': 'G1 Young GC Pause: Time spent in clear ct (ms)',
        'g1-pause-young.other': 'G1 Young GC Pause: Time spent in other (ms)',
        'g1-pause-young.other.choose-cset': 'G1 Young GC Pause : Other : Time spent in choosing CSet (ms)',
        'g1-pause-young.other.ref-proc': 'G1 Young GC Pause : Other : Time spent in reference processing (ms)',
        'g1-pause-young.other.reg-enq': 'G1 Young GC Pause : Other : Time spent in reg-enq(ms)',
        'g1-pause-young.other.free-cset': 'G1 Young GC Pause : Other : Time spent in processing free Cset(ms)',
        'g1-pause-mixed.parallel': 'G1 Mixed GC Pause: Parallel Operations (ms)',
        'g1-pause-mixed.parallel.gcworkers': 'G1 Mixed GC Pause: Number of Parallel GC Workers',
        'g1-pause-mixed.parallel.gc-worker-start.avg': 'G1 Mixed GC Pause : Parallel : Avg Time spent in GC worker start (ms)',
        'g1-pause-mixed.parallel.gc-worker-start.max': 'G1 Mixed GC Pause : Parallel : Max Time spent in GC worker start (ms)',
        'g1-pause-mixed.parallel.ext-root-scanning.avg': 'G1 Mixed GC Pause: Avg Time spent in ext-root-scanning',
        'g1-pause-mixed.parallel.ext-root-scanning.max': 'G1 Mixed GC Pause: Max Time spent in ext-root-scanning',
        'g1-pause-mixed.parallel.update-rs.avg': 'G1 Mixed GC Pause: Parallel : Avg Time spent in updating Rsets',
        'g1-pause-mixed.parallel.update-rs.max': 'G1 Mixed GC Pause: Parallel : Max Time spent in updating Rsets',
        'g1-pause-mixed.parallel.update-rs.processed-buffers.avg': 'G1 Mixed GC Pause : Parallel : Update Rset : Avg number of processed buffers',
        'g1-pause-mixed.parallel.update-rs.processed-buffers.max': 'G1 Mixed GC Pause : Parallel : Update Rset : Max number of processed buffers',
        'g1-pause-mixed.parallel.scan-rs.avg': 'G1 Mixed GC Pause: Parallel : Avg Time spent in scanning Rsets',
        'g1-pause-mixed.parallel.scan-rs.max': 'G1 Mixed GC Pause: Parallel : Max Time spent in scannning Rsets',
        'g1-pause-mixed.parallel.object-copy-rs.avg': 'G1 Mixed GC Pause : Parallel : Avg Time spent in Object Copy',
        'g1-pause-mixed.parallel.object-copy-rs.max': 'G1 Mixed GC Pause : Parallel : Max Time spent in Object Copy',
        'g1-pause-mixed.parallel.termination.avg': 'G1 Mixed GC Pause : Parallel : Avg Time spent in termination',
        'g1-pause-mixed.parallel.termination.max': 'G1 Mixed GC Pause : Parallel : Max Time spent in termination',
        'g1-pause-mixed.parallel.gc-worker-other.avg': 'G1 Mixed GC Pause : Parallel : Avg Time spent in other',
        'g1-pause-mixed.parallel.gc-worker-other.max': 'G1 Mixed GC Pause : Parallel : Max Time spent in other',
        'g1-pause-mixed.parallel.gc-worker-total.avg': 'G1 Mixed GC Pause : Parallel : Avg Total time for GC worker',
        'g1-pause-mixed.parallel.gc-worker-total.max': 'G1 Mixed GC Pause : Parallel : Max Total time for GC worker',
        'g1-pause-mixed.parallel.gc-worker-end.avg': 'G1 Mixed GC Pause : Parallel : Avg Time for GC worker end',
        'g1-pause-mixed.parallel.gc-worker-end.max': 'G1 Mixed GC Pause : Parallel : Max Time for GC worker end',
        'g1-pause-mixed.code-root-fixup': 'G1 Mixed GC Pause : Time spent in code root fixup (ms)',
        'g1-pause-mixed.clear-ct': 'G1 Mixed GC Pause: Time spent in clear ct (ms)',
        'g1-pause-mixed.other': 'G1 Mixed GC Pause: Time spent in other (ms)',
        'g1-pause-mixed.other.choose-cset': 'G1 Mixed GC Pause : Other : Time spent in choosing CSet (ms)',
        'g1-pause-mixed.other.ref-proc': 'G1 Mixed GC Pause : Other : Time spent in reference processing (ms)',
        'g1-pause-mixed.other.reg-enq': 'G1 Mixed GC Pause : Other : Time spent in reg-enq(ms)',
        'g1-pause-mixed.other.free-cset': 'G1 Mixed GC Pause : Other : Time spent in processing free Cset(ms)',
        'g1-eden-occupancy-before-gc': 'G1 Eden Occupancy (MB) (Before GC)',
        'g1-eden-capacity-before-gc': 'G1 Eden Capacity (MB) (Before GC)',
        'g1-eden-occupancy-after-gc': 'G1 Eden Occupancy (MB) (After GC)',
        'g1-eden-capacity-after-gc': 'G1 Eden Capacity (MB) (After GC)',
        'g1-survivor-before-gc': 'G1 Survivor Size (MB) (Before GC)',
        'g1-survivor-after-gc': 'G1 Survivor Size (MB) (After GC)',
        'g1-heap-occupancy-before-gc': 'G1 Heap Occupancy (MB) (Before GC)',
        'g1-heap-capacity-before-gc': 'G1 Heap Capacity (MB) (Before GC)',
        'g1-heap-occupancy-after-gc': 'G1 Heap Occupancy (MB) (After GC)',
        'g1-heap-capacity-after-gc': 'G1 Heap Capacity (MB) (After GC)',
        'g1-young-cpu.sys': 'G1 Young GC : sys cpu time (seconds)',
        'g1-young-cpu.usr': 'G1 Young GC : usr cpu time (seconds)',
        'g1-young-cpu.real': 'G1 Young GC : elapsed time (seconds)',
        'g1-mixed-cpu.usr': 'G1 Mixed GC : usr cpu time (seconds)',
        'g1-mixed-cpu.sys': 'G1 Mixed GC : sys cpu time (seconds)',
        'g1-mixed-cpu.real': 'G1 Mixed GC : elapsed time (seconds)'
    }

  def parse(self):
    """Run PrintGCStats over the GC logs and collect the emitted CSVs.

    Always returns True.  NOTE(review): the os.system exit status is
    ignored, so failures of the awk script only surface as missing CSV
    files; the command string is also built by concatenation -- fine
    for trusted config, but worth confirming.
    """
    prefix = os.path.join(self.resource_directory, self.label)
    awk_cmd = os.path.join(self.bin_path, 'PrintGCStats')
    gc_metrics = set(self.val_types) & set(self.sub_metrics)
    if self.ts_start:
      awk_cmd += ' -v ts_start="' + naarad.utils.get_standardized_timestamp(self.ts_start, None) + '"'
    if self.ts_end:
      awk_cmd += ' -v ts_end="' + naarad.utils.get_standardized_timestamp(self.ts_end, None) + '"'
    cmd = "{0} -v plot={1} -v splitfiles=1 -v datestamps=1 -v plotcolumns=2 -v splitfileprefix={2} {3}".format(awk_cmd, ','.join(gc_metrics), prefix,
                                                                                                              ' '.join(self.infile_list))
    logger.info("Parsing GC metric with cmd: %s", cmd)
    os.system(cmd)
    for gc_sub_metric in gc_metrics:
      outcsv = self.get_csv(gc_sub_metric)
      if naarad.utils.is_valid_file(outcsv):
        self.csv_files.append(outcsv)
    return True
| |
"""a navigable completer for the qtconsole"""
# coding: utf-8
#-----------------------------------------------------------------------------
# Copyright (c) 2012, IPython Development Team.$
#
# Distributed under the terms of the Modified BSD License.$
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
# System library imports
import IPython.utils.text as text
from IPython.external.qt import QtCore, QtGui
#--------------------------------------------------------------------------
# Return an HTML table with selected item in a special class
#--------------------------------------------------------------------------
def html_tableify(item_matrix, select=None, header=None, footer=None):
    """Return an HTML table string for `item_matrix`.

    `select` is an optional (row, col) pair whose cell gets the
    "inverted" CSS class; `header`/`footer` are repeated across an
    extra top/bottom row when given.
    """
    if not item_matrix:
        return ''
    cells = [[u'<td>' + item + u' </td>' for item in row]
             for row in item_matrix]
    if select:
        row, col = select
        # Tag the selected cell with the highlight class.
        cells[row][col] = (u'<td class="inverted">'
                           + item_matrix[row][col]
                           + u' </td>')
    body_rows = [u'<tr>' + u''.join(row) + u'</tr>' for row in cells]
    ncols = len(item_matrix[0])
    head = ''
    foot = ''
    if header:
        head = u'<tr>' + (u'<td>' + header + u'</td>') * ncols + u'</tr>'
    if footer:
        foot = u'<tr>' + (u'<td>' + footer + u'</td>') * ncols + u'</tr>'
    return (u'<table class="completion" style="white-space:pre">'
            + head + u''.join(body_rows) + foot + u'</table>')
class SlidingInterval(object):
    """A bounded interval [start, stop] that follows a cursor.

    Internally used to scroll the completion view when the cursor
    tries to go beyond the edges, and to show '...' when rows are
    hidden.
    """
    _min = 0
    _max = 1
    _current = 0

    def __init__(self, maximum=1, width=6, minimum=0, sticky_lenght=1):
        """Create a new bounded interval.

        Cursor values are clamped into [minimum, maximum]; the window is
        `width` rows wide and snaps to either extremity when within
        `sticky_lenght` of it.
        """
        self._min = minimum
        self._max = maximum
        self._start = 0
        self._width = width
        self._stop = self._start + self._width + 1
        self._sticky_lenght = sticky_lenght

    @property
    def current(self):
        """Current cursor position."""
        return self._current

    @current.setter
    def current(self, value):
        """Clamp `value` into bounds and slide the window to contain it."""
        cursor = min(max(self._min, value), self._max)
        self._current = cursor
        if cursor > self._stop:
            self._stop = cursor
            self._start = cursor - self._width
        elif cursor < self._start:
            self._start = cursor
            self._stop = cursor + self._width
        # Stick to the extremities when close enough to them.
        if abs(self._start - self._min) <= self._sticky_lenght:
            self._start = self._min
        if abs(self._stop - self._max) <= self._sticky_lenght:
            self._stop = self._max

    @property
    def start(self):
        """First visible row of the interval."""
        return self._start

    @property
    def stop(self):
        """Last visible row of the interval."""
        return self._stop

    @property
    def width(self):
        return self._stop - self._start

    @property
    def nth(self):
        return self.current - self.start
class CompletionHtml(QtGui.QWidget):
""" A widget for tab completion, navigable by arrow keys """
#--------------------------------------------------------------------------
# 'QObject' interface
#--------------------------------------------------------------------------
_items = ()
_index = (0, 0)
_consecutive_tab = 0
_size = (1, 1)
_old_cursor = None
_start_position = 0
_slice_start = 0
_slice_len = 4
def __init__(self, console_widget):
""" Create a completion widget that is attached to the specified Qt
text edit widget.
"""
assert isinstance(console_widget._control, (QtGui.QTextEdit, QtGui.QPlainTextEdit))
super(CompletionHtml, self).__init__()
self._text_edit = console_widget._control
self._console_widget = console_widget
self._text_edit.installEventFilter(self)
self._sliding_interval = None
self._justified_items = None
# Ensure that the text edit keeps focus when widget is displayed.
self.setFocusProxy(self._text_edit)
def eventFilter(self, obj, event):
""" Reimplemented to handle keyboard input and to auto-hide when the
text edit loses focus.
"""
if obj == self._text_edit:
etype = event.type()
if etype == QtCore.QEvent.KeyPress:
key = event.key()
if self._consecutive_tab == 0 and key in (QtCore.Qt.Key_Tab,):
return False
elif self._consecutive_tab == 1 and key in (QtCore.Qt.Key_Tab,):
# ok , called twice, we grab focus, and show the cursor
self._consecutive_tab = self._consecutive_tab+1
self._update_list()
return True
elif self._consecutive_tab == 2:
if key in (QtCore.Qt.Key_Return, QtCore.Qt.Key_Enter):
self._complete_current()
return True
if key in (QtCore.Qt.Key_Tab,):
self.select_right()
self._update_list()
return True
elif key in ( QtCore.Qt.Key_Down,):
self.select_down()
self._update_list()
return True
elif key in (QtCore.Qt.Key_Right,):
self.select_right()
self._update_list()
return True
elif key in ( QtCore.Qt.Key_Up,):
self.select_up()
self._update_list()
return True
elif key in ( QtCore.Qt.Key_Left,):
self.select_left()
self._update_list()
return True
elif key in ( QtCore.Qt.Key_Escape,):
self.cancel_completion()
return True
else :
self.cancel_completion()
else:
self.cancel_completion()
elif etype == QtCore.QEvent.FocusOut:
self.cancel_completion()
return super(CompletionHtml, self).eventFilter(obj, event)
#--------------------------------------------------------------------------
# 'CompletionHtml' interface
#--------------------------------------------------------------------------
def cancel_completion(self):
"""Cancel the completion
should be called when the completer have to be dismissed
This reset internal variable, clearing the temporary buffer
of the console where the completion are shown.
"""
self._consecutive_tab = 0
self._slice_start = 0
self._console_widget._clear_temporary_buffer()
self._index = (0, 0)
if(self._sliding_interval):
self._sliding_interval = None
#
# ... 2 4 4 4 4 4 4 4 4 4 4 4 4
# 2 2 4 4 4 4 4 4 4 4 4 4 4 4
#
#2 2 x x x x x x x x x x x 5 5
#6 6 x x x x x x x x x x x 5 5
#6 6 x x x x x x x x x x ? 5 5
#6 6 x x x x x x x x x x ? 1 1
#
#3 3 3 3 3 3 3 3 3 3 3 3 1 1 1 ...
#3 3 3 3 3 3 3 3 3 3 3 3 1 1 1 ...
def _select_index(self, row, col):
"""Change the selection index, and make sure it stays in the right range
A little more complicated than just dooing modulo the number of row columns
to be sure to cycle through all element.
horizontaly, the element are maped like this :
to r <-- a b c d e f --> to g
to f <-- g h i j k l --> to m
to l <-- m n o p q r --> to a
and vertically
a d g j m p
b e h k n q
c f i l o r
"""
nr, nc = self._size
nr = nr-1
nc = nc-1
# case 1
if (row > nr and col >= nc) or (row >= nr and col > nc):
self._select_index(0, 0)
# case 2
elif (row <= 0 and col < 0) or (row < 0 and col <= 0):
self._select_index(nr, nc)
# case 3
elif row > nr :
self._select_index(0, col+1)
# case 4
elif row < 0 :
self._select_index(nr, col-1)
# case 5
elif col > nc :
self._select_index(row+1, 0)
# case 6
elif col < 0 :
self._select_index(row-1, nc)
elif 0 <= row and row <= nr and 0 <= col and col <= nc :
self._index = (row, col)
else :
raise NotImplementedError("you'r trying to go where no completion\
have gone before : %d:%d (%d:%d)"%(row, col, nr, nc) )
@property
def _slice_end(self):
end = self._slice_start+self._slice_len
if end > len(self._items) :
return None
return end
def select_up(self):
"""move cursor up"""
r, c = self._index
self._select_index(r-1, c)
def select_down(self):
"""move cursor down"""
r, c = self._index
self._select_index(r+1, c)
def select_left(self):
"""move cursor left"""
r, c = self._index
self._select_index(r, c-1)
def select_right(self):
"""move cursor right"""
r, c = self._index
self._select_index(r, c+1)
def show_items(self, cursor, items):
""" Shows the completion widget with 'items' at the position specified
by 'cursor'.
"""
if not items :
return
self._start_position = cursor.position()
self._consecutive_tab = 1
items_m, ci = text.compute_item_matrix(items, empty=' ')
self._sliding_interval = SlidingInterval(len(items_m)-1)
self._items = items_m
self._size = (ci['rows_numbers'], ci['columns_numbers'])
self._old_cursor = cursor
self._index = (0, 0)
sjoin = lambda x : [ y.ljust(w, ' ') for y, w in zip(x, ci['columns_width'])]
self._justified_items = map(sjoin, items_m)
self._update_list(hilight=False)
def _update_list(self, hilight=True):
""" update the list of completion and hilight the currently selected completion """
self._sliding_interval.current = self._index[0]
head = None
foot = None
if self._sliding_interval.start > 0 :
head = '...'
if self._sliding_interval.stop < self._sliding_interval._max:
foot = '...'
items_m = self._justified_items[\
self._sliding_interval.start:\
self._sliding_interval.stop+1\
]
self._console_widget._clear_temporary_buffer()
if(hilight):
sel = (self._sliding_interval.nth, self._index[1])
else :
sel = None
strng = html_tableify(items_m, select=sel, header=head, footer=foot)
self._console_widget._fill_temporary_buffer(self._old_cursor, strng, html=True)
#--------------------------------------------------------------------------
# Protected interface
#--------------------------------------------------------------------------
def _complete_current(self):
""" Perform the completion with the currently selected item.
"""
i = self._index
item = self._items[i[0]][i[1]]
item = item.strip()
if item :
self._current_text_cursor().insertText(item)
self.cancel_completion()
def _current_text_cursor(self):
    """ Returns a cursor with text between the start position and the
        current position selected.
    """
    caret = self._text_edit.textCursor()
    # Anchor back at the completion start only when the caret has not
    # moved before it; otherwise return the unanchored cursor.
    if caret.position() >= self._start_position:
        caret.setPosition(self._start_position,
                          QtGui.QTextCursor.KeepAnchor)
    return caret
# ---- file-boundary artifact: bundled httplib2 module begins below ----
from __future__ import generators
"""
httplib2
A caching http interface that supports ETags and gzip
to conserve bandwidth.
Requires Python 2.3 or later
Changelog:
2007-08-18, Rick: Modified so it's able to use a socks proxy if needed.
"""
__author__ = "Joe Gregorio (joe@bitworking.org)"
__copyright__ = "Copyright 2006, Joe Gregorio"
__contributors__ = ["Thomas Broyer (t.broyer@ltgt.net)",
"James Antill",
"Xavier Verges Farrero",
"Jonathan Feinberg",
"Blair Zajac",
"Sam Ruby",
"Louis Nyffenegger"]
__license__ = "MIT"
__version__ = "$Rev$"
import re
import sys
import email
import email.Utils
import email.Message
import email.FeedParser
import StringIO
import gzip
import zlib
import httplib
import urlparse
import base64
import os
import copy
import calendar
import time
import random
# remove depracated warning in python2.6
try:
from hashlib import sha1 as _sha, md5 as _md5
except ImportError:
import sha
import md5
_sha = sha.new
_md5 = md5.new
import hmac
from gettext import gettext as _
import socket
try:
import socks
except ImportError:
socks = None
# Build the appropriate socket wrapper for ssl
try:
    import ssl # python 2.6
    _ssl_wrap_socket = ssl.wrap_socket
except ImportError:
    # Pre-2.6 fallback: wrap via the legacy socket.ssl()/FakeSocket API.
    def _ssl_wrap_socket(sock, key_file, cert_file):
        ssl_sock = socket.ssl(sock, key_file, cert_file)
        return httplib.FakeSocket(sock, ssl_sock)

if sys.version_info >= (2,3):
    from iri2uri import iri2uri
else:
    # No IRI support before 2.3: pass URIs through unchanged.
    def iri2uri(uri):
        return uri
def has_timeout(timeout): # python 2.6
    """Return True when *timeout* is an explicit timeout value.

    Both None and the socket module's global-default sentinel (present
    from python 2.6 onwards) mean "no explicit timeout".
    """
    if timeout is None:
        return False
    sentinel = getattr(socket, '_GLOBAL_DEFAULT_TIMEOUT', None)
    if sentinel is not None and timeout is sentinel:
        return False
    return True
__all__ = ['Http', 'Response', 'ProxyInfo', 'HttpLib2Error',
'RedirectMissingLocation', 'RedirectLimit', 'FailedToDecompressContent',
'UnimplementedDigestAuthOptionError', 'UnimplementedHmacDigestAuthOptionError',
'debuglevel']
# The httplib debug level, set to a non-zero value to get debug output
debuglevel = 0
# Python 2.3 support
if sys.version_info < (2,4):
    # The sorted() builtin only appeared in 2.4; provide a minimal
    # fallback.  Unlike the builtin it sorts *seq* in place and accepts
    # no key/reverse arguments -- callers in this module only need this.
    def sorted(seq):
        seq.sort()
        return seq
# Python 2.3 support
# Backport of httplib.HTTPResponse.getheaders() (added in python 2.4).
def HTTPResponse__getheaders(self):
    """Return list of (header, value) tuples."""
    if self.msg is None:
        # Same error the stdlib raises before the response is parsed.
        raise httplib.ResponseNotReady()
    return self.msg.items()

# Monkey-patch the backport in only when the running httplib lacks it.
if not hasattr(httplib.HTTPResponse, 'getheaders'):
    httplib.HTTPResponse.getheaders = HTTPResponse__getheaders
# All exceptions raised here derive from HttpLib2Error
class HttpLib2Error(Exception): pass

# Some exceptions can be caught and optionally
# be turned back into responses.
class HttpLib2ErrorWithResponse(HttpLib2Error):
    # Carries the offending (response, content) pair so callers can
    # recover the partial result that triggered the error.
    def __init__(self, desc, response, content):
        self.response = response
        self.content = content
        HttpLib2Error.__init__(self, desc)

# Raised on a redirect status (other than 300) without a Location header.
class RedirectMissingLocation(HttpLib2ErrorWithResponse): pass
# Raised when more redirects occur than 'redirections' allows.
class RedirectLimit(HttpLib2ErrorWithResponse): pass
# Raised when a gzip/deflate body cannot be decoded.
class FailedToDecompressContent(HttpLib2ErrorWithResponse): pass
# Raised for Digest auth challenge options this client does not implement.
class UnimplementedDigestAuthOptionError(HttpLib2ErrorWithResponse): pass
# Raised for HMACDigest auth challenge options this client does not implement.
class UnimplementedHmacDigestAuthOptionError(HttpLib2ErrorWithResponse): pass

# Raised by urlnorm() when a URI lacks a scheme or authority.
class RelativeURIError(HttpLib2Error): pass
# Raised when the target host cannot be resolved (socket.gaierror).
class ServerNotFoundError(HttpLib2Error): pass
# Open Items:
# -----------
# Proxy support
# Are we removing the cached content too soon on PUT (only delete on 200 Maybe?)
# Pluggable cache storage (supports storing the cache in
# flat files by default. We need a plug-in architecture
# that can support Berkeley DB and Squid)
# == Known Issues ==
# Does not handle a resource that uses conneg and Last-Modified but no ETag as a cache validator.
# Does not handle Cache-Control: max-stale
# Does not use Age: headers when calculating cache freshness.
# The number of redirections to follow before giving up.
# Note that only GET redirects are automatically followed.
# Will also honor 301 requests by saving that info and never
# requesting that URI again.
DEFAULT_MAX_REDIRECTS = 5
# Which headers are hop-by-hop headers by default
HOP_BY_HOP = ['connection', 'keep-alive', 'proxy-authenticate', 'proxy-authorization', 'te', 'trailers', 'transfer-encoding', 'upgrade']
def _get_end2end_headers(response):
    """Return the names of the end-to-end headers in *response*.

    Hop-by-hop headers are the static HOP_BY_HOP list plus whatever the
    response's own Connection header declares as per-connection.
    """
    declared = [name.strip() for name in response.get('connection', '').split(',')]
    hop_by_hop = list(HOP_BY_HOP) + declared
    return [header for header in response.keys() if header not in hop_by_hop]
URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?")
def parse_uri(uri):
    """Parses a URI using the regex given in Appendix B of RFC 3986.

        (scheme, authority, path, query, fragment) = parse_uri(uri)
    """
    pieces = URI.match(uri).groups()
    # Groups 1, 3, 4, 6 and 8 are the bare components; the remaining
    # groups also capture their delimiters and are discarded.
    return (pieces[1], pieces[3], pieces[4], pieces[6], pieces[8])
def urlnorm(uri):
    """Normalize *uri*; return (scheme, authority, request_uri, defrag_uri).

    - scheme and authority are lower-cased,
    - an empty path becomes "/",
    - request_uri is the path plus any "?query",
    - defrag_uri is the absolute URI minus its fragment (used as the
      cache key).

    Raises RelativeURIError when the URI has no scheme or no authority.
    """
    (scheme, authority, path, query, fragment) = parse_uri(uri)
    if not scheme or not authority:
        raise RelativeURIError("Only absolute URIs are allowed. uri = %s" % uri)
    authority = authority.lower()
    # Lower-case once (the original lowered scheme a second, redundant time).
    scheme = scheme.lower()
    if not path:
        path = "/"
    # Could do syntax based normalization of the URI before
    # computing the digest. See Section 6.2.2 of Std 66.
    request_uri = query and "?".join([path, query]) or path
    defrag_uri = scheme + "://" + authority + request_uri
    return scheme, authority, request_uri, defrag_uri
# Cache filename construction (original borrowed from Venus http://intertwingly.net/code/venus/)
re_url_scheme = re.compile(r'^\w+://')
re_slash = re.compile(r'[?/:|]+')
def safename(filename):
    """Return a filename suitable for the cache.

    Strips dangerous and common characters to create a filename we
    can use to store the cache in.
    """
    try:
        # For absolute URLs, try to IDNA-encode so non-ASCII names still
        # yield a filesystem-safe result.  NOTE(review): 'idna' encoding
        # of a whole URL commonly raises UnicodeError, which is swallowed
        # below -- presumably intentional best-effort; confirm.
        if re_url_scheme.match(filename):
            if isinstance(filename,str):
                filename = filename.decode('utf-8')
                filename = filename.encode('idna')
            else:
                filename = filename.encode('idna')
    except UnicodeError:
        pass
    if isinstance(filename,unicode):
        filename=filename.encode('utf-8')
    # Hash the full (untruncated) name first so stripping/truncation below
    # cannot make two distinct URIs collide.
    filemd5 = _md5(filename).hexdigest()
    filename = re_url_scheme.sub("", filename)
    filename = re_slash.sub(",", filename)

    # limit length of filename
    if len(filename)>200:
        filename=filename[:200]
    return ",".join((filename, filemd5))
NORMALIZE_SPACE = re.compile(r'(?:\r\n)?[ \t]+')
def _normalize_headers(headers):
    """Return a copy of *headers* with lower-cased keys and folded values.

    Linear white space (continuation CRLFs plus runs of spaces/tabs) in
    each value is collapsed to a single space and the value is stripped,
    per the RFC 2616 LWS folding rule.
    """
    # BUG FIX: re.sub's signature is pattern.sub(repl, string); the
    # arguments were swapped (sub(value, ' ')), so LWS was never folded
    # and backslashes in header values could be misread as group refs.
    return dict([(key.lower(), NORMALIZE_SPACE.sub(' ', value).strip())
                 for (key, value) in headers.iteritems()])
def _parse_cache_control(headers):
retval = {}
if headers.has_key('cache-control'):
parts = headers['cache-control'].split(',')
parts_with_args = [tuple([x.strip().lower() for x in part.split("=", 1)]) for part in parts if -1 != part.find("=")]
parts_wo_args = [(name.strip().lower(), 1) for name in parts if -1 == name.find("=")]
retval = dict(parts_with_args + parts_wo_args)
return retval
# Whether to use a strict mode to parse WWW-Authenticate headers
# Might lead to bad results in case of ill-formed header value,
# so disabled by default, falling back to relaxed parsing.
# Set to true to turn on, usefull for testing servers.
USE_WWW_AUTH_STRICT_PARSING = 0
# In regex below:
# [^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+ matches a "token" as defined by HTTP
# "(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?" matches a "quoted-string" as defined by HTTP, when LWS have already been replaced by a single space
# Actually, as an auth-param value can be either a token or a quoted-string, they are combined in a single pattern which matches both:
# \"?((?<=\")(?:[^\0-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x08\x0A-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?
WWW_AUTH_STRICT = re.compile(r"^(?:\s*(?:,\s*)?([^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+)\s*=\s*\"?((?<=\")(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?)(.*)$")
WWW_AUTH_RELAXED = re.compile(r"^(?:\s*(?:,\s*)?([^ \t\r\n=]+)\s*=\s*\"?((?<=\")(?:[^\\\"]|\\.)*?(?=\")|(?<!\")[^ \t\r\n,]+(?!\"))\"?)(.*)$")
UNQUOTE_PAIRS = re.compile(r'\\(.)')
def _parse_www_authenticate(headers, headername='www-authenticate'):
    """Returns a dictionary of dictionaries, one dict
    per auth_scheme."""
    retval = {}
    if headers.has_key(headername):
        authenticate = headers[headername].strip()
        # Strict parsing follows the RFC token/quoted-string grammar; the
        # relaxed regex (the default) tolerates ill-formed real headers.
        www_auth = USE_WWW_AUTH_STRICT_PARSING and WWW_AUTH_STRICT or WWW_AUTH_RELAXED
        while authenticate:
            # Break off the scheme at the beginning of the line
            if headername == 'authentication-info':
                # Authentication-Info carries no scheme token; it is
                # treated as Digest parameters.
                (auth_scheme, the_rest) = ('digest', authenticate)
            else:
                (auth_scheme, the_rest) = authenticate.split(" ", 1)
            # Now loop over all the key value pairs that come after the scheme,
            # being careful not to roll into the next scheme
            match = www_auth.search(the_rest)
            auth_params = {}
            while match:
                if match and len(match.groups()) == 3:
                    (key, value, the_rest) = match.groups()
                    # Unescape quoted-pair sequences (\x -> x).
                    auth_params[key.lower()] = UNQUOTE_PAIRS.sub(r'\1', value) # '\\'.join([x.replace('\\', '') for x in value.split('\\\\')])
                match = www_auth.search(the_rest)
            retval[auth_scheme.lower()] = auth_params
            # Whatever the regex did not consume starts the next scheme.
            authenticate = the_rest.strip()
    return retval
def _entry_disposition(response_headers, request_headers):
    """Determine freshness from the Date, Expires and Cache-Control headers.

    We don't handle the following:

    1. Cache-Control: max-stale
    2. Age: headers are not used in the calculations.

    Not that this algorithm is simpler than you might think
    because we are operating as a private (non-shared) cache.
    This lets us ignore 's-maxage'. We can also ignore
    'proxy-invalidate' since we aren't a proxy.
    We will never return a stale document as
    fresh as a design decision, and thus the non-implementation
    of 'max-stale'. This also lets us safely ignore 'must-revalidate'
    since we operate as if every server has sent 'must-revalidate'.
    Since we are private we get to ignore both 'public' and
    'private' parameters. We also ignore 'no-transform' since
    we don't do any transformations.
    The 'no-store' parameter is handled at a higher level.
    So the only Cache-Control parameters we look at are:

    no-cache
    only-if-cached
    max-age
    min-fresh

    Returns one of "FRESH" (serve from cache), "STALE" (revalidate) or
    "TRANSPARENT" (bypass the cache).
    """
    retval = "STALE"
    cc = _parse_cache_control(request_headers)
    cc_response = _parse_cache_control(response_headers)

    if request_headers.has_key('pragma') and request_headers['pragma'].lower().find('no-cache') != -1:
        # Legacy HTTP/1.0 client no-cache; also force the 1.1 equivalent.
        retval = "TRANSPARENT"
        if 'cache-control' not in request_headers:
            request_headers['cache-control'] = 'no-cache'
    elif cc.has_key('no-cache'):
        retval = "TRANSPARENT"
    elif cc_response.has_key('no-cache'):
        retval = "STALE"
    elif cc.has_key('only-if-cached'):
        retval = "FRESH"
    elif response_headers.has_key('date'):
        # current_age ~ seconds since the origin generated the response.
        date = calendar.timegm(email.Utils.parsedate_tz(response_headers['date']))
        now = time.time()
        current_age = max(0, now - date)
        if cc_response.has_key('max-age'):
            try:
                freshness_lifetime = int(cc_response['max-age'])
            except ValueError:
                freshness_lifetime = 0
        elif response_headers.has_key('expires'):
            expires = email.Utils.parsedate_tz(response_headers['expires'])
            if None == expires:
                freshness_lifetime = 0
            else:
                freshness_lifetime = max(0, calendar.timegm(expires) - date)
        else:
            freshness_lifetime = 0
        # The request's own max-age overrides the response's lifetime.
        if cc.has_key('max-age'):
            try:
                freshness_lifetime = int(cc['max-age'])
            except ValueError:
                freshness_lifetime = 0
        # min-fresh: client wants the entry to remain fresh this much longer.
        if cc.has_key('min-fresh'):
            try:
                min_fresh = int(cc['min-fresh'])
            except ValueError:
                min_fresh = 0
            current_age += min_fresh
        if freshness_lifetime > current_age:
            retval = "FRESH"
    return retval
def _decompressContent(response, new_content):
    """Decompress a gzip- or deflate-encoded body in place.

    Other (or missing) content-encodings pass through untouched.  On
    success, content-length is rewritten and the original encoding is
    recorded under '-content-encoding'.  Raises FailedToDecompressContent
    on undecodable data.
    """
    content = new_content
    try:
        encoding = response.get('content-encoding', None)
        if encoding in ['gzip', 'deflate']:
            if encoding == 'gzip':
                content = gzip.GzipFile(fileobj=StringIO.StringIO(new_content)).read()
            if encoding == 'deflate':
                # NOTE(review): zlib.decompress raises zlib.error, which the
                # except clause below does NOT catch -- a bad deflate body
                # propagates instead of becoming FailedToDecompressContent.
                content = zlib.decompress(content)
            response['content-length'] = str(len(content))
            # Record the historical presence of the encoding in a way the won't interfere.
            response['-content-encoding'] = response['content-encoding']
            del response['content-encoding']
    except IOError:
        content = ""
        raise FailedToDecompressContent(_("Content purported to be compressed with %s but failed to decompress.") % response.get('content-encoding'), response, content)
    return content
def _updateCache(request_headers, response_headers, content, cache, cachekey):
    """Store (or drop) a response in *cache* under *cachekey*.

    Honours 'no-store' from either side by deleting the entry.  Otherwise
    serializes a 'status:' line, the end-entity headers (RFC-822 style,
    CRLF-normalized) and the body into one cache blob.
    """
    if cachekey:
        cc = _parse_cache_control(request_headers)
        cc_response = _parse_cache_control(response_headers)
        if cc.has_key('no-store') or cc_response.has_key('no-store'):
            cache.delete(cachekey)
        else:
            info = email.Message.Message()
            for key, value in response_headers.iteritems():
                # Framing/transport headers are not part of the entity.
                if key not in ['status','content-encoding','transfer-encoding']:
                    info[key] = value

            # A 304 means the cached entity is still valid; store it as a
            # plain 200 so later reads look like a normal hit.
            status = response_headers.status
            if status == 304:
                status = 200

            # BUG FIX: the status line previously used the raw
            # response_headers.status, silently discarding the 304 -> 200
            # mapping computed above.
            status_header = 'status: %d\r\n' % status

            header_str = info.as_string()
            # Normalize lone CR or LF to CRLF.
            header_str = re.sub("\r(?!\n)|(?<!\r)\n", "\r\n", header_str)
            text = "".join([status_header, header_str, content])

            cache.set(cachekey, text)
def _cnonce():
dig = _md5("%s:%s" % (time.ctime(), ["0123456789"[random.randrange(0, 9)] for i in range(20)])).hexdigest()
return dig[:16]
def _wsse_username_token(cnonce, iso_now, password):
    """Compute the WSSE PasswordDigest: Base64(SHA-1(nonce + created + password))."""
    digest = _sha("%s%s%s" % (cnonce, iso_now, password)).digest()
    return base64.b64encode(digest).strip()
# For credentials we need two things, first
# a pool of credential to try (not necesarily tied to BAsic, Digest, etc.)
# Then we also need a list of URIs that have already demanded authentication
# That list is tricky since sub-URIs can take the same auth, or the
# auth scheme may change as you descend the tree.
# So we also need each Auth instance to be able to tell us
# how close to the 'top' it is.
class Authentication(object):
    """Base class: one authentication scheme bound to one URI subtree.

    Subclasses add credentials to outgoing request headers and may react
    to responses (e.g. Digest nonce rotation).
    """
    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        (scheme, authority, path, query, fragment) = parse_uri(request_uri)
        self.path = path
        self.host = host
        self.credentials = credentials
        self.http = http

    def depth(self, request_uri):
        # How many path segments below this auth's root the URI sits;
        # used to pick the most specific matching authorization.
        (scheme, authority, path, query, fragment) = parse_uri(request_uri)
        return request_uri[len(self.path):].count("/")

    def inscope(self, host, request_uri):
        # XXX Should we normalize the request_uri?
        (scheme, authority, path, query, fragment) = parse_uri(request_uri)
        return (host == self.host) and path.startswith(self.path)

    def request(self, method, request_uri, headers, content):
        """Modify the request headers to add the appropriate
        Authorization header. Over-rise this in sub-classes."""
        pass

    def response(self, response, content):
        """Gives us a chance to update with new nonces
        or such returned from the last authorized response.
        Over-rise this in sub-classes if necessary.

        Return TRUE is the request is to be retried, for
        example Digest may return stale=true.
        """
        return False
class BasicAuthentication(Authentication):
    """RFC 2617 Basic auth: credentials sent base64-encoded on every request."""
    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)

    def request(self, method, request_uri, headers, content):
        """Modify the request headers to add the appropriate
        Authorization header."""
        # credentials is a (name, password) tuple.
        headers['authorization'] = 'Basic ' + base64.b64encode("%s:%s" % self.credentials).strip()
class DigestAuthentication(Authentication):
    """Only do qop='auth' and MD5, since that
    is all Apache currently implements"""
    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
        challenge = _parse_www_authenticate(response, 'www-authenticate')
        self.challenge = challenge['digest']
        qop = self.challenge.get('qop', 'auth')
        # Keep 'auth' if the server offers it; anything else is unsupported.
        self.challenge['qop'] = ('auth' in [x.strip() for x in qop.split()]) and 'auth' or None
        if self.challenge['qop'] is None:
            raise UnimplementedDigestAuthOptionError( _("Unsupported value for qop: %s." % qop))
        self.challenge['algorithm'] = self.challenge.get('algorithm', 'MD5').upper()
        if self.challenge['algorithm'] != 'MD5':
            raise UnimplementedDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm']))
        # A1 = username ":" realm ":" password  (RFC 2617 3.2.2.2).
        self.A1 = "".join([self.credentials[0], ":", self.challenge['realm'], ":", self.credentials[1]])
        # nonce-count, incremented per request under the same nonce.
        self.challenge['nc'] = 1

    def request(self, method, request_uri, headers, content, cnonce = None):
        """Modify the request headers"""
        # H and KD as defined in RFC 2617 3.2.1.
        H = lambda x: _md5(x).hexdigest()
        KD = lambda s, d: H("%s:%s" % (s, d))
        A2 = "".join([method, ":", request_uri])
        self.challenge['cnonce'] = cnonce or _cnonce()
        request_digest = '"%s"' % KD(H(self.A1), "%s:%s:%s:%s:%s" % (self.challenge['nonce'],
                    '%08x' % self.challenge['nc'],
                    self.challenge['cnonce'],
                    self.challenge['qop'], H(A2)
                    ))
        headers['Authorization'] = 'Digest username="%s", realm="%s", nonce="%s", uri="%s", algorithm=%s, response=%s, qop=%s, nc=%08x, cnonce="%s"' % (
                self.credentials[0],
                self.challenge['realm'],
                self.challenge['nonce'],
                request_uri,
                self.challenge['algorithm'],
                request_digest,
                self.challenge['qop'],
                self.challenge['nc'],
                self.challenge['cnonce'],
                )
        self.challenge['nc'] += 1

    def response(self, response, content):
        if not response.has_key('authentication-info'):
            # No Authentication-Info: if the 401 says our nonce is stale,
            # adopt the fresh nonce and ask for a retry.
            challenge = _parse_www_authenticate(response, 'www-authenticate').get('digest', {})
            if 'true' == challenge.get('stale'):
                self.challenge['nonce'] = challenge['nonce']
                self.challenge['nc'] = 1
                return True
        else:
            # The server may hand us the next nonce ahead of time.
            updated_challenge = _parse_www_authenticate(response, 'authentication-info').get('digest', {})
            if updated_challenge.has_key('nextnonce'):
                self.challenge['nonce'] = updated_challenge['nextnonce']
                self.challenge['nc'] = 1
        return False
class HmacDigestAuthentication(Authentication):
    """Adapted from Robert Sayre's code and DigestAuthentication above."""
    __author__ = "Thomas Broyer (t.broyer@ltgt.net)"

    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
        challenge = _parse_www_authenticate(response, 'www-authenticate')
        self.challenge = challenge['hmacdigest']
        # TODO: self.challenge['domain']
        self.challenge['reason'] = self.challenge.get('reason', 'unauthorized')
        if self.challenge['reason'] not in ['unauthorized', 'integrity']:
            self.challenge['reason'] = 'unauthorized'
        self.challenge['salt'] = self.challenge.get('salt', '')
        if not self.challenge.get('snonce'):
            raise UnimplementedHmacDigestAuthOptionError( _("The challenge doesn't contain a server nonce, or this one is empty."))
        self.challenge['algorithm'] = self.challenge.get('algorithm', 'HMAC-SHA-1')
        if self.challenge['algorithm'] not in ['HMAC-SHA-1', 'HMAC-MD5']:
            raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm']))
        self.challenge['pw-algorithm'] = self.challenge.get('pw-algorithm', 'SHA-1')
        if self.challenge['pw-algorithm'] not in ['SHA-1', 'MD5']:
            raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for pw-algorithm: %s." % self.challenge['pw-algorithm']))
        if self.challenge['algorithm'] == 'HMAC-MD5':
            self.hashmod = _md5
        else:
            self.hashmod = _sha
        if self.challenge['pw-algorithm'] == 'MD5':
            self.pwhashmod = _md5
        else:
            self.pwhashmod = _sha
        # Derived key: H(username ":" H(password + salt) ":" realm).
        # NOTE(review): .new(...) assumes the old md5/sha module interface;
        # with the hashlib fallback at the top of this file _sha/_md5 are
        # bare constructors without a .new attribute -- confirm this path
        # is only taken on pre-hashlib pythons.
        self.key = "".join([self.credentials[0], ":",
                    self.pwhashmod.new("".join([self.credentials[1], self.challenge['salt']])).hexdigest().lower(),
                    ":", self.challenge['realm']
                    ])
        self.key = self.pwhashmod.new(self.key).hexdigest().lower()

    def request(self, method, request_uri, headers, content):
        """Modify the request headers"""
        # The digest covers the method, URI, nonces and the concatenated
        # values of every end-to-end request header.
        keys = _get_end2end_headers(headers)
        keylist = "".join(["%s " % k for k in keys])
        headers_val = "".join([headers[k] for k in keys])
        created = time.strftime('%Y-%m-%dT%H:%M:%SZ',time.gmtime())
        cnonce = _cnonce()
        request_digest = "%s:%s:%s:%s:%s" % (method, request_uri, cnonce, self.challenge['snonce'], headers_val)
        request_digest = hmac.new(self.key, request_digest, self.hashmod).hexdigest().lower()
        headers['Authorization'] = 'HMACDigest username="%s", realm="%s", snonce="%s", cnonce="%s", uri="%s", created="%s", response="%s", headers="%s"' % (
                self.credentials[0],
                self.challenge['realm'],
                self.challenge['snonce'],
                cnonce,
                request_uri,
                created,
                request_digest,
                keylist,
                )

    def response(self, response, content):
        # Retry when the server reports an integrity failure or staleness.
        challenge = _parse_www_authenticate(response, 'www-authenticate').get('hmacdigest', {})
        if challenge.get('reason') in ['integrity', 'stale']:
            return True
        return False
class WsseAuthentication(Authentication):
    """This is thinly tested and should not be relied upon.
    At this time there isn't any third party server to test against.
    Blogger and TypePad implemented this algorithm at one point
    but Blogger has since switched to Basic over HTTPS and
    TypePad has implemented it wrong, by never issuing a 401
    challenge but instead requiring your client to telepathically know that
    their endpoint is expecting WSSE profile="UsernameToken"."""
    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)

    def request(self, method, request_uri, headers, content):
        """Modify the request headers to add the appropriate
        Authorization header."""
        headers['Authorization'] = 'WSSE profile="UsernameToken"'
        # PasswordDigest = Base64(SHA-1(nonce + created + password)).
        iso_now = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
        cnonce = _cnonce()
        password_digest = _wsse_username_token(cnonce, iso_now, self.credentials[1])
        headers['X-WSSE'] = 'UsernameToken Username="%s", PasswordDigest="%s", Nonce="%s", Created="%s"' % (
                self.credentials[0],
                password_digest,
                cnonce,
                iso_now)
class GoogleLoginAuthentication(Authentication):
    # Google ClientLogin: trade name/password for an Auth token once (in
    # __init__, which performs a network round-trip), then send the token
    # on every request.
    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        from urllib import urlencode
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
        challenge = _parse_www_authenticate(response, 'www-authenticate')
        service = challenge['googlelogin'].get('service', 'xapi')
        # Bloggger actually returns the service in the challenge
        # For the rest we guess based on the URI
        if service == 'xapi' and request_uri.find("calendar") > 0:
            service = "cl"
        # No point in guessing Base or Spreadsheet
        #elif request_uri.find("spreadsheets") > 0:
        #    service = "wise"

        auth = dict(Email=credentials[0], Passwd=credentials[1], service=service, source=headers['user-agent'])
        resp, content = self.http.request("https://www.google.com/accounts/ClientLogin", method="POST", body=urlencode(auth), headers={'Content-Type': 'application/x-www-form-urlencoded'})
        # Response body is 'key=value' lines; we need the Auth token.
        lines = content.split('\n')
        d = dict([tuple(line.split("=", 1)) for line in lines if line])
        if resp.status == 403:
            # Login refused; an empty token means requests will simply fail.
            self.Auth = ""
        else:
            self.Auth = d['Auth']

    def request(self, method, request_uri, headers, content):
        """Modify the request headers to add the appropriate
        Authorization header."""
        headers['authorization'] = 'GoogleLogin Auth=' + self.Auth
AUTH_SCHEME_CLASSES = {
"basic": BasicAuthentication,
"wsse": WsseAuthentication,
"digest": DigestAuthentication,
"hmacdigest": HmacDigestAuthentication,
"googlelogin": GoogleLoginAuthentication
}
AUTH_SCHEME_ORDER = ["hmacdigest", "googlelogin", "digest", "wsse", "basic"]
class FileCache(object):
    """Uses a local directory as a store for cached files.
    Not really safe to use if multiple threads or processes are going to
    be running on the same cache.
    """
    def __init__(self, cache, safe=safename): # use safe=lambda x: md5.new(x).hexdigest() for the old behavior
        # cache: directory path; safe: key -> filename mapping function.
        self.cache = cache
        self.safe = safe
        if not os.path.exists(cache):
            os.makedirs(self.cache)

    def get(self, key):
        """Return the cached bytes for *key*, or None on a miss or
        unreadable file."""
        retval = None
        cacheFullPath = os.path.join(self.cache, self.safe(key))
        try:
            # open() instead of the deprecated file() builtin; the nested
            # try/finally (2.3-compatible) closes the handle even when
            # read() itself raises, which previously leaked it.
            f = open(cacheFullPath, "rb")
            try:
                retval = f.read()
            finally:
                f.close()
        except IOError:
            pass
        return retval

    def set(self, key, value):
        """Store *value* under *key*, overwriting any previous entry."""
        cacheFullPath = os.path.join(self.cache, self.safe(key))
        f = open(cacheFullPath, "wb")
        try:
            f.write(value)
        finally:
            f.close()

    def delete(self, key):
        """Remove *key*'s entry; a missing entry is not an error."""
        cacheFullPath = os.path.join(self.cache, self.safe(key))
        if os.path.exists(cacheFullPath):
            os.remove(cacheFullPath)
class Credentials(object):
    """Registry of (name, password) pairs, optionally scoped to a domain."""

    def __init__(self):
        self.credentials = []

    def add(self, name, password, domain=""):
        """Register *name*/*password* for *domain* ("" matches any domain)."""
        self.credentials.append((domain.lower(), name, password))

    def clear(self):
        """Forget every registered credential."""
        self.credentials = []

    def iter(self, domain):
        """Yield (name, password) pairs usable against *domain*,
        in registration order."""
        for stored_domain, name, password in self.credentials:
            if stored_domain in ("", domain):
                yield (name, password)
class KeyCerts(Credentials):
    """Identical to Credentials except that
    name/password are mapped to key/cert."""
    # Reuses Credentials' storage and iter(); entries are (domain, key, cert).
    pass
class ProxyInfo(object):
    """Collect information required to use a proxy."""
    def __init__(self, proxy_type, proxy_host, proxy_port, proxy_rdns=None, proxy_user=None, proxy_pass=None):
        """The parameter proxy_type must be set to one of socks.PROXY_TYPE_XXX
        constants. For example:

        p = ProxyInfo(proxy_type=socks.PROXY_TYPE_HTTP, proxy_host='localhost', proxy_port=8000)
        """
        self.proxy_type = proxy_type
        self.proxy_host = proxy_host
        self.proxy_port = proxy_port
        self.proxy_rdns = proxy_rdns
        self.proxy_user = proxy_user
        self.proxy_pass = proxy_pass

    def astuple(self):
        """Return the settings as a tuple in socksocket.setproxy() order."""
        return (self.proxy_type, self.proxy_host, self.proxy_port,
                self.proxy_rdns, self.proxy_user, self.proxy_pass)

    def isgood(self):
        """True when the socks module is available and host/port are set."""
        return socks and (self.proxy_host != None) and (self.proxy_port != None)
class HTTPConnectionWithTimeout(httplib.HTTPConnection):
    """HTTPConnection subclass that supports timeouts"""

    def __init__(self, host, port=None, strict=None, timeout=None, proxy_info=None):
        httplib.HTTPConnection.__init__(self, host, port, strict)
        # timeout: seconds (or the global-default sentinel); proxy_info:
        # a ProxyInfo instance or None for a direct connection.
        self.timeout = timeout
        self.proxy_info = proxy_info

    def connect(self):
        """Connect to the host and port specified in __init__."""
        # Mostly verbatim from httplib.py.
        msg = "getaddrinfo returns an empty list"
        # Try each resolved (family, socktype, ...) until one connects.
        for res in socket.getaddrinfo(self.host, self.port, 0,
                socket.SOCK_STREAM):
            af, socktype, proto, canonname, sa = res
            try:
                if self.proxy_info and self.proxy_info.isgood():
                    # Route the socket through the configured proxy.
                    self.sock = socks.socksocket(af, socktype, proto)
                    self.sock.setproxy(*self.proxy_info.astuple())
                else:
                    self.sock = socket.socket(af, socktype, proto)
                # Different from httplib: support timeouts.
                if has_timeout(self.timeout):
                    self.sock.settimeout(self.timeout)
                # End of difference from httplib.
                if self.debuglevel > 0:
                    print "connect: (%s, %s)" % (self.host, self.port)
                self.sock.connect(sa)
            except socket.error, msg:
                if self.debuglevel > 0:
                    print 'connect fail:', (self.host, self.port)
                if self.sock:
                    self.sock.close()
                self.sock = None
                continue
            break
        if not self.sock:
            # Every candidate address failed; re-raise the last error.
            raise socket.error, msg
class HTTPSConnectionWithTimeout(httplib.HTTPSConnection):
    "This class allows communication via SSL."

    def __init__(self, host, port=None, key_file=None, cert_file=None,
                 strict=None, timeout=None, proxy_info=None):
        httplib.HTTPSConnection.__init__(self, host, port=port, key_file=key_file,
                cert_file=cert_file, strict=strict)
        self.timeout = timeout
        self.proxy_info = proxy_info

    def connect(self):
        "Connect to a host on a given (SSL) port."
        # Plain (or proxied) TCP connect first, then wrap the socket with
        # SSL via whichever _ssl_wrap_socket the import-time probe chose.
        if self.proxy_info and self.proxy_info.isgood():
            sock = socks.socksocket(socket.AF_INET, socket.SOCK_STREAM)
            sock.setproxy(*self.proxy_info.astuple())
        else:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

        if has_timeout(self.timeout):
            sock.settimeout(self.timeout)
        sock.connect((self.host, self.port))
        self.sock =_ssl_wrap_socket(sock, self.key_file, self.cert_file)
class Http(object):
"""An HTTP client that handles:
- all methods
- caching
- ETags
- compression,
- HTTPS
- Basic
- Digest
- WSSE
and more.
"""
def __init__(self, cache=None, timeout=None, proxy_info=None):
    """The value of proxy_info is a ProxyInfo instance.

    If 'cache' is a string then it is used as a directory name
    for a disk cache. Otherwise it must be an object that supports
    the same interface as FileCache.

    'timeout' is handed to every connection this client opens; None
    means the platform default.
    """
    self.proxy_info = proxy_info
    # Map domain name to an httplib connection
    self.connections = {}
    # The location of the cache, for now a directory
    # where cached responses are held.
    if cache and isinstance(cache, str):
        self.cache = FileCache(cache)
    else:
        self.cache = cache

    # Name/password
    self.credentials = Credentials()

    # Key/cert
    self.certificates = KeyCerts()

    # authorization objects
    self.authorizations = []

    # If set to False then no redirects are followed, even safe ones.
    self.follow_redirects = True

    # Which HTTP methods do we apply optimistic concurrency to, i.e.
    # which methods get an "if-match:" etag header added to them.
    self.optimistic_concurrency_methods = ["PUT"]

    # If 'follow_redirects' is True, and this is set to True then
    # all redirecs are followed, including unsafe ones.
    self.follow_all_redirects = False

    # NOTE(review): consumed by request(); appears to disable ETag-based
    # revalidation when True -- confirm against the full request() body.
    self.ignore_etag = False

    # NOTE(review): consumed by request(); appears to convert raised
    # exceptions into synthetic error responses when True -- confirm.
    self.force_exception_to_status_code = False

    self.timeout = timeout
def _auth_from_challenge(self, host, request_uri, headers, response, content):
    """A generator that creates Authorization objects
    that can be applied to requests.
    """
    challenges = _parse_www_authenticate(response, 'www-authenticate')
    # Try every credential registered for this host against the schemes
    # the server offered, strongest scheme first (AUTH_SCHEME_ORDER).
    for cred in self.credentials.iter(host):
        for scheme in AUTH_SCHEME_ORDER:
            if challenges.has_key(scheme):
                yield AUTH_SCHEME_CLASSES[scheme](cred, host, request_uri, headers, response, content, self)
def add_credentials(self, name, password, domain=""):
    """Add a name and password that will be used
    any time a request requires authentication.

    An empty 'domain' makes the pair usable against any host."""
    self.credentials.add(name, password, domain)
def add_certificate(self, key, cert, domain):
    """Add a key and cert that will be used
    any time a request requires authentication.

    Used for SSL client certificates (key_file/cert_file) on HTTPS."""
    self.certificates.add(key, cert, domain)
def clear_credentials(self):
    """Remove all the names and passwords
    that are used for authentication"""
    self.credentials.clear()
    # Drop cached per-URI authorization state too, since it was built
    # from the credentials just removed.
    self.authorizations = []
def _conn_request(self, conn, request_uri, method, body, headers):
    """Issue one request on *conn* and return (Response, content),
    retrying exactly once after reconnecting if the (possibly stale
    keep-alive) connection fails."""
    for i in range(2):
        try:
            conn.request(method, request_uri, body, headers)
        except socket.gaierror:
            # Name resolution failed; no point in retrying.
            conn.close()
            raise ServerNotFoundError("Unable to find the server at %s" % conn.host)
        except (socket.error, httplib.HTTPException):
            # Just because the server closed the connection doesn't apparently mean
            # that the server didn't send a response.
            pass
        try:
            response = conn.getresponse()
        except (socket.error, httplib.HTTPException):
            if i == 0:
                # First failure: reconnect and try the request once more.
                conn.close()
                conn.connect()
                continue
            else:
                raise
        else:
            content = ""
            if method != "HEAD":
                content = response.read()
            response = Response(response)
            if method != "HEAD":
                # HEAD bodies are empty by definition; nothing to decode.
                content = _decompressContent(response, content)
            break
    return (response, content)
def _request(self, conn, host, absolute_uri, request_uri, method, body, headers, redirections, cachekey):
    """Do the actual request using the connection object
    and also follow one level of redirects if necessary"""

    # Pick the most specific authorization already proven for this URI
    # subtree (smallest depth sorts first).
    auths = [(auth.depth(request_uri), auth) for auth in self.authorizations if auth.inscope(host, request_uri)]
    auth = auths and sorted(auths)[0][1] or None
    if auth:
        auth.request(method, request_uri, headers, body)

    (response, content) = self._conn_request(conn, request_uri, method, body, headers)

    if auth:
        # e.g. Digest stale=true: refresh the nonce and retry once.
        if auth.response(response, body):
            auth.request(method, request_uri, headers, body)
            (response, content) = self._conn_request(conn, request_uri, method, body, headers )
            response._stale_digest = 1

    if response.status == 401:
        # Challenged: try each (credential, scheme) combination until one
        # is accepted, then remember it for future requests in scope.
        for authorization in self._auth_from_challenge(host, request_uri, headers, response, content):
            authorization.request(method, request_uri, headers, body)
            (response, content) = self._conn_request(conn, request_uri, method, body, headers, )
            if response.status != 401:
                self.authorizations.append(authorization)
                authorization.response(response, body)
                break

    if (self.follow_all_redirects or (method in ["GET", "HEAD"]) or response.status == 303):
        if self.follow_redirects and response.status in [300, 301, 302, 303, 307]:
            # Pick out the location header and basically start from the beginning
            # remembering first to strip the ETag header and decrement our 'depth'
            if redirections:
                if not response.has_key('location') and response.status != 300:
                    raise RedirectMissingLocation( _("Redirected but the response is missing a Location: header."), response, content)
                # Fix-up relative redirects (which violate an RFC 2616 MUST)
                if response.has_key('location'):
                    location = response['location']
                    (scheme, authority, path, query, fragment) = parse_uri(location)
                    if authority == None:
                        response['location'] = urlparse.urljoin(absolute_uri, location)
                if response.status == 301 and method in ["GET", "HEAD"]:
                    # Permanent redirects are cached so we never re-request
                    # the old URI.
                    response['-x-permanent-redirect-url'] = response['location']
                    if not response.has_key('content-location'):
                        response['content-location'] = absolute_uri
                    _updateCache(headers, response, content, self.cache, cachekey)
                # Validators belong to the old URI, not the redirect target.
                if headers.has_key('if-none-match'):
                    del headers['if-none-match']
                if headers.has_key('if-modified-since'):
                    del headers['if-modified-since']
                if response.has_key('location'):
                    location = response['location']
                    old_response = copy.deepcopy(response)
                    if not old_response.has_key('content-location'):
                        old_response['content-location'] = absolute_uri
                    # 303 (and unsafe-method redirects) are re-issued as GET.
                    redirect_method = ((response.status == 303) and (method not in ["GET", "HEAD"])) and "GET" or method
                    (response, content) = self.request(location, redirect_method, body=body, headers = headers, redirections = redirections - 1)
                    # Keep the redirect chain reachable via .previous.
                    response.previous = old_response
            else:
                raise RedirectLimit( _("Redirected more times than rediection_limit allows."), response, content)
        elif response.status in [200, 203] and method == "GET":
            # Don't cache 206's since we aren't going to handle byte range requests
            if not response.has_key('content-location'):
                response['content-location'] = absolute_uri
            _updateCache(headers, response, content, self.cache, cachekey)

    return (response, content)
# Need to catch and rebrand some exceptions
# Then need to optionally turn all exceptions into status codes
# including all socket.* and httplib.* exceptions.
    def request(self, uri, method="GET", body=None, headers=None, redirections=DEFAULT_MAX_REDIRECTS, connection_type=None):
        """ Performs a single HTTP request.

        The 'uri' is the URI of the HTTP resource and can begin
        with either 'http' or 'https'. The value of 'uri' must be an absolute URI.

        The 'method' is the HTTP method to perform, such as GET, POST, DELETE, etc.
        There is no restriction on the methods allowed.

        The 'body' is the entity body to be sent with the request. It is a string
        object.

        Any extra headers that are to be sent with the request should be provided in the
        'headers' dictionary.

        The maximum number of redirects to follow before raising an
        exception is 'redirections'. The default is 5.

        The return value is a tuple of (response, content), the first
        being an instance of the 'Response' class, the second being
        a string that contains the response entity body.
        """
        try:
            # Normalize header names to lowercase and make sure a
            # User-Agent is always sent.
            if headers is None:
                headers = {}
            else:
                headers = _normalize_headers(headers)
            if not headers.has_key('user-agent'):
                headers['user-agent'] = "Python-httplib2/%s" % __version__
            uri = iri2uri(uri)
            (scheme, authority, request_uri, defrag_uri) = urlnorm(uri)
            # http://host:443 is really https; collapse it.
            domain_port = authority.split(":")[0:2]
            if len(domain_port) == 2 and domain_port[1] == '443' and scheme == 'http':
                scheme = 'https'
                authority = domain_port[0]
            # One pooled connection per scheme+authority pair.
            conn_key = scheme+":"+authority
            if conn_key in self.connections:
                conn = self.connections[conn_key]
            else:
                if not connection_type:
                    connection_type = (scheme == 'https') and HTTPSConnectionWithTimeout or HTTPConnectionWithTimeout
                certs = list(self.certificates.iter(authority))
                if scheme == 'https' and certs:
                    # Use the first client certificate registered for this
                    # authority: (key_file, cert_file).
                    conn = self.connections[conn_key] = connection_type(authority, key_file=certs[0][0],
                        cert_file=certs[0][1], timeout=self.timeout, proxy_info=self.proxy_info)
                else:
                    conn = self.connections[conn_key] = connection_type(authority, timeout=self.timeout, proxy_info=self.proxy_info)
                conn.set_debuglevel(debuglevel)
            # Advertise compression support unless the caller asked for a
            # byte range (ranges of compressed entities don't compose).
            if method in ["GET", "HEAD"] and 'range' not in headers and 'accept-encoding' not in headers:
                headers['accept-encoding'] = 'deflate, gzip'
            info = email.Message.Message()
            cached_value = None
            if self.cache:
                cachekey = defrag_uri
                cached_value = self.cache.get(cachekey)
                if cached_value:
                    # info = email.message_from_string(cached_value)
                    #
                    # Need to replace the line above with the kludge below
                    # to fix the non-existent bug not fixed in this
                    # bug report: http://mail.python.org/pipermail/python-bugs-list/2005-September/030289.html
                    try:
                        # Cache entries are "<headers>\r\n\r\n<body>".
                        info, content = cached_value.split('\r\n\r\n', 1)
                        feedparser = email.FeedParser.FeedParser()
                        feedparser.feed(info)
                        info = feedparser.close()
                        feedparser._parse = None
                    except IndexError:
                        # Malformed cache entry; discard it and carry on
                        # as if nothing was cached.
                        self.cache.delete(cachekey)
                        cachekey = None
                        cached_value = None
            else:
                cachekey = None
            # Optimistic concurrency: send If-Match on mutating methods
            # when we hold a cached ETag for this resource.
            if method in self.optimistic_concurrency_methods and self.cache and info.has_key('etag') and not self.ignore_etag and 'if-match' not in headers:
                # http://www.w3.org/1999/04/Editing/
                headers['if-match'] = info['etag']
            if method not in ["GET", "HEAD"] and self.cache and cachekey:
                # RFC 2616 Section 13.10
                self.cache.delete(cachekey)
            if cached_value and method in ["GET", "HEAD"] and self.cache and 'range' not in headers:
                if info.has_key('-x-permanent-redirect-url'):
                    # Should cached permanent redirects be counted in our redirection count? For now, yes.
                    (response, new_content) = self.request(info['-x-permanent-redirect-url'], "GET", headers = headers, redirections = redirections - 1)
                    response.previous = Response(info)
                    response.previous.fromcache = True
                else:
                    # Determine our course of action:
                    #   Is the cached entry fresh or stale?
                    #   Has the client requested a non-cached response?
                    #
                    # There seems to be three possible answers:
                    # 1. [FRESH] Return the cache entry w/o doing a GET
                    # 2. [STALE] Do the GET (but add in cache validators if available)
                    # 3. [TRANSPARENT] Do a GET w/o any cache validators (Cache-Control: no-cache) on the request
                    entry_disposition = _entry_disposition(info, headers)
                    if entry_disposition == "FRESH":
                        if not cached_value:
                            info['status'] = '504'
                            content = ""
                        response = Response(info)
                        if cached_value:
                            response.fromcache = True
                        return (response, content)
                    if entry_disposition == "STALE":
                        # Revalidate with the origin using whatever
                        # validators we have (ETag / Last-Modified).
                        if info.has_key('etag') and not self.ignore_etag and not 'if-none-match' in headers:
                            headers['if-none-match'] = info['etag']
                        if info.has_key('last-modified') and not 'last-modified' in headers:
                            headers['if-modified-since'] = info['last-modified']
                    elif entry_disposition == "TRANSPARENT":
                        pass
                    (response, new_content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey)
                if response.status == 304 and method == "GET":
                    # Rewrite the cache entry with the new end-to-end headers
                    # Take all headers that are in response
                    # and overwrite their values in info.
                    # unless they are hop-by-hop, or are listed in the connection header.
                    for key in _get_end2end_headers(response):
                        info[key] = response[key]
                    merged_response = Response(info)
                    if hasattr(response, "_stale_digest"):
                        merged_response._stale_digest = response._stale_digest
                    _updateCache(headers, merged_response, content, self.cache, cachekey)
                    response = merged_response
                    # Present the revalidated entry to the caller as a 200.
                    response.status = 200
                    response.fromcache = True
                elif response.status == 200:
                    content = new_content
                else:
                    self.cache.delete(cachekey)
                    content = new_content
            else:
                cc = _parse_cache_control(headers)
                if cc.has_key('only-if-cached'):
                    # Caller demanded cache-only service but we have no
                    # usable cache entry: 504 per RFC 2616 14.9.4.
                    info['status'] = '504'
                    response = Response(info)
                    content = ""
                else:
                    (response, content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey)
        except Exception, e:
            if self.force_exception_to_status_code:
                # Convert the failure into a synthetic Response rather
                # than letting the exception propagate.
                if isinstance(e, HttpLib2ErrorWithResponse):
                    response = e.response
                    content = e.content
                    response.status = 500
                    response.reason = str(e)
                elif isinstance(e, socket.timeout):
                    content = "Request Timeout"
                    response = Response( {
                        "content-type": "text/plain",
                        "status": "408",
                        "content-length": len(content)
                        })
                    response.reason = "Request Timeout"
                else:
                    content = str(e)
                    response = Response( {
                        "content-type": "text/plain",
                        "status": "400",
                        "content-length": len(content)
                        })
                    response.reason = "Bad Request"
            else:
                raise
        return (response, content)
class Response(dict):
    """Header mapping plus status metadata, akin to email.Message."""

    # Whether this response was served from the local cache.
    fromcache = False
    # HTTP protocol version used by server: 10 for HTTP/1.0, 11 for HTTP/1.1.
    version = 11
    # Status code returned by server.
    status = 200
    # Reason phrase returned by server.
    reason = "Ok"
    # Previous Response in a redirect chain, if any.
    previous = None

    def __init__(self, info):
        # `info` is an httplib.HTTPResponse, an email.Message, or a
        # plain mapping of header names to values.
        if isinstance(info, httplib.HTTPResponse):
            for hdr, val in info.getheaders():
                self[hdr.lower()] = val
            self.status = info.status
            self['status'] = str(self.status)
            self.reason = info.reason
            self.version = info.version
        elif isinstance(info, email.Message.Message):
            for hdr, val in info.items():
                self[hdr] = val
            self.status = int(self['status'])
        else:
            for hdr, val in info.iteritems():
                self[hdr] = val
            self.status = int(self.get('status', self.status))

    def __getattr__(self, name):
        # Expose `.dict` as an alias for the mapping itself.
        if name == 'dict':
            return self
        raise AttributeError(name)
| |
# formhandler/formhandler.py
# Lillian Lemmer <lillian.lynn.lemmer@gmail.com>
#
# This module is part of FormHandler and is released under the
# MIT license: http://opensource.org/licenses/MIT
"""formhandler: sometimes the web interface is an afterthought.
Automate the development of a web/CGI script interface to a function.
Tested functional with Python 2.7.6 and Python 3.4.
In a couple of commands, in one CGI script, use a function to:
1. Provide an HTML form interface for that function.
2. Provide the HTML-ified evaluation of sending corresponding
POST/GET fields to aforementioned function(s).
Includes tools for automatically converting data returned from a
function, to HTML, e.g., dict > html, list > html, etc.
I was tired of making and updating web interfaces to various scripts
I've written at work. Various people use various scripts through super
simple CGI scripts.
DEVELOPER NOTES:
- Needs some prettification; will probably use BeautifulSoup...
- Soon I'll include an example which is a SQLITE3 table editor.
- Will have automatic form validation (for defined argument data
types).
- Should mark arg fields as required!
- Needs kwarg and arg support!
"""
import os
import cgi
import cgitb; cgitb.enable()
import inspect
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
# CONFIG/CONSTANTS ############################################################
# these should all go to config module!

# Outer wrapper for a generated form; filled in by Form.to_form().
# Note the multi-word placeholder {hidden trigger field}: it only works
# with str.format(**mapping), never with positional formatting.
FORM = '''
<form enctype="multipart/form-data" method="post" class="formhandler">
<fieldset>
<legend>{title}</legend>
{about}
{hidden trigger field}
{fields}
<input type="submit" value="submit">
</fieldset>
</form>
'''
# Label + optional help paragraph rendered before every input element.
FIELD_DESCRIPTION = '''
<div class="fhandler-field_desc">
<label for="{name}">{label}</label>
<p class="fhandler-field_desc-help">{help}</p>
</div>
'''
# Drop-down list; {options} is a newline-joined run of <option> elements.
SELECT = '''
<select name="{name}" id="{name}"{required}>
<optgroup label="{label}…">
{options}
</optgroup>
</select>
'''
# Generic <input>; {input type} is text/file/checkbox/radio.
HTML_INPUT_FIELD = '''
<input type="{input type}"
name="{name}"
id="{name}"{required}>
'''
# Wrapper around a function's docstring (see docstring_html()).
FORM_DESCRIPTION = '''
<section class="form-help">
<pre>{0}</pre>
</section>
'''
# Shown under a function's evaluation so the user can return to the form.
BACK_TO_INPUT = '''
<form class="formhandler">
<input type="submit" value="Back to Form">
</form>
'''
# %-formatted with a comma-joined list of missing argument names.
ERROR_MISSING_ARGS = '<p><strong>Error:</strong> missing arguments: %s.</p>'
# GENERIC PYTHON DATA > HTML ##################################################
def var_title(s):
    """Convert a Python variable name to a user-friendly title.

    Args:
        s (str): an identifier-style string, e.g. 'first_name'.

    Returns:
        str: underscores replaced by spaces, title-cased, e.g. 'First Name'.
    """
    # def instead of a lambda assignment (PEP 8 E731); same callable interface.
    return s.replace('_', ' ').title()


def paragraphs(s):
    """Wrap each blank-line-separated chunk of *s* in HTML <p> tags.

    Args:
        s (str): plain text; paragraphs separated by blank lines.

    Returns:
        str: the same text with every paragraph wrapped in <p>...</p>.
    """
    return '\n\n'.join(['<p>%s</p>' % p for p in s.split('\n\n')])
def docstring_html(function):
    """Render *function*'s docstring as an HTML help section.

    A work in progress. I will try to use someone else's library
    for this! Boilerplate--AT THE MOMENT!

    Args:
        function (callable): function whose docstring is displayed.

    Returns:
        str: FORM_DESCRIPTION wrapping the docstring, or '' when the
            function has no docstring. (Previously inspect.getdoc()'s
            None was formatted straight in, rendering the literal text
            "None" on the page.)
    """
    docstring = inspect.getdoc(function)
    if not docstring:
        return ''
    return FORM_DESCRIPTION.format(docstring)
def iter_dicts_table(iter_dicts, classes=None, check=False):
    """Convert an iterable sequence of dictionaries (all of which with
    identical keys) to an HTML table.

    Args:
        iter_dicts (iter): an iterable sequence (list, tuple) of
            dictionaries, dictionaries having identical keys.
        classes (str): Is the substitute for <table class="%s">.
        check (bool): check for key consistency!

    Returns:
        str|None: HTML tabular representation of a Python iterable
            sequence of dictionaries which share identical keys.
            Returns None if the table is not consistent (dictionary
            keys) or if *iter_dicts* is empty.
    """
    # Empty input used to raise IndexError on iter_dicts[0]; treat it as
    # "no table" instead.
    if not iter_dicts:
        return None

    # Check key consistency. Compare key *sets*: comparing keys() results
    # directly is order-sensitive under Python 2 (lists), so dicts with
    # the same keys in a different iteration order were wrongly rejected.
    if check:
        first_keys = set(iter_dicts[0])
        if any(set(d) != first_keys for d in iter_dicts):
            return None

    table_parts = {}
    table_parts['classes'] = ' class="%s"' % classes if classes else ''
    table_parts['thead'] = ''.join(['<th>%s</th>' % k for k in iter_dicts[0]])

    # tbody: build a single %(key)s row template from the first dict,
    # then fill it once per dictionary.
    keys = ['<td>%(' + key + ')s</td>' for key in iter_dicts[0]]
    row = '<tr>' + ''.join(keys) + '</tr>'
    table_parts['tbody'] = '\n'.join([row % d for d in iter_dicts])

    return '''
<table{classes}>
<thead>
<tr>{thead}</tr>
</thead>
<tbody>
{tbody}
</tbody>
</table>
'''.format(**table_parts)
# The juice ###################################################################
def get_params():
    """Collect all POST/GET fields into one well-organized dictionary.

    Uses cgi.FieldStorage() so you can only use this once to get
    your CGI field dictionary.

    Returns:
        dict: POST/GET fields. Key is field name. If value is file, value
            becomes tuple(filename, file contents). Value could also either
            be a string, or a list of values (as with check boxes).

            {
             'name': 'lillian mahoney',
             'resume': ('configured_upload_dir/my_resume.pdf', file_data),
             'availability': ['monday', 'tuesday', 'friday']
            }
    """
    fields = cgi.FieldStorage()
    params = {}

    for name in fields.keys():
        entry = fields[name]

        if entry.filename:
            # File upload: store as (filename, file contents).
            params[name] = (entry.filename, entry.file.read())
        else:
            value = fields.getvalue(name)
            # Plain fields are strings; repeated fields (check boxes)
            # come back as lists. Anything else is dropped.
            if isinstance(value, (list, str)):
                params[name] = value

    return params
class Field(object):
    """HTML field representation of a single function argument."""

    def __init__(self, function, name, field_type=None, options=None,
                 label=None, argument=None, required=False, help_text=None):
        """HTML field representation of an argument.

        Extends function argument meta data.

        Args:
            function (callable): prepared function (see FuncPrep); must
                expose an `.args` list of positional argument names.
            name (str): HTML field name; default argument name too.
            field_type (str): 'text' (default), 'file', 'select',
                'checkbox' or 'radio'.
            options (iter): choices for select/checkbox/radio fields.
            label (str): human-friendly label; derived from *name* when
                omitted.
            argument (str): target argument name; defaults to *name*.
            required (bool): mark this field required in the HTML.
            help_text (str): optional help paragraph shown by the field.
        """
        self.field_type = field_type or 'text'
        self.options = options
        self.argument = argument or name
        self.label = label or var_title(name)
        self.help_text = ('<p>' + help_text + '</p>'
                          if help_text else ' ')

        # Positional arguments of the function are always required.
        if self.argument in function.args:
            self.required = True
        else:
            self.required = required

    def __str__(self):
        return self.to_html()

    def to_html(self):
        """Render label, help text and input element(s) as HTML.

        Returns:
            str: the field's HTML.

        Raises:
            Exception: if self.field_type is not a supported type.
        """
        arg = {
            'name': self.argument,
            'help': self.help_text,
            'type': self.field_type,
            'label': self.label,
            'required': ' required' if self.required else ' ',
        }

        if self.field_type == 'select':
            option = '<option value="%s">%s</option>'
            options = [option % (o, var_title(o)) for o in self.options]
            arg['options'] = '\n'.join(options)
            return (FIELD_DESCRIPTION + SELECT).format(**arg)
        elif self.field_type in ['checkbox', 'radio']:
            items = []
            for option in self.options:
                parts = {
                    # BUGFIX: HTML_INPUT_FIELD's template requires an
                    # 'input type' key; without it every checkbox/radio
                    # render raised KeyError.
                    'input type': self.field_type,
                    'option': option,
                    'list type': self.field_type,
                    'option title': var_title(option),
                }
                parts.update(arg)
                field = (FIELD_DESCRIPTION + HTML_INPUT_FIELD).format(**parts)
                items.append(field)
            return '\n'.join(items)
        elif self.field_type in ('text', 'file'):
            parts = {'input type': self.field_type}
            parts.update(arg)
            return (FIELD_DESCRIPTION + HTML_INPUT_FIELD).format(**parts)
        else:
            # BUGFIX: previously referenced the undefined name
            # `argument_name` (NameError) instead of self.argument.
            raise Exception(('invalid arg type for %s' % self.argument,
                             arg['type'],))
class FuncPrep(object):
    """Attach form-generation metadata to a function, in place."""

    def __init__(self, function):
        """Prepares a function so that it may be used to generate
        forms with form_handler()/FormHandler().

        Automate the relations between HTML input fields and
        the function arguments themselves.

        Conform meta data about a function, as attributes of that
        function.
        """
        self.function = function
        self.function.name = function.__name__
        # Maps argument name -> Field (populated via __call__).
        self.function.fields = {}
        # getargspec() returns (args, varargs, keywords, defaults). The
        # third element bound to `kwargs` here is the *name* of the
        # **kwargs parameter (a string) or None -- not a mapping.
        # NOTE(review): downstream code (Form.to_form/evaluate) iterates
        # function.kwargs as if it were a dict of keyword arguments,
        # which would iterate the characters of this string. This looks
        # like the "Needs kwarg and arg support!" TODO from the module
        # docstring -- confirm before relying on keyword-argument forms.
        args, __, kwargs, __ = inspect.getargspec(self.function)
        self.function.args = args or []
        self.function.kwargs = kwargs or {}

    def __call__(self, name, **kwargs):
        """Relate an HTML field to an argument.

        The data is actually primarily about the HTML field itself!

        Args:
            name (str): field/argument name.
            **kwargs: keyword arguments for Field()
        """
        # should use Field()
        field = Field(self.function, name, **kwargs)
        self.function.fields.update({field.argument: field})
        return None
class Form(object):
    """HTML form for a function, and the page produced by submitting it."""

    def __init__(self, function):
        """HTML form and its resulting page (from function).

        Args:
            function (func): Function to use for generating the HTML form
                and its resulting page.
        """
        self.function = function
        # Make sure the function carries form metadata (name/args/fields).
        if not hasattr(function, 'fields'):
            relations = FuncPrep(function)
        self.evaluation = None
        self.params = None

    def to_form(self):
        """Returns the HTML input form for a function.

        Returns:
            str: HTML input form, for submitting arguments to this
                very form.
        """
        # Create HTML input labels and respective fields, based on
        # (keyword) argument names and any Fields declared via FuncPrep.
        fields = []
        kwargs_keys = [k for k in self.function.kwargs]

        for argument_name in (self.function.args + kwargs_keys):
            if argument_name in self.function.fields:
                fields.append(self.function.fields[argument_name].to_html())
            else:
                # No explicit Field declared; fall back to a text input.
                fields.append(Field(self.function, argument_name).to_html())

        # build form_parts...
        form_parts = {
            'title': var_title(self.function.name),
            'fields': '\n'.join(fields),
            # Section describing this form, based on docstring.
            'about': docstring_html(self.function) or '',
            # Function name as hidden field so it may trigger the
            # evaluation of the function upon form submission!
            'hidden trigger field': ('<input type="hidden" id="{0}"'
                                     'name="{0}" value="true">'
                                     .format(self.function.name)),
        }
        return FORM.format(**form_parts)

    def evaluate(self, form):
        """HTML page resulting from submitting this form's input form.

        Args:
            form (dict): See get_params().

        Returns:
            None: Sets self.params and self.evaluation.
        """
        # If the function name is not in POST/GET, we're providing
        # the HTML input form.
        if self.function.name not in form:
            self.evaluation = self.to_form()
            self.params = None
            return None

        # Find correlated arguments in form. Assume text input, unless
        # noted in the function's field map.
        # BUGFIX: use form.get() -- form[arg] raised KeyError for a
        # missing field, which made the missing-arguments message below
        # unreachable.
        arg_values = ([form.get(arg) for arg in self.function.args]
                      if self.function.args else None)
        kwargs = ({k: form[k] for k in self.function.kwargs if k in form}
                  if self.function.kwargs else None)

        # REQUIRED field missing in POST/GET.
        # BUGFIX: guard against arg_values being None for functions
        # without positional args (`None in None` raised TypeError).
        if arg_values is not None and None in arg_values:
            missing_args = []
            for i, value in enumerate(arg_values):
                if value is None:
                    # BUGFIX: was self.args[i]; Form has no .args attribute.
                    missing_args.append(self.function.args[i])
            message = ERROR_MISSING_ARGS % ', '.join(missing_args)
            self.evaluation = message
            self.params = form
            return None

        if arg_values and kwargs:
            evaluation = self.function(*arg_values, **kwargs)
        elif arg_values:
            evaluation = self.function(*arg_values)
        elif kwargs:
            evaluation = self.function(**kwargs)
        else:
            # BUGFIX: a function taking no arguments at all previously
            # left `evaluation` unbound and crashed with NameError below.
            evaluation = self.function()

        # Now we must analyze the evaluation of the function, in order
        # to properly format the results to HTML.

        # ... Evaluation is a string; just use paragraph formatting.
        if isinstance(evaluation, str):
            self.evaluation = paragraphs(evaluation)
            self.params = form
            return None
        # ... Evaluation is iterable sequence; further options below...
        elif (isinstance(evaluation, list)
              or isinstance(evaluation, tuple)):
            # Evaluation is an iterable sequence of dictionaries?
            if isinstance(evaluation[0], dict):
                possible_table = iter_dicts_table(evaluation, check=True)
                if possible_table:
                    self.evaluation = possible_table
                    self.params = form
                    return None
        # Evaluation is simply a dictionary! Create a definition list.
        elif isinstance(evaluation, dict):
            # TODO: definition-list rendering not implemented yet; falls
            # through to the exception below.
            pass

        # This SHOULDN'T be possible.
        raise Exception('Unhandled evaluation type!')
class FormHandler(object):
    """Build and evaluate forms for a whole collection of functions."""

    def __init__(self, *args):
        """Note: move TPL bull over here?"""
        self.functions = args
        self.relations = {}
        for function in args:
            prepared = FuncPrep(function)
            # Expose each prepared function as an attribute by name.
            setattr(self, prepared.function.name, prepared)

    def html(self, replacements=None):
        """Just form_handler for multiple functions, while returning
        ONLY the function evaluation on POST/GET.
        """
        submitted = get_params()
        chunks = []

        for function in self.functions:
            handler = Form(function)
            handler.evaluate(submitted)

            # Truthy params means this function's form was submitted:
            # return only its evaluation, plus a way back to the form.
            if handler.params:
                return handler.evaluation + BACK_TO_INPUT
            chunks.append(handler.evaluation)

        return ''.join(chunks)
| |
# urllib3/response.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import logging
import zlib
from .exceptions import DecodeError
from .packages.six import string_types as basestring, binary_type
log = logging.getLogger(__name__)
class DeflateDecoder(object):
    """Decompressor for 'deflate' bodies, tolerant of both encodings.

    Some servers send a zlib-wrapped (RFC 1950) stream for
    'Content-Encoding: deflate', others send a raw (RFC 1951) stream.
    We try the zlib-wrapped variant first and, on the first zlib error,
    replay every byte seen so far through a raw-deflate decompressor.
    """
    def __init__(self):
        self._first_try = True  # still on the zlib-wrapped attempt
        self._data = binary_type()  # bytes buffered for a possible replay
        self._obj = zlib.decompressobj()
    def __getattr__(self, name):
        # Delegate anything we don't define (flush, unused_data, ...) to
        # the underlying zlib decompressor object.
        return getattr(self._obj, name)
    def decompress(self, data):
        if not self._first_try:
            return self._obj.decompress(data)
        # Buffer the input so it can be replayed if the zlib-wrapped
        # guess turns out to be wrong.
        self._data += data
        try:
            return self._obj.decompress(data)
        except zlib.error:
            self._first_try = False
            # Negative wbits: expect a raw deflate stream (no header).
            self._obj = zlib.decompressobj(-zlib.MAX_WBITS)
            try:
                return self.decompress(self._data)
            finally:
                # Replay attempted; the buffer is no longer needed.
                self._data = None
def _get_decoder(mode):
if mode == 'gzip':
return zlib.decompressobj(16 + zlib.MAX_WBITS)
return DeflateDecoder()
class HTTPResponse(object):
    """
    HTTP Response container.

    Backwards-compatible to httplib's HTTPResponse but the response ``body`` is
    loaded and decoded on-demand when the ``data`` property is accessed.

    Extra parameters for behaviour not present in httplib.HTTPResponse:

    :param preload_content:
        If True, the response's body will be preloaded during construction.

    :param decode_content:
        If True, attempts to decode specific content-encoding's based on headers
        (like 'gzip' and 'deflate') will be skipped and raw data will be used
        instead.

    :param original_response:
        When this HTTPResponse wrapper is generated from an httplib.HTTPResponse
        object, it's convenient to include the original for debug purposes. It's
        otherwise unused.
    """

    # Content-encodings we know how to decode transparently.
    CONTENT_DECODERS = ['gzip', 'deflate']

    def __init__(self, body='', headers=None, status=0, version=0, reason=None,
                 strict=0, preload_content=True, decode_content=True,
                 original_response=None, pool=None, connection=None):
        self.headers = headers or {}
        self.status = status
        self.version = version
        self.reason = reason
        self.strict = strict
        self.decode_content = decode_content

        self._decoder = None  # created lazily in read()
        # A string body is cached directly; file-like bodies go via _fp.
        self._body = body if body and isinstance(body, basestring) else None
        self._fp = None
        self._original_response = original_response

        # Pool/connection are kept so release_conn() can return the
        # connection after the body has been fully read.
        self._pool = pool
        self._connection = connection

        if hasattr(body, 'read'):
            self._fp = body

        if preload_content and not self._body:
            self._body = self.read(decode_content=decode_content)

    def get_redirect_location(self):
        """
        Should we redirect and where to?

        :returns: Truthy redirect location string if we got a redirect status
            code and valid location. ``None`` if redirect status and no
            location. ``False`` if not a redirect status code.
        """
        if self.status in [301, 302, 303, 307]:
            return self.headers.get('location')

        return False

    def release_conn(self):
        # Return the underlying connection to its pool, once.
        if not self._pool or not self._connection:
            return

        self._pool._put_conn(self._connection)
        self._connection = None

    @property
    def data(self):
        # For backwords-compat with earlier urllib3 0.4 and earlier.
        if self._body:
            return self._body

        if self._fp:
            # Cache so repeated .data accesses keep working after the
            # file object is exhausted.
            return self.read(cache_content=True)

    def read(self, amt=None, decode_content=None, cache_content=False):
        """
        Similar to :meth:`httplib.HTTPResponse.read`, but with two additional
        parameters: ``decode_content`` and ``cache_content``.

        :param amt:
            How much of the content to read. If specified, caching is skipped
            because it doesn't make sense to cache partial content as the full
            response.

        :param decode_content:
            If True, will attempt to decode the body based on the
            'content-encoding' header.

        :param cache_content:
            If True, will save the returned data such that the same result is
            returned despite of the state of the underlying file object. This
            is useful if you want the ``.data`` property to continue working
            after having ``.read()`` the file object. (Overridden if ``amt`` is
            set.)
        """
        # Note: content-encoding value should be case-insensitive, per RFC 2616
        # Section 3.5
        content_encoding = self.headers.get('content-encoding', '').lower()
        if self._decoder is None:
            if content_encoding in self.CONTENT_DECODERS:
                self._decoder = _get_decoder(content_encoding)
        if decode_content is None:
            # Fall back to the instance-wide default set in __init__.
            decode_content = self.decode_content

        if self._fp is None:
            return

        flush_decoder = False

        try:
            if amt is None:
                # cStringIO doesn't like amt=None
                data = self._fp.read()
                flush_decoder = True
            else:
                cache_content = False
                data = self._fp.read(amt)
                if amt != 0 and not data:  # Platform-specific: Buggy versions of Python.
                    # Close the connection when no data is returned
                    #
                    # This is redundant to what httplib/http.client _should_
                    # already do.  However, versions of python released before
                    # December 15, 2012 (http://bugs.python.org/issue16298) do not
                    # properly close the connection in all cases. There is no harm
                    # in redundantly calling close.
                    self._fp.close()
                    flush_decoder = True

            try:
                if decode_content and self._decoder:
                    data = self._decoder.decompress(data)
            except (IOError, zlib.error):
                raise DecodeError("Received response with content-encoding: %s, but "
                                  "failed to decode it." % content_encoding)

            if flush_decoder and self._decoder:
                # Drain whatever the decoder still buffers at end of stream.
                buf = self._decoder.decompress(binary_type())
                data += buf + self._decoder.flush()

            if cache_content:
                self._body = data

            return data

        finally:
            # Once the wrapped httplib response is done, hand the
            # connection back to the pool.
            if self._original_response and self._original_response.isclosed():
                self.release_conn()

    @classmethod
    def from_httplib(ResponseCls, r, **response_kw):
        """
        Given an :class:`httplib.HTTPResponse` instance ``r``, return a
        corresponding :class:`urllib3.response.HTTPResponse` object.

        Remaining parameters are passed to the HTTPResponse constructor, along
        with ``original_response=r``.
        """
        # Normalize headers between different versions of Python
        headers = {}
        for k, v in r.getheaders():
            # Python 3: Header keys are returned capitalised
            k = k.lower()

            has_value = headers.get(k)
            if has_value:  # Python 3: Repeating header keys are unmerged.
                v = ', '.join([has_value, v])

            headers[k] = v

        # HTTPResponse objects in Python 3 don't have a .strict attribute
        strict = getattr(r, 'strict', 0)
        return ResponseCls(body=r,
                           headers=headers,
                           status=r.status,
                           version=r.version,
                           reason=r.reason,
                           strict=strict,
                           original_response=r,
                           **response_kw)

    # Backwards-compatibility methods for httplib.HTTPResponse
    def getheaders(self):
        return self.headers

    def getheader(self, name, default=None):
        return self.headers.get(name, default)
| |
from __future__ import unicode_literals
import datetime
from django.contrib import admin
from django.contrib.admin.models import LogEntry
from django.contrib.admin.options import IncorrectLookupParameters
from django.contrib.admin.templatetags.admin_list import pagination
from django.contrib.admin.tests import AdminSeleniumTestCase
from django.contrib.admin.views.main import ALL_VAR, SEARCH_VAR, ChangeList
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.db import connection
from django.template import Context, Template
from django.test import TestCase, ignore_warnings, override_settings
from django.test.client import RequestFactory
from django.test.utils import CaptureQueriesContext
from django.urls import reverse
from django.utils import formats, six
from django.utils.deprecation import RemovedInDjango20Warning
from .admin import (
BandAdmin, ChildAdmin, ChordsBandAdmin, ConcertAdmin,
CustomPaginationAdmin, CustomPaginator, DynamicListDisplayChildAdmin,
DynamicListDisplayLinksChildAdmin, DynamicListFilterChildAdmin,
DynamicSearchFieldsChildAdmin, EmptyValueChildAdmin, EventAdmin,
FilteredChildAdmin, GroupAdmin, InvitationAdmin,
NoListDisplayLinksParentAdmin, ParentAdmin, QuartetAdmin, SwallowAdmin,
site as custom_site,
)
from .models import (
Band, Child, ChordsBand, ChordsMusician, Concert, CustomIdUser, Event,
Genre, Group, Invitation, Membership, Musician, OrderedObject, Parent,
Quartet, Swallow, SwallowOneToOne, UnorderedObject,
)
def get_changelist_args(modeladmin, **kwargs):
    """Build the positional argument tuple for ChangeList(request, model, ...).

    Each value defaults to the corresponding *modeladmin* attribute and
    may be overridden by keyword; any leftover keyword is an error.
    """
    attr_names = (
        'list_display',
        'list_display_links',
        'list_filter',
        'date_hierarchy',
        'search_fields',
        'list_select_related',
        'list_per_page',
        'list_max_show_all',
        'list_editable',
    )
    values = [kwargs.pop(name, getattr(modeladmin, name)) for name in attr_names]
    values.append(modeladmin)
    assert not kwargs, "Unexpected kwarg %s" % kwargs
    return tuple(values)
@override_settings(ROOT_URLCONF="admin_changelist.urls")
class ChangeListTests(TestCase):
    def setUp(self):
        # Fresh RequestFactory per test: fabricates GET requests for
        # ChangeList without going through URL resolution or middleware.
        self.factory = RequestFactory()
    def _create_superuser(self, username):
        # Helper: minimal superuser with fixed email/password.
        return User.objects.create_superuser(username=username, email='a@b.com', password='xxx')
    def _mocked_authenticated_request(self, url, user):
        """Return a GET request for *url* with *user* attached directly,
        bypassing the authentication middleware."""
        request = self.factory.get(url)
        request.user = user
        return request
def test_select_related_preserved(self):
"""
Regression test for #10348: ChangeList.get_queryset() shouldn't
overwrite a custom select_related provided by ModelAdmin.get_queryset().
"""
m = ChildAdmin(Child, custom_site)
request = self.factory.get('/child/')
cl = ChangeList(
request, Child,
*get_changelist_args(m, list_select_related=m.get_list_select_related(request))
)
self.assertEqual(cl.queryset.query.select_related, {'parent': {}})
def test_select_related_as_tuple(self):
ia = InvitationAdmin(Invitation, custom_site)
request = self.factory.get('/invitation/')
cl = ChangeList(
request, Child,
*get_changelist_args(ia, list_select_related=ia.get_list_select_related(request))
)
self.assertEqual(cl.queryset.query.select_related, {'player': {}})
def test_select_related_as_empty_tuple(self):
ia = InvitationAdmin(Invitation, custom_site)
ia.list_select_related = ()
request = self.factory.get('/invitation/')
cl = ChangeList(
request, Child,
*get_changelist_args(ia, list_select_related=ia.get_list_select_related(request))
)
self.assertIs(cl.queryset.query.select_related, False)
def test_get_select_related_custom_method(self):
class GetListSelectRelatedAdmin(admin.ModelAdmin):
list_display = ('band', 'player')
def get_list_select_related(self, request):
return ('band', 'player')
ia = GetListSelectRelatedAdmin(Invitation, custom_site)
request = self.factory.get('/invitation/')
cl = ChangeList(
request, Child,
*get_changelist_args(ia, list_select_related=ia.get_list_select_related(request))
)
self.assertEqual(cl.queryset.query.select_related, {'player': {}, 'band': {}})
    def test_result_list_empty_changelist_value(self):
        """
        Regression test for #14982: EMPTY_CHANGELIST_VALUE should be honored
        for relationship fields
        """
        new_child = Child.objects.create(name='name', parent=None)
        request = self.factory.get('/child/')
        m = ChildAdmin(Child, custom_site)
        cl = ChangeList(request, Child, *get_changelist_args(m))
        # No list_editable in play; the template only needs `cl`.
        cl.formset = None
        template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
        context = Context({'cl': cl})
        table_output = template.render(context)
        link = reverse('admin:admin_changelist_child_change', args=(new_child.id,))
        # The NULL parent must render as "-" (the empty changelist value).
        row_html = (
            '<tbody><tr class="row1"><th class="field-name"><a href="%s">name</a></th>'
            '<td class="field-parent nowrap">-</td></tr></tbody>' % link
        )
        self.assertNotEqual(table_output.find(row_html), -1, 'Failed to find expected row element: %s' % table_output)
def test_result_list_set_empty_value_display_on_admin_site(self):
"""
Empty value display can be set on AdminSite.
"""
new_child = Child.objects.create(name='name', parent=None)
request = self.factory.get('/child/')
# Set a new empty display value on AdminSite.
admin.site.empty_value_display = '???'
m = ChildAdmin(Child, admin.site)
cl = ChangeList(request, Child, *get_changelist_args(m))
cl.formset = None
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
link = reverse('admin:admin_changelist_child_change', args=(new_child.id,))
row_html = (
'<tbody><tr class="row1"><th class="field-name"><a href="%s">name</a></th>'
'<td class="field-parent nowrap">???</td></tr></tbody>' % link
)
self.assertNotEqual(table_output.find(row_html), -1, 'Failed to find expected row element: %s' % table_output)
    def test_result_list_set_empty_value_display_in_model_admin(self):
        """
        Empty value display can be set in ModelAdmin or individual fields.
        """
        new_child = Child.objects.create(name='name', parent=None)
        request = self.factory.get('/child/')
        # EmptyValueChildAdmin customizes empty values per-field.
        m = EmptyValueChildAdmin(Child, admin.site)
        cl = ChangeList(request, Child, *get_changelist_args(m))
        cl.formset = None
        template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
        context = Context({'cl': cl})
        table_output = template.render(context)
        link = reverse('admin:admin_changelist_child_change', args=(new_child.id,))
        # One column uses a dagger for empty, the other "-empty-".
        row_html = (
            '<tbody><tr class="row1"><th class="field-name"><a href="%s">name</a></th>'
            '<td class="field-age_display">&dagger;</td><td class="field-age">-empty-</td></tr></tbody>' % link
        )
        self.assertNotEqual(table_output.find(row_html), -1, 'Failed to find expected row element: %s' % table_output)
    def test_result_list_html(self):
        """
        Inclusion tag result_list generates a table when with default
        ModelAdmin settings.
        """
        new_parent = Parent.objects.create(name='parent')
        new_child = Child.objects.create(name='name', parent=new_parent)
        request = self.factory.get('/child/')
        m = ChildAdmin(Child, custom_site)
        cl = ChangeList(request, Child, *get_changelist_args(m))
        # No list_editable formset needed for plain rendering.
        cl.formset = None
        template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
        context = Context({'cl': cl})
        table_output = template.render(context)
        link = reverse('admin:admin_changelist_child_change', args=(new_child.id,))
        # Related parent renders via its default str() ("Parent object").
        row_html = (
            '<tbody><tr class="row1"><th class="field-name"><a href="%s">name</a></th>'
            '<td class="field-parent nowrap">Parent object</td></tr></tbody>' % link
        )
        self.assertNotEqual(table_output.find(row_html), -1, 'Failed to find expected row element: %s' % table_output)
def test_result_list_editable_html(self):
    """
    Regression tests for #11791: Inclusion tag result_list generates a
    table and this checks that the items are nested within the table
    element tags.
    Also a regression test for #13599, verifies that hidden fields
    when list_editable is enabled are rendered in a div outside the
    table.
    """
    new_parent = Parent.objects.create(name='parent')
    new_child = Child.objects.create(name='name', parent=new_parent)
    request = self.factory.get('/child/')
    m = ChildAdmin(Child, custom_site)
    # Test with list_editable fields
    m.list_display = ['id', 'name', 'parent']
    m.list_display_links = ['id']
    m.list_editable = ['name']
    cl = ChangeList(request, Child, *get_changelist_args(m))
    # list_editable requires a bound formset on the ChangeList before rendering.
    FormSet = m.get_changelist_formset(request)
    cl.formset = FormSet(queryset=cl.result_list)
    template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
    context = Context({'cl': cl})
    table_output = template.render(context)
    # make sure that hidden fields are in the correct place
    hiddenfields_div = (
        '<div class="hiddenfields">'
        '<input type="hidden" name="form-0-id" value="%d" id="id_form-0-id" />'
        '</div>'
    ) % new_child.id
    self.assertInHTML(hiddenfields_div, table_output, msg_prefix='Failed to find hidden fields')
    # make sure that list editable fields are rendered in divs correctly
    editable_name_field = (
        '<input name="form-0-name" value="name" class="vTextField" '
        'maxlength="30" type="text" id="id_form-0-name" />'
    )
    self.assertInHTML(
        '<td class="field-name">%s</td>' % editable_name_field,
        table_output,
        msg_prefix='Failed to find "name" list_editable field',
    )
def test_result_list_editable(self):
    """
    Regression test for #14312: list_editable with pagination
    """
    new_parent = Parent.objects.create(name='parent')
    for i in range(200):
        Child.objects.create(name='name %s' % i, parent=new_parent)
    request = self.factory.get('/child/', data={'p': -1})  # Anything outside range
    m = ChildAdmin(Child, custom_site)
    # Test with list_editable fields
    m.list_display = ['id', 'name', 'parent']
    m.list_display_links = ['id']
    m.list_editable = ['name']
    # An out-of-range page number must raise rather than silently clamp.
    with self.assertRaises(IncorrectLookupParameters):
        ChangeList(request, Child, *get_changelist_args(m))
@ignore_warnings(category=RemovedInDjango20Warning)
def test_result_list_with_allow_tags(self):
    """
    Test for deprecation of allow_tags attribute

    A list_display callable with ``allow_tags = True`` must render its
    return value unescaped (deprecated behavior, hence the suppressed
    RemovedInDjango20Warning).
    """
    new_parent = Parent.objects.create(name='parent')
    for i in range(2):
        Child.objects.create(name='name %s' % i, parent=new_parent)
    request = self.factory.get('/child/')
    m = ChildAdmin(Child, custom_site)

    def custom_method(self, obj=None):
        return 'Unsafe html <br />'
    custom_method.allow_tags = True
    # Add custom method with allow_tags attribute
    m.custom_method = custom_method
    m.list_display = ['id', 'name', 'parent', 'custom_method']
    cl = ChangeList(request, Child, *get_changelist_args(m))
    FormSet = m.get_changelist_formset(request)
    cl.formset = FormSet(queryset=cl.result_list)
    template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
    context = Context({'cl': cl})
    table_output = template.render(context)
    # The raw "<br />" must survive into the output, i.e. not be escaped.
    custom_field_html = '<td class="field-custom_method">Unsafe html <br /></td>'
    self.assertInHTML(custom_field_html, table_output)
def test_custom_paginator(self):
    """A ModelAdmin's custom paginator class is used by the ChangeList."""
    new_parent = Parent.objects.create(name='parent')
    for i in range(200):
        Child.objects.create(name='name %s' % i, parent=new_parent)
    request = self.factory.get('/child/')
    m = CustomPaginationAdmin(Child, custom_site)
    cl = ChangeList(request, Child, *get_changelist_args(m))
    cl.get_results(request)
    self.assertIsInstance(cl.paginator, CustomPaginator)
def test_distinct_for_m2m_in_list_filter(self):
    """
    Regression test for #13902: When using a ManyToMany in list_filter,
    results shouldn't appear more than once. Basic ManyToMany.
    """
    blues = Genre.objects.create(name='Blues')
    band = Band.objects.create(name='B.B. King Review', nr_of_members=11)
    # Adding the same genre twice would yield duplicate JOIN rows
    # without distinct().
    band.genres.add(blues)
    band.genres.add(blues)
    m = BandAdmin(Band, custom_site)
    request = self.factory.get('/band/', data={'genres': blues.pk})
    cl = ChangeList(request, Band, *get_changelist_args(m))
    cl.get_results(request)
    # There's only one Group instance
    self.assertEqual(cl.result_count, 1)
def test_distinct_for_through_m2m_in_list_filter(self):
    """
    Regression test for #13902: When using a ManyToMany in list_filter,
    results shouldn't appear more than once. With an intermediate model.
    """
    lead = Musician.objects.create(name='Vox')
    band = Group.objects.create(name='The Hype')
    # Two memberships for the same musician -> duplicate JOIN rows
    # unless the changelist query applies distinct().
    Membership.objects.create(group=band, music=lead, role='lead voice')
    Membership.objects.create(group=band, music=lead, role='bass player')
    m = GroupAdmin(Group, custom_site)
    request = self.factory.get('/group/', data={'members': lead.pk})
    cl = ChangeList(request, Group, *get_changelist_args(m))
    cl.get_results(request)
    # There's only one Group instance
    self.assertEqual(cl.result_count, 1)
def test_distinct_for_through_m2m_at_second_level_in_list_filter(self):
    """
    When using a ManyToMany in list_filter at the second level behind a
    ForeignKey, distinct() must be called and results shouldn't appear more
    than once.
    """
    lead = Musician.objects.create(name='Vox')
    band = Group.objects.create(name='The Hype')
    Concert.objects.create(name='Woodstock', group=band)
    # Duplicate memberships behind the Concert -> Group FK would multiply
    # rows without distinct().
    Membership.objects.create(group=band, music=lead, role='lead voice')
    Membership.objects.create(group=band, music=lead, role='bass player')
    m = ConcertAdmin(Concert, custom_site)
    request = self.factory.get('/concert/', data={'group__members': lead.pk})
    cl = ChangeList(request, Concert, *get_changelist_args(m))
    cl.get_results(request)
    # There's only one Concert instance
    self.assertEqual(cl.result_count, 1)
def test_distinct_for_inherited_m2m_in_list_filter(self):
    """
    Regression test for #13902: When using a ManyToMany in list_filter,
    results shouldn't appear more than once. Model managed in the
    admin inherits from the one that defines the relationship.
    """
    lead = Musician.objects.create(name='John')
    four = Quartet.objects.create(name='The Beatles')
    # Two memberships for the same musician on the inherited relation.
    Membership.objects.create(group=four, music=lead, role='lead voice')
    Membership.objects.create(group=four, music=lead, role='guitar player')
    m = QuartetAdmin(Quartet, custom_site)
    request = self.factory.get('/quartet/', data={'members': lead.pk})
    cl = ChangeList(request, Quartet, *get_changelist_args(m))
    cl.get_results(request)
    # There's only one Quartet instance
    self.assertEqual(cl.result_count, 1)
def test_distinct_for_m2m_to_inherited_in_list_filter(self):
    """
    Regression test for #13902: When using a ManyToMany in list_filter,
    results shouldn't appear more than once. Target of the relationship
    inherits from another.
    """
    lead = ChordsMusician.objects.create(name='Player A')
    three = ChordsBand.objects.create(name='The Chords Trio')
    # Two invitations for the same player -> duplicate JOIN rows without
    # distinct().
    Invitation.objects.create(band=three, player=lead, instrument='guitar')
    Invitation.objects.create(band=three, player=lead, instrument='bass')
    m = ChordsBandAdmin(ChordsBand, custom_site)
    request = self.factory.get('/chordsband/', data={'members': lead.pk})
    cl = ChangeList(request, ChordsBand, *get_changelist_args(m))
    cl.get_results(request)
    # There's only one ChordsBand instance
    self.assertEqual(cl.result_count, 1)
def test_distinct_for_non_unique_related_object_in_list_filter(self):
    """
    Regression tests for #15819: If a field listed in list_filters
    is a non-unique related object, distinct() must be called.
    """
    parent = Parent.objects.create(name='Mary')
    # Two children with the same name
    Child.objects.create(parent=parent, name='Daniel')
    Child.objects.create(parent=parent, name='Daniel')
    m = ParentAdmin(Parent, custom_site)
    request = self.factory.get('/parent/', data={'child__name': 'Daniel'})
    cl = ChangeList(request, Parent, *get_changelist_args(m))
    # Make sure distinct() was called
    self.assertEqual(cl.queryset.count(), 1)
def test_distinct_for_non_unique_related_object_in_search_fields(self):
    """
    Regression tests for #15819: If a field listed in search_fields
    is a non-unique related object, distinct() must be called.
    """
    parent = Parent.objects.create(name='Mary')
    # Both children match the case-insensitive search term 'daniel'.
    Child.objects.create(parent=parent, name='Danielle')
    Child.objects.create(parent=parent, name='Daniel')
    m = ParentAdmin(Parent, custom_site)
    request = self.factory.get('/parent/', data={SEARCH_VAR: 'daniel'})
    cl = ChangeList(request, Parent, *get_changelist_args(m))
    # Make sure distinct() was called
    self.assertEqual(cl.queryset.count(), 1)
def test_distinct_for_many_to_many_at_second_level_in_search_fields(self):
    """
    When using a ManyToMany in search_fields at the second level behind a
    ForeignKey, distinct() must be called and results shouldn't appear more
    than once.
    """
    lead = Musician.objects.create(name='Vox')
    band = Group.objects.create(name='The Hype')
    Concert.objects.create(name='Woodstock', group=band)
    # Duplicate memberships -> duplicate rows unless distinct() is applied
    # to the search queryset.
    Membership.objects.create(group=band, music=lead, role='lead voice')
    Membership.objects.create(group=band, music=lead, role='bass player')
    m = ConcertAdmin(Concert, custom_site)
    request = self.factory.get('/concert/', data={SEARCH_VAR: 'vox'})
    cl = ChangeList(request, Concert, *get_changelist_args(m))
    # There's only one Concert instance
    self.assertEqual(cl.queryset.count(), 1)
def test_pk_in_search_fields(self):
    """Searching on a related model's pk ('group__pk') filters correctly."""
    band = Group.objects.create(name='The Hype')
    Concert.objects.create(name='Woodstock', group=band)
    m = ConcertAdmin(Concert, custom_site)
    m.search_fields = ['group__pk']
    request = self.factory.get('/concert/', data={SEARCH_VAR: band.pk})
    cl = ChangeList(request, Concert, *get_changelist_args(m))
    self.assertEqual(cl.queryset.count(), 1)
    # A pk that matches no group must yield an empty result set.
    request = self.factory.get('/concert/', data={SEARCH_VAR: band.pk + 5})
    cl = ChangeList(request, Concert, *get_changelist_args(m))
    self.assertEqual(cl.queryset.count(), 0)
def test_no_distinct_for_m2m_in_list_filter_without_params(self):
    """
    If a ManyToManyField is in list_filter but isn't in any lookup params,
    the changelist's query shouldn't have distinct.
    """
    m = BandAdmin(Band, custom_site)
    # Neither no params nor an unrelated param should trigger distinct().
    for lookup_params in ({}, {'name': 'test'}):
        request = self.factory.get('/band/', lookup_params)
        cl = ChangeList(request, Band, *get_changelist_args(m))
        self.assertFalse(cl.queryset.query.distinct)

    # A ManyToManyField in params does have distinct applied.
    request = self.factory.get('/band/', {'genres': '0'})
    cl = ChangeList(request, Band, *get_changelist_args(m))
    self.assertTrue(cl.queryset.query.distinct)
def test_pagination(self):
    """
    Regression tests for #12893: Pagination in admins changelist doesn't
    use queryset set by modeladmin.
    """
    parent = Parent.objects.create(name='anything')
    # 30 plain + 30 'filtered' children; FilteredChildAdmin is expected
    # to narrow the queryset to half of them.
    for i in range(30):
        Child.objects.create(name='name %s' % i, parent=parent)
        Child.objects.create(name='filtered %s' % i, parent=parent)
    request = self.factory.get('/child/')

    # Test default queryset
    m = ChildAdmin(Child, custom_site)
    cl = ChangeList(request, Child, *get_changelist_args(m))
    self.assertEqual(cl.queryset.count(), 60)
    self.assertEqual(cl.paginator.count, 60)
    self.assertEqual(list(cl.paginator.page_range), [1, 2, 3, 4, 5, 6])

    # Test custom queryset
    m = FilteredChildAdmin(Child, custom_site)
    cl = ChangeList(request, Child, *get_changelist_args(m))
    self.assertEqual(cl.queryset.count(), 30)
    self.assertEqual(cl.paginator.count, 30)
    self.assertEqual(list(cl.paginator.page_range), [1, 2, 3])
def test_computed_list_display_localization(self):
    """
    Regression test for #13196: output of functions should be localized
    in the changelist.
    """
    superuser = User.objects.create_superuser(username='super', email='super@localhost', password='secret')
    self.client.force_login(superuser)
    event = Event.objects.create(date=datetime.date.today())
    response = self.client.get(reverse('admin:admin_changelist_event_changelist'))
    # The localized date must appear; the raw str() form must not.
    self.assertContains(response, formats.localize(event.date))
    self.assertNotContains(response, six.text_type(event.date))
def test_dynamic_list_display(self):
    """
    Regression tests for #14206: dynamic list_display support.
    """
    parent = Parent.objects.create(name='parent')
    for i in range(10):
        Child.objects.create(name='child %s' % i, parent=parent)
    user_noparents = self._create_superuser('noparents')
    user_parents = self._create_superuser('parents')

    # Test with user 'noparents'
    m = custom_site._registry[Child]
    request = self._mocked_authenticated_request('/child/', user_noparents)
    response = m.changelist_view(request)
    self.assertNotContains(response, 'Parent object')
    list_display = m.get_list_display(request)
    list_display_links = m.get_list_display_links(request, list_display)
    self.assertEqual(list_display, ['name', 'age'])
    self.assertEqual(list_display_links, ['name'])

    # Test with user 'parents'
    m = DynamicListDisplayChildAdmin(Child, custom_site)
    request = self._mocked_authenticated_request('/child/', user_parents)
    response = m.changelist_view(request)
    self.assertContains(response, 'Parent object')
    # Temporarily unregister so the default ChildAdmin can be registered
    # below without a conflict.
    custom_site.unregister(Child)
    list_display = m.get_list_display(request)
    list_display_links = m.get_list_display_links(request, list_display)
    self.assertEqual(list_display, ('parent', 'name', 'age'))
    self.assertEqual(list_display_links, ['parent'])

    # Test default implementation
    custom_site.register(Child, ChildAdmin)
    m = custom_site._registry[Child]
    request = self._mocked_authenticated_request('/child/', user_noparents)
    response = m.changelist_view(request)
    self.assertContains(response, 'Parent object')
def test_show_all(self):
    """
    The "show all" parameter displays every object only while the total
    count is at or below list_max_show_all; otherwise pagination applies.
    """
    parent = Parent.objects.create(name='anything')
    for i in range(30):
        Child.objects.create(name='name %s' % i, parent=parent)
        Child.objects.create(name='filtered %s' % i, parent=parent)

    # Add "show all" parameter to request
    request = self.factory.get('/child/', data={ALL_VAR: ''})

    # Test valid "show all" request (number of total objects is under max)
    m = ChildAdmin(Child, custom_site)
    m.list_max_show_all = 200
    # 200 is the max we'll pass to ChangeList
    cl = ChangeList(request, Child, *get_changelist_args(m))
    cl.get_results(request)
    self.assertEqual(len(cl.result_list), 60)

    # Test invalid "show all" request (number of total objects over max)
    # falls back to paginated pages
    m = ChildAdmin(Child, custom_site)
    m.list_max_show_all = 30
    # 30 is the max we'll pass to ChangeList for this test
    cl = ChangeList(request, Child, *get_changelist_args(m))
    cl.get_results(request)
    self.assertEqual(len(cl.result_list), 10)
def test_dynamic_list_display_links(self):
    """
    Regression tests for #16257: dynamic list_display_links support.
    """
    parent = Parent.objects.create(name='parent')
    for i in range(1, 10):
        Child.objects.create(id=i, name='child %s' % i, parent=parent, age=i)
    m = DynamicListDisplayLinksChildAdmin(Child, custom_site)
    superuser = self._create_superuser('superuser')
    request = self._mocked_authenticated_request('/child/', superuser)
    response = m.changelist_view(request)
    # Each row should link via the (dynamically chosen) 'age' column,
    # whose value equals the child's id here.
    for i in range(1, 10):
        link = reverse('admin:admin_changelist_child_change', args=(i,))
        self.assertContains(response, '<a href="%s">%s</a>' % (link, i))
    list_display = m.get_list_display(request)
    list_display_links = m.get_list_display_links(request, list_display)
    self.assertEqual(list_display, ('parent', 'name', 'age'))
    self.assertEqual(list_display_links, ['age'])
def test_no_list_display_links(self):
    """#15185 -- Allow no links from the 'change list' view grid."""
    p = Parent.objects.create(name='parent')
    m = NoListDisplayLinksParentAdmin(Parent, custom_site)
    superuser = self._create_superuser('superuser')
    request = self._mocked_authenticated_request('/parent/', superuser)
    response = m.changelist_view(request)
    # No change-form link should be rendered for the row.
    link = reverse('admin:admin_changelist_parent_change', args=(p.pk,))
    self.assertNotContains(response, '<a href="%s">' % link)
def test_tuple_list_display(self):
    """
    Regression test for #17128
    (ChangeList failing under Python 2.5 after r16319)
    """
    swallow = Swallow.objects.create(origin='Africa', load='12.34', speed='22.2')
    swallow2 = Swallow.objects.create(origin='Africa', load='12.34', speed='22.2')
    swallow_o2o = SwallowOneToOne.objects.create(swallow=swallow2)

    model_admin = SwallowAdmin(Swallow, custom_site)
    superuser = self._create_superuser('superuser')
    request = self._mocked_authenticated_request('/swallow/', superuser)
    response = model_admin.changelist_view(request)
    # just want to ensure it doesn't blow up during rendering
    self.assertContains(response, six.text_type(swallow.origin))
    self.assertContains(response, six.text_type(swallow.load))
    self.assertContains(response, six.text_type(swallow.speed))
    # Reverse one-to-one relations should work.
    # swallow has no related SwallowOneToOne, so its cell shows '-'.
    self.assertContains(response, '<td class="field-swallowonetoone">-</td>')
    self.assertContains(response, '<td class="field-swallowonetoone">%s</td>' % swallow_o2o)
def test_multiuser_edit(self):
    """
    Simultaneous edits of list_editable fields on the changelist by
    different users must not result in one user's edits creating a new
    object instead of modifying the correct existing object (#11313).
    """
    # To replicate this issue, simulate the following steps:
    # 1. User1 opens an admin changelist with list_editable fields.
    # 2. User2 edits object "Foo" such that it moves to another page in
    #    the pagination order and saves.
    # 3. User1 edits object "Foo" and saves.
    # 4. The edit made by User1 does not get applied to object "Foo" but
    #    instead is used to create a new object (bug).
    # For this test, order the changelist by the 'speed' attribute and
    # display 3 objects per page (SwallowAdmin.list_per_page = 3).

    # Setup the test to reflect the DB state after step 2 where User2 has
    # edited the first swallow object's speed from '4' to '1'.
    a = Swallow.objects.create(origin='Swallow A', load=4, speed=1)
    b = Swallow.objects.create(origin='Swallow B', load=2, speed=2)
    c = Swallow.objects.create(origin='Swallow C', load=5, speed=5)
    d = Swallow.objects.create(origin='Swallow D', load=9, speed=9)

    superuser = self._create_superuser('superuser')
    self.client.force_login(superuser)
    changelist_url = reverse('admin:admin_changelist_swallow_changelist')

    # Send the POST from User1 for step 3. It's still using the changelist
    # ordering from before User2's edits in step 2.
    data = {
        'form-TOTAL_FORMS': '3',
        'form-INITIAL_FORMS': '3',
        'form-MIN_NUM_FORMS': '0',
        'form-MAX_NUM_FORMS': '1000',
        'form-0-uuid': str(d.pk),
        'form-1-uuid': str(c.pk),
        'form-2-uuid': str(a.pk),
        'form-0-load': '9.0',
        'form-0-speed': '9.0',
        'form-1-load': '5.0',
        'form-1-speed': '5.0',
        'form-2-load': '5.0',
        'form-2-speed': '4.0',
        '_save': 'Save',
    }
    # NOTE(review): `extra={'o': '-2'}` looks intended to set the ordering
    # query parameter, but post() has no `extra` keyword parameter taking a
    # dict of GET params — verify whether this argument has any effect.
    response = self.client.post(changelist_url, data, follow=True, extra={'o': '-2'})

    # The object User1 edited in step 3 is displayed on the changelist and
    # has the correct edits applied.
    self.assertContains(response, '1 swallow was changed successfully.')
    self.assertContains(response, a.origin)
    a.refresh_from_db()
    self.assertEqual(a.load, float(data['form-2-load']))
    self.assertEqual(a.speed, float(data['form-2-speed']))
    b.refresh_from_db()
    self.assertEqual(b.load, 2)
    self.assertEqual(b.speed, 2)
    c.refresh_from_db()
    self.assertEqual(c.load, float(data['form-1-load']))
    self.assertEqual(c.speed, float(data['form-1-speed']))
    d.refresh_from_db()
    self.assertEqual(d.load, float(data['form-0-load']))
    self.assertEqual(d.speed, float(data['form-0-speed']))
    # No new swallows were created: count() issues a single COUNT query
    # instead of fetching every row as len(...all()) would.
    self.assertEqual(Swallow.objects.count(), 4)
def test_get_edited_object_ids(self):
    """_get_edited_object_pks() extracts the pks posted by the formset."""
    a = Swallow.objects.create(origin='Swallow A', load=4, speed=1)
    b = Swallow.objects.create(origin='Swallow B', load=2, speed=2)
    c = Swallow.objects.create(origin='Swallow C', load=5, speed=5)
    superuser = self._create_superuser('superuser')
    self.client.force_login(superuser)
    changelist_url = reverse('admin:admin_changelist_swallow_changelist')
    m = SwallowAdmin(Swallow, custom_site)
    data = {
        'form-TOTAL_FORMS': '3',
        'form-INITIAL_FORMS': '3',
        'form-MIN_NUM_FORMS': '0',
        'form-MAX_NUM_FORMS': '1000',
        'form-0-uuid': str(a.pk),
        'form-1-uuid': str(b.pk),
        'form-2-uuid': str(c.pk),
        'form-0-load': '9.0',
        'form-0-speed': '9.0',
        'form-1-load': '5.0',
        'form-1-speed': '5.0',
        'form-2-load': '5.0',
        'form-2-speed': '4.0',
        '_save': 'Save',
    }
    request = self.factory.post(changelist_url, data=data)
    pks = m._get_edited_object_pks(request, prefix='form')
    # Order is not guaranteed, so compare sorted.
    self.assertEqual(sorted(pks), sorted([str(a.pk), str(b.pk), str(c.pk)]))
def test_get_list_editable_queryset(self):
    """
    _get_list_editable_queryset() filters to the posted pks, and falls
    back to the unfiltered queryset when the posted data is invalid.
    """
    a = Swallow.objects.create(origin='Swallow A', load=4, speed=1)
    Swallow.objects.create(origin='Swallow B', load=2, speed=2)
    data = {
        'form-TOTAL_FORMS': '2',
        'form-INITIAL_FORMS': '2',
        'form-MIN_NUM_FORMS': '0',
        'form-MAX_NUM_FORMS': '1000',
        'form-0-uuid': str(a.pk),
        'form-0-load': '10',
        '_save': 'Save',
    }
    superuser = self._create_superuser('superuser')
    self.client.force_login(superuser)
    changelist_url = reverse('admin:admin_changelist_swallow_changelist')
    m = SwallowAdmin(Swallow, custom_site)
    request = self.factory.post(changelist_url, data=data)
    queryset = m._get_list_editable_queryset(request, prefix='form')
    self.assertEqual(queryset.count(), 1)
    # A value that can't be a pk makes the filter impossible to build.
    data['form-0-uuid'] = 'INVALD_PRIMARY_KEY'
    # The unfiltered queryset is returned if there's invalid data.
    request = self.factory.post(changelist_url, data=data)
    queryset = m._get_list_editable_queryset(request, prefix='form')
    self.assertEqual(queryset.count(), 2)
def test_changelist_view_list_editable_changed_objects_uses_filter(self):
    """list_editable edits use a filtered queryset to limit memory usage."""
    a = Swallow.objects.create(origin='Swallow A', load=4, speed=1)
    b = Swallow.objects.create(origin='Swallow B', load=2, speed=2)
    data = {
        'form-TOTAL_FORMS': '2',
        'form-INITIAL_FORMS': '2',
        'form-MIN_NUM_FORMS': '0',
        'form-MAX_NUM_FORMS': '1000',
        'form-0-uuid': str(a.pk),
        'form-0-load': '10',
        'form-1-uuid': str(b.pk),
        'form-1-load': '10',
        '_save': 'Save',
    }
    superuser = self._create_superuser('superuser')
    self.client.force_login(superuser)
    changelist_url = reverse('admin:admin_changelist_swallow_changelist')
    with CaptureQueriesContext(connection) as context:
        response = self.client.post(changelist_url, data=data)
        self.assertEqual(response.status_code, 200)
        # The formset's queryset should be restricted via WHERE ... IN (...)
        # rather than loading all rows.
        # NOTE(review): the index [4] assumes a fixed query order for this
        # view — fragile if the query plan changes.
        self.assertIn('WHERE', context.captured_queries[4]['sql'])
        self.assertIn('IN', context.captured_queries[4]['sql'])
        # Check only the first few characters since the UUID may have dashes.
        self.assertIn(str(a.pk)[:8], context.captured_queries[4]['sql'])
def test_deterministic_order_for_unordered_model(self):
    """
    The primary key is used in the ordering of the changelist's results to
    guarantee a deterministic order, even when the model doesn't have any
    default ordering defined (#17198).
    """
    superuser = self._create_superuser('superuser')

    for counter in range(1, 51):
        UnorderedObject.objects.create(id=counter, bool=True)

    class UnorderedObjectAdmin(admin.ModelAdmin):
        list_per_page = 10

    def check_results_order(ascending=False):
        # Walk all 5 pages and verify ids are strictly sequential in the
        # expected direction.
        custom_site.register(UnorderedObject, UnorderedObjectAdmin)
        model_admin = UnorderedObjectAdmin(UnorderedObject, custom_site)
        counter = 0 if ascending else 51
        for page in range(0, 5):
            request = self._mocked_authenticated_request('/unorderedobject/?p=%s' % page, superuser)
            response = model_admin.changelist_view(request)
            for result in response.context_data['cl'].result_list:
                counter += 1 if ascending else -1
                self.assertEqual(result.id, counter)
        custom_site.unregister(UnorderedObject)

    # When no order is defined at all, everything is ordered by '-pk'.
    check_results_order()

    # When an order field is defined but multiple records have the same
    # value for that field, make sure everything gets ordered by -pk as well.
    UnorderedObjectAdmin.ordering = ['bool']
    check_results_order()

    # When order fields are defined, including the pk itself, use them.
    UnorderedObjectAdmin.ordering = ['bool', '-pk']
    check_results_order()
    UnorderedObjectAdmin.ordering = ['bool', 'pk']
    check_results_order(ascending=True)
    UnorderedObjectAdmin.ordering = ['-id', 'bool']
    check_results_order()
    UnorderedObjectAdmin.ordering = ['id', 'bool']
    check_results_order(ascending=True)
def test_deterministic_order_for_model_ordered_by_its_manager(self):
    """
    The primary key is used in the ordering of the changelist's results to
    guarantee a deterministic order, even when the model has a manager that
    defines a default ordering (#17198).
    """
    superuser = self._create_superuser('superuser')

    for counter in range(1, 51):
        OrderedObject.objects.create(id=counter, bool=True, number=counter)

    class OrderedObjectAdmin(admin.ModelAdmin):
        list_per_page = 10

    def check_results_order(ascending=False):
        # Walk all 5 pages and verify ids are strictly sequential in the
        # expected direction.
        custom_site.register(OrderedObject, OrderedObjectAdmin)
        model_admin = OrderedObjectAdmin(OrderedObject, custom_site)
        counter = 0 if ascending else 51
        for page in range(0, 5):
            request = self._mocked_authenticated_request('/orderedobject/?p=%s' % page, superuser)
            response = model_admin.changelist_view(request)
            for result in response.context_data['cl'].result_list:
                counter += 1 if ascending else -1
                self.assertEqual(result.id, counter)
        custom_site.unregister(OrderedObject)

    # When no order is defined at all, use the model's default ordering (i.e. 'number')
    check_results_order(ascending=True)

    # When an order field is defined but multiple records have the same
    # value for that field, make sure everything gets ordered by -pk as well.
    OrderedObjectAdmin.ordering = ['bool']
    check_results_order()

    # When order fields are defined, including the pk itself, use them.
    OrderedObjectAdmin.ordering = ['bool', '-pk']
    check_results_order()
    OrderedObjectAdmin.ordering = ['bool', 'pk']
    check_results_order(ascending=True)
    OrderedObjectAdmin.ordering = ['-id', 'bool']
    check_results_order()
    OrderedObjectAdmin.ordering = ['id', 'bool']
    check_results_order(ascending=True)
def test_dynamic_list_filter(self):
    """
    Regression tests for ticket #17646: dynamic list_filter support.
    """
    parent = Parent.objects.create(name='parent')
    for i in range(10):
        Child.objects.create(name='child %s' % i, parent=parent)
    user_noparents = self._create_superuser('noparents')
    user_parents = self._create_superuser('parents')

    # Test with user 'noparents'
    m = DynamicListFilterChildAdmin(Child, custom_site)
    request = self._mocked_authenticated_request('/child/', user_noparents)
    response = m.changelist_view(request)
    self.assertEqual(response.context_data['cl'].list_filter, ['name', 'age'])

    # Test with user 'parents': gets the extra 'parent' filter.
    m = DynamicListFilterChildAdmin(Child, custom_site)
    request = self._mocked_authenticated_request('/child/', user_parents)
    response = m.changelist_view(request)
    self.assertEqual(response.context_data['cl'].list_filter, ('parent', 'name', 'age'))
def test_dynamic_search_fields(self):
    """get_search_fields() can vary search_fields per request."""
    child = self._create_superuser('child')
    m = DynamicSearchFieldsChildAdmin(Child, custom_site)
    request = self._mocked_authenticated_request('/child/', child)
    response = m.changelist_view(request)
    self.assertEqual(response.context_data['cl'].search_fields, ('name', 'age'))
def test_pagination_page_range(self):
    """
    Regression tests for ticket #15653: ensure the number of pages
    generated for changelist views are correct.
    """
    # instantiating and setting up ChangeList object
    m = GroupAdmin(Group, custom_site)
    request = self.factory.get('/group/')
    cl = ChangeList(request, Group, *get_changelist_args(m))
    per_page = cl.list_per_page = 10

    # Each tuple: (current page number, total objects, expected page range
    # where '.' marks an ellipsis gap).
    for page_num, objects_count, expected_page_range in [
        (0, per_page, []),
        (0, per_page * 2, list(range(2))),
        (5, per_page * 11, list(range(11))),
        (5, per_page * 12, [0, 1, 2, 3, 4, 5, 6, 7, 8, '.', 10, 11]),
        (6, per_page * 12, [0, 1, '.', 3, 4, 5, 6, 7, 8, 9, 10, 11]),
        (6, per_page * 13, [0, 1, '.', 3, 4, 5, 6, 7, 8, 9, '.', 11, 12]),
    ]:
        # assuming we have exactly `objects_count` objects
        Group.objects.all().delete()
        for i in range(objects_count):
            Group.objects.create(name='test band')

        # setting page number and calculating page range
        cl.page_num = page_num
        cl.get_results(request)
        real_page_range = pagination(cl)['page_range']
        self.assertListEqual(
            expected_page_range,
            list(real_page_range),
        )
def test_object_tools_displayed_no_add_permission(self):
    """
    When ModelAdmin.has_add_permission() returns False, the object-tools
    block is still shown.
    """
    superuser = self._create_superuser('superuser')
    m = EventAdmin(Event, custom_site)
    request = self._mocked_authenticated_request('/event/', superuser)
    self.assertFalse(m.has_add_permission(request))
    response = m.changelist_view(request)
    self.assertIn('<ul class="object-tools">', response.rendered_content)
    # The "Add" button inside the object-tools shouldn't appear.
    self.assertNotIn('Add ', response.rendered_content)
class AdminLogNodeTestCase(TestCase):
    """Tests for the {% get_admin_log %} template tag."""

    def test_get_admin_log_templatetag_custom_user(self):
        """
        Regression test for ticket #20088: admin log depends on User model
        having id field as primary key.
        The old implementation raised an AttributeError when trying to use
        the id field.
        """
        context = Context({'user': CustomIdUser()})
        template_string = '{% load log %}{% get_admin_log 10 as admin_log for_user user %}'
        template = Template(template_string)
        # Rendering should be u'' since this templatetag just logs,
        # it doesn't render any string.
        self.assertEqual(template.render(context), '')

    def test_get_admin_log_templatetag_no_user(self):
        """
        The {% get_admin_log %} tag should work without specifying a user.
        """
        user = User(username='jondoe', password='secret', email='super@example.com')
        user.save()
        ct = ContentType.objects.get_for_model(User)
        # action_flag=1 is an addition entry.
        LogEntry.objects.log_action(user.pk, ct.pk, user.pk, repr(user), 1)
        t = Template(
            '{% load log %}'
            '{% get_admin_log 100 as admin_log %}'
            '{% for entry in admin_log %}'
            '{{ entry|safe }}'
            '{% endfor %}'
        )
        self.assertEqual(t.render(Context({})), 'Added "<User: jondoe>".')
@override_settings(ROOT_URLCONF='admin_changelist.urls')
class SeleniumTests(AdminSeleniumTestCase):
    """Browser-level tests for changelist row selection behavior."""

    available_apps = ['admin_changelist'] + AdminSeleniumTestCase.available_apps

    def setUp(self):
        # One superuser both to log in with and to serve as the single
        # changelist row.
        User.objects.create_superuser(username='super', password='secret', email=None)

    def test_add_row_selection(self):
        """
        The status line for selected rows gets updated correctly (#22038).
        """
        self.admin_login(username='super', password='secret')
        self.selenium.get(self.live_server_url + reverse('admin:auth_user_changelist'))

        form_id = '#changelist-form'

        # Test amount of rows in the Changelist
        rows = self.selenium.find_elements_by_css_selector(
            '%s #result_list tbody tr' % form_id)
        self.assertEqual(len(rows), 1)

        # Test current selection
        selection_indicator = self.selenium.find_element_by_css_selector(
            '%s .action-counter' % form_id)
        self.assertEqual(selection_indicator.text, "0 of 1 selected")

        # Select a row and check again
        row_selector = self.selenium.find_element_by_css_selector(
            '%s #result_list tbody tr:first-child .action-select' % form_id)
        row_selector.click()
        self.assertEqual(selection_indicator.text, "1 of 1 selected")
| |
# postgresql/array.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from .base import ischema_names
from ...sql import expression, operators
from ...sql.base import SchemaEventTarget
from ... import types as sqltypes
# Optional: the uuid module may be absent on very old Pythons; UUID
# support is simply disabled in that case.
try:
    from uuid import UUID as _python_UUID
except ImportError:
    _python_UUID = None
def Any(other, arrexpr, operator=operators.eq):
    """A synonym for the :meth:`.ARRAY.Comparator.any` method.

    This method is legacy and is here for backwards-compatibility.

    .. seealso::

        :func:`.expression.any_`

    """
    return arrexpr.any(other, operator)
def All(other, arrexpr, operator=operators.eq):
    """A synonym for the :meth:`.ARRAY.Comparator.all` method.

    This method is legacy and is here for backwards-compatibility.

    .. seealso::

        :func:`.expression.all_`

    """
    return arrexpr.all(other, operator)
class array(expression.Tuple):
    """A Postgresql ARRAY literal.

    Produces an ``ARRAY[...]`` construct in SQL expressions, e.g.::

        from sqlalchemy.dialects.postgresql import array
        from sqlalchemy.dialects import postgresql
        from sqlalchemy import select, func

        stmt = select([
            array([1,2]) + array([3,4,5])
        ])

        print stmt.compile(dialect=postgresql.dialect())

    which renders::

        SELECT ARRAY[%(param_1)s, %(param_2)s] ||
            ARRAY[%(param_3)s, %(param_4)s, %(param_5)s] AS anon_1

    An :class:`.array` always has the datatype :class:`.ARRAY`; the
    "inner" member type is inferred from the values given, unless
    overridden with the ``type_`` keyword argument::

        array(['foo', 'bar'], type_=CHAR)

    .. versionadded:: 0.8 Added the :class:`~.postgresql.array` literal type.

    See also:

        :class:`.postgresql.ARRAY`

    """
    __visit_name__ = 'array'

    def __init__(self, clauses, **kw):
        # Let Tuple infer the member type from the clauses, then wrap
        # that inferred type in ARRAY.
        super(array, self).__init__(*clauses, **kw)
        self.type = ARRAY(self.type)

    def _bind_param(self, operator, obj):
        # Bind each element of the compared-to value individually, typed
        # against this array expression, and wrap them as a new array.
        bound = []
        for element in obj:
            param = expression.BindParameter(
                None, element,
                _compared_to_operator=operator,
                _compared_to_type=self.type,
                unique=True)
            bound.append(param)
        return array(bound)

    def self_group(self, against=None):
        # ANY/ALL and subscripting need the literal parenthesized to
        # compile unambiguously; everything else can use it as-is.
        if against in (operators.any_op,
                       operators.all_op,
                       operators.getitem):
            return expression.Grouping(self)
        return self
# PostgreSQL array containment / overlap operators, each bound at
# precedence 5 so they group like other comparison operators.
CONTAINS = operators.custom_op("@>", precedence=5)
CONTAINED_BY = operators.custom_op("<@", precedence=5)
OVERLAP = operators.custom_op("&&", precedence=5)
class ARRAY(SchemaEventTarget, sqltypes.Array):
    """Postgresql ARRAY type.

    .. versionchanged:: 1.1 The :class:`.postgresql.ARRAY` type is now
       a subclass of the core :class:`.Array` type.

    The :class:`.postgresql.ARRAY` type is constructed in the same way
    as the core :class:`.Array` type; a member type is required, and a
    number of dimensions is recommended if the type is to be used for more
    than one dimension::

        from sqlalchemy.dialects import postgresql

        mytable = Table("mytable", metadata,
                Column("data", postgresql.ARRAY(Integer, dimensions=2))
            )

    The :class:`.postgresql.ARRAY` type provides all operations defined on the
    core :class:`.Array` type, including support for "dimensions", indexed
    access, and simple matching such as :meth:`.Array.Comparator.any`
    and :meth:`.Array.Comparator.all`.  :class:`.postgresql.ARRAY` class also
    provides PostgreSQL-specific methods for containment operations, including
    :meth:`.postgresql.ARRAY.Comparator.contains`
    :meth:`.postgresql.ARRAY.Comparator.contained_by`,
    and :meth:`.postgresql.ARRAY.Comparator.overlap`, e.g.::

        mytable.c.data.contains([1, 2])

    The :class:`.postgresql.ARRAY` type may not be supported on all
    PostgreSQL DBAPIs; it is currently known to work on psycopg2 only.

    Additionally, the :class:`.postgresql.ARRAY` type does not work directly in
    conjunction with the :class:`.ENUM` type.  For a workaround, see the
    special type at :ref:`postgresql_array_of_enum`.

    .. seealso::

        :class:`.types.Array` - base array type

        :class:`.postgresql.array` - produces a literal array value.

    """

    class Comparator(sqltypes.Array.Comparator):
        """Define comparison operations for :class:`.ARRAY`.

        Note that these operations are in addition to those provided
        by the base :class:`.types.Array.Comparator` class, including
        :meth:`.types.Array.Comparator.any` and
        :meth:`.types.Array.Comparator.all`.

        """

        def contains(self, other, **kwargs):
            """Boolean expression.  Test if elements are a superset of the
            elements of the argument array expression.
            """
            # Emits the PG "@>" operator.
            return self.operate(CONTAINS, other, result_type=sqltypes.Boolean)

        def contained_by(self, other):
            """Boolean expression.  Test if elements are a proper subset of the
            elements of the argument array expression.
            """
            # Emits the PG "<@" operator.
            return self.operate(
                CONTAINED_BY, other, result_type=sqltypes.Boolean)

        def overlap(self, other):
            """Boolean expression.  Test if array has elements in common with
            an argument array expression.
            """
            # Emits the PG "&&" operator.
            return self.operate(OVERLAP, other, result_type=sqltypes.Boolean)

    comparator_factory = Comparator

    def __init__(self, item_type, as_tuple=False, dimensions=None,
                 zero_indexes=False):
        """Construct an ARRAY.

        E.g.::

          Column('myarray', ARRAY(Integer))

        Arguments are:

        :param item_type: The data type of items of this array. Note that
          dimensionality is irrelevant here, so multi-dimensional arrays like
          ``INTEGER[][]``, are constructed as ``ARRAY(Integer)``, not as
          ``ARRAY(ARRAY(Integer))`` or such.

        :param as_tuple=False: Specify whether return results
          should be converted to tuples from lists. DBAPIs such
          as psycopg2 return lists by default. When tuples are
          returned, the results are hashable.

        :param dimensions: if non-None, the ARRAY will assume a fixed
         number of dimensions.  This will cause the DDL emitted for this
         ARRAY to include the exact number of bracket clauses ``[]``,
         and will also optimize the performance of the type overall.
         Note that PG arrays are always implicitly "non-dimensioned",
         meaning they can store any number of dimensions no matter how
         they were declared.

        :param zero_indexes=False: when True, index values will be converted
         between Python zero-based and Postgresql one-based indexes, e.g.
         a value of one will be added to all index values before passing
         to the database.

         .. versionadded:: 0.9.5

        :raises ValueError: if *item_type* is itself an :class:`.ARRAY`.

        """
        if isinstance(item_type, ARRAY):
            raise ValueError("Do not nest ARRAY types; ARRAY(basetype) "
                             "handles multi-dimensional arrays of basetype")
        if isinstance(item_type, type):
            # Accept an uninstantiated type class for convenience.
            item_type = item_type()
        self.item_type = item_type
        self.as_tuple = as_tuple
        self.dimensions = dimensions
        self.zero_indexes = zero_indexes

    @property
    def hashable(self):
        # Results are hashable only when returned as tuples.
        return self.as_tuple

    @property
    def python_type(self):
        return list

    def compare_values(self, x, y):
        return x == y

    def _set_parent(self, column):
        """Support SchemaEventTarget"""
        # Forward schema-attach events to the member type (e.g. ENUM).
        if isinstance(self.item_type, SchemaEventTarget):
            self.item_type._set_parent(column)

    def _set_parent_with_dispatch(self, parent):
        """Support SchemaEventTarget"""
        if isinstance(self.item_type, SchemaEventTarget):
            self.item_type._set_parent_with_dispatch(parent)

    def _proc_array(self, arr, itemproc, dim, collection):
        """Recursively apply *itemproc* to every leaf element of *arr*.

        ``dim`` counts remaining dimensions; when None the depth is
        inferred by inspecting the first element at each level.
        """
        if dim is None:
            # Materialize so arr[0] below is safe for arbitrary iterables.
            arr = list(arr)
        if dim == 1 or dim is None and (
                # this has to be (list, tuple), or at least
                # not hasattr('__iter__'), since Py3K strings
                # etc. have __iter__
                not arr or not isinstance(arr[0], (list, tuple))):
            if itemproc:
                return collection(itemproc(x) for x in arr)
            else:
                return collection(arr)
        else:
            # Recurse into one more dimension per nesting level.
            return collection(
                self._proc_array(
                    x, itemproc,
                    dim - 1 if dim is not None else None,
                    collection)
                for x in arr
            )

    def bind_processor(self, dialect):
        """Return a function converting Python values for the DBAPI."""
        item_proc = self.item_type.dialect_impl(dialect).\
            bind_processor(dialect)

        def process(value):
            if value is None:
                return value
            else:
                return self._proc_array(
                    value,
                    item_proc,
                    self.dimensions,
                    list)
        return process

    def result_processor(self, dialect, coltype):
        """Return a function converting DBAPI row values back to Python."""
        item_proc = self.item_type.dialect_impl(dialect).\
            result_processor(dialect, coltype)

        def process(value):
            if value is None:
                return value
            else:
                return self._proc_array(
                    value,
                    item_proc,
                    self.dimensions,
                    # honor the as_tuple flag on the way out
                    tuple if self.as_tuple else list)
        return process
# Register the PG catalog type name "_array" so table reflection maps it
# to this dialect's ARRAY type.
ischema_names['_array'] = ARRAY
| |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Generic Node base class for all workers that run on hosts."""
import errno
import logging as std_logging
import os
import random
import signal
import sys
import time
try:
# Importing just the symbol here because the io module does not
# exist in Python 2.6.
from io import UnsupportedOperation # noqa
except ImportError:
# Python 2.6
UnsupportedOperation = None
import eventlet
from eventlet import event
from oslo.config import cfg
from neutron.openstack.common import eventlet_backdoor
from neutron.openstack.common._i18n import _LE, _LI, _LW
from neutron.openstack.common import log as logging
from neutron.openstack.common import systemd
from neutron.openstack.common import threadgroup
# Module-level config handle and logger shared by all launchers below.
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def _sighup_supported():
return hasattr(signal, 'SIGHUP')
def _is_daemon():
    """Best-effort check: is this process running in the background?"""
    # The process group for a foreground process will match the
    # process group of the controlling terminal. If those values do
    # not match, or ioctl() fails on the stdout file handle, we assume
    # the process is running in the background as a daemon.
    # http://www.gnu.org/software/bash/manual/bashref.html#Job-Control-Basics
    try:
        is_daemon = os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno())
    except OSError as err:
        if err.errno == errno.ENOTTY:
            # Assume we are a daemon because there is no terminal.
            is_daemon = True
        else:
            raise
    except UnsupportedOperation:
        # Could not get the fileno for stdout, so we must be a daemon.
        # NOTE(review): on Python 2.6 UnsupportedOperation is None (see the
        # import fallback above); `except None:` would itself error if this
        # path were reached there — confirm intended 2.6 behavior.
        is_daemon = True
    return is_daemon
def _is_sighup_and_daemon(signo):
    """Return True only for a SIGHUP received while running as a daemon."""
    if _sighup_supported() and signo == signal.SIGHUP:
        return _is_daemon()
    # Not a SIGHUP (or the platform has none) — skip the daemon check.
    return False
def _signo_to_signame(signo):
    """Map a handled signal number to its conventional symbolic name."""
    known = {signal.SIGTERM: 'SIGTERM',
             signal.SIGINT: 'SIGINT'}
    if _sighup_supported():
        known[signal.SIGHUP] = 'SIGHUP'
    # Raises KeyError for any signal this launcher does not manage.
    return known[signo]
def _set_signals_handler(handler):
    """Install *handler* for every termination signal this module manages."""
    managed = [signal.SIGTERM, signal.SIGINT]
    if _sighup_supported():
        managed.append(signal.SIGHUP)
    for signo in managed:
        signal.signal(signo, handler)
class Launcher(object):
    """Launch one or more services and wait for them to complete."""

    def __init__(self):
        """Initialize the service launcher.

        Creates the internal :class:`Services` container and starts the
        eventlet backdoor if it is enabled in configuration.

        :returns: None

        """
        self.services = Services()
        self.backdoor_port = eventlet_backdoor.initialize_if_enabled()

    def launch_service(self, service):
        """Load and start the given service.

        :param service: The service you would like to start.
        :returns: None

        """
        # Propagate the backdoor port so the service can report it.
        service.backdoor_port = self.backdoor_port
        self.services.add(service)

    def stop(self):
        """Stop all services which are currently running.

        :returns: None

        """
        self.services.stop()

    def wait(self):
        """Waits until all services have been stopped, and then returns.

        :returns: None

        """
        self.services.wait()

    def restart(self):
        """Reload config files and restart service.

        :returns: None

        """
        cfg.CONF.reload_config_files()
        self.services.restart()
class SignalExit(SystemExit):
    """SystemExit raised from a signal handler, remembering which signal."""

    def __init__(self, signo, exccode=1):
        # The exit code travels via the SystemExit base; keep the signal
        # number separately so callers can report it.
        super(SignalExit, self).__init__(exccode)
        self.signo = signo
class ServiceLauncher(Launcher):
    """Launcher that runs services in-process and restarts them on SIGHUP."""

    def _handle_signal(self, signo, frame):
        # Allow the process to be killed again and die from natural causes
        _set_signals_handler(signal.SIG_DFL)
        raise SignalExit(signo)

    def handle_signal(self):
        # (Re)install our handler for SIGTERM/SIGINT (and SIGHUP if present).
        _set_signals_handler(self._handle_signal)

    def _wait_for_exit_or_signal(self, ready_callback=None):
        """Block until services finish or a signal arrives.

        :returns: ``(status, signo)`` — exit status and the caught signal
                  number (0 when exit was not signal-driven).
        """
        status = None
        signo = 0

        LOG.debug('Full set of CONF:')
        CONF.log_opt_values(LOG, std_logging.DEBUG)

        try:
            if ready_callback:
                ready_callback()
            super(ServiceLauncher, self).wait()
        except SignalExit as exc:
            signame = _signo_to_signame(exc.signo)
            LOG.info(_LI('Caught %s, exiting'), signame)
            status = exc.code
            signo = exc.signo
        except SystemExit as exc:
            status = exc.code
        finally:
            # Always shut services down, whatever ended the wait.
            self.stop()
        return status, signo

    def wait(self, ready_callback=None):
        """Run until exit; a SIGHUP received as a daemon triggers a restart."""
        systemd.notify_once()
        while True:
            self.handle_signal()
            status, signo = self._wait_for_exit_or_signal(ready_callback)
            if not _is_sighup_and_daemon(signo):
                return status
            # SIGHUP while daemonized: reload config and loop again.
            self.restart()
class ServiceWrapper(object):
    """Book-keeping record pairing a service with its worker children."""

    def __init__(self, service, workers):
        self.service = service    # the service instance to run in each child
        self.workers = workers    # desired number of child processes
        self.children = set()     # pids of currently-live children
        self.forktimes = []       # recent fork timestamps, for rate limiting
class ProcessLauncher(object):
    """Fork worker processes for services and respawn them when they die."""

    def __init__(self, wait_interval=0.01):
        """Constructor.

        :param wait_interval: The interval to sleep for between checks
                              of child process exit.
        """
        self.children = {}        # pid -> ServiceWrapper
        self.sigcaught = None     # last signal number caught in the parent
        self.running = True
        self.wait_interval = wait_interval
        # Children inherit the read end; it unblocks when the parent dies.
        rfd, self.writepipe = os.pipe()
        self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r')
        self.handle_signal()

    def handle_signal(self):
        # (Re)install the parent's handler for the managed signals.
        _set_signals_handler(self._handle_signal)

    def _handle_signal(self, signo, frame):
        # Record the signal and let the wait loop decide what to do.
        self.sigcaught = signo
        self.running = False

        # Allow the process to be killed again and die from natural causes
        _set_signals_handler(signal.SIG_DFL)

    def _pipe_watcher(self):
        # This will block until the write end is closed when the parent
        # dies unexpectedly
        self.readpipe.read()

        LOG.info(_LI('Parent process has died unexpectedly, exiting'))

        sys.exit(1)

    def _child_process_handle_signal(self):
        # Setup child signal handlers differently
        def _sigterm(*args):
            signal.signal(signal.SIGTERM, signal.SIG_DFL)
            raise SignalExit(signal.SIGTERM)

        def _sighup(*args):
            signal.signal(signal.SIGHUP, signal.SIG_DFL)
            raise SignalExit(signal.SIGHUP)

        signal.signal(signal.SIGTERM, _sigterm)
        if _sighup_supported():
            signal.signal(signal.SIGHUP, _sighup)
        # Block SIGINT and let the parent send us a SIGTERM
        signal.signal(signal.SIGINT, signal.SIG_IGN)

    def _child_wait_for_exit_or_signal(self, launcher):
        """Run *launcher* in the child; return ``(status, signo)``."""
        status = 0
        signo = 0

        # NOTE(johannes): All exceptions are caught to ensure this
        # doesn't fallback into the loop spawning children. It would
        # be bad for a child to spawn more children.
        try:
            launcher.wait()
        except SignalExit as exc:
            signame = _signo_to_signame(exc.signo)
            LOG.info(_LI('Child caught %s, exiting'), signame)
            status = exc.code
            signo = exc.signo
        except SystemExit as exc:
            status = exc.code
        except BaseException:
            LOG.exception(_LE('Unhandled exception'))
            status = 2
        finally:
            launcher.stop()

        return status, signo

    def _child_process(self, service):
        """Post-fork setup for a child; returns a Launcher running *service*."""
        self._child_process_handle_signal()

        # Reopen the eventlet hub to make sure we don't share an epoll
        # fd with parent and/or siblings, which would be bad
        eventlet.hubs.use_hub()

        # Close write to ensure only parent has it open
        os.close(self.writepipe)
        # Create greenthread to watch for parent to close pipe
        eventlet.spawn_n(self._pipe_watcher)

        # Reseed random number generator
        random.seed()

        launcher = Launcher()
        launcher.launch_service(service)
        return launcher

    def _start_child(self, wrap):
        """Fork one child for *wrap*; returns the child's pid to the parent."""
        if len(wrap.forktimes) > wrap.workers:
            # Limit ourselves to one process a second (over the period of
            # number of workers * 1 second). This will allow workers to
            # start up quickly but ensure we don't fork off children that
            # die instantly too quickly.
            if time.time() - wrap.forktimes[0] < wrap.workers:
                LOG.info(_LI('Forking too fast, sleeping'))
                time.sleep(1)

            wrap.forktimes.pop(0)

        wrap.forktimes.append(time.time())

        pid = os.fork()
        if pid == 0:
            # Child: run the service until told to stop, honoring SIGHUP
            # restarts when daemonized, then exit without returning.
            launcher = self._child_process(wrap.service)
            while True:
                self._child_process_handle_signal()
                status, signo = self._child_wait_for_exit_or_signal(launcher)
                if not _is_sighup_and_daemon(signo):
                    break
                launcher.restart()

            os._exit(status)

        LOG.info(_LI('Started child %d'), pid)

        wrap.children.add(pid)
        self.children[pid] = wrap

        return pid

    def launch_service(self, service, workers=1):
        """Fork *workers* children each running *service*."""
        wrap = ServiceWrapper(service, workers)

        LOG.info(_LI('Starting %d workers'), wrap.workers)
        while self.running and len(wrap.children) < wrap.workers:
            self._start_child(wrap)

    def _wait_child(self):
        """Reap one exited child, if any; returns its ServiceWrapper or None."""
        try:
            # Don't block if no child processes have exited
            pid, status = os.waitpid(0, os.WNOHANG)
            if not pid:
                return None
        except OSError as exc:
            if exc.errno not in (errno.EINTR, errno.ECHILD):
                raise
            return None

        if os.WIFSIGNALED(status):
            sig = os.WTERMSIG(status)
            LOG.info(_LI('Child %(pid)d killed by signal %(sig)d'),
                     dict(pid=pid, sig=sig))
        else:
            code = os.WEXITSTATUS(status)
            LOG.info(_LI('Child %(pid)s exited with status %(code)d'),
                     dict(pid=pid, code=code))

        if pid not in self.children:
            LOG.warning(_LW('pid %d not in child list'), pid)
            return None

        wrap = self.children.pop(pid)
        wrap.children.remove(pid)
        return wrap

    def _respawn_children(self):
        """Reap dead children and fork replacements until told to stop."""
        while self.running:
            wrap = self._wait_child()
            if not wrap:
                # Yield to other threads if no children have exited
                # Sleep for a short time to avoid excessive CPU usage
                # (see bug #1095346)
                eventlet.greenthread.sleep(self.wait_interval)
                continue
            while self.running and len(wrap.children) < wrap.workers:
                self._start_child(wrap)

    def wait(self):
        """Loop waiting on children to die and respawning as necessary."""

        systemd.notify_once()
        LOG.debug('Full set of CONF:')
        CONF.log_opt_values(LOG, std_logging.DEBUG)

        try:
            while True:
                self.handle_signal()
                self._respawn_children()
                # No signal means that stop was called.  Don't clean up here.
                if not self.sigcaught:
                    return

                signame = _signo_to_signame(self.sigcaught)
                LOG.info(_LI('Caught %s, stopping children'), signame)
                if not _is_sighup_and_daemon(self.sigcaught):
                    break

                # Daemonized SIGHUP: tell children to reload and keep going.
                for pid in self.children:
                    os.kill(pid, signal.SIGHUP)
                self.running = True
                self.sigcaught = None
        except eventlet.greenlet.GreenletExit:
            LOG.info(_LI("Wait called after thread killed. Cleaning up."))

        self.stop()

    def stop(self):
        """Terminate child processes and wait on each."""
        self.running = False
        for pid in self.children:
            try:
                os.kill(pid, signal.SIGTERM)
            except OSError as exc:
                if exc.errno != errno.ESRCH:
                    raise

        # Wait for children to die
        if self.children:
            LOG.info(_LI('Waiting on %d children to exit'), len(self.children))
            while self.children:
                self._wait_child()
class Service(object):
    """Service object for binaries running on hosts."""

    def __init__(self, threads=1000):
        self.tg = threadgroup.ThreadGroup(threads)

        # signal that the service is done shutting itself down:
        self._done = event.Event()

    def reset(self):
        # NOTE(Fengqian): docs for Event.reset() recommend against using it,
        # so swap in a brand-new event instead.
        self._done = event.Event()

    def start(self):
        # Subclasses override to do actual work.
        pass

    def stop(self):
        self.tg.stop()
        self.tg.wait()
        # Signal that service cleanup is done, unless already signalled.
        if not self._done.ready():
            self._done.send()

    def wait(self):
        # Block until stop() has completed its cleanup.
        self._done.wait()
class Services(object):
    """Container running each added service in a shared green-thread group."""

    def __init__(self):
        self.services = []
        self.tg = threadgroup.ThreadGroup()
        # Shared event: signalled once all services have cleaned up, which
        # releases every run_service wrapper thread.
        self.done = event.Event()

    def add(self, service):
        """Start *service* in its own thread within the group."""
        self.services.append(service)
        self.tg.add_thread(self.run_service, service, self.done)

    def stop(self):
        # wait for graceful shutdown of services:
        for service in self.services:
            service.stop()
            service.wait()

        # Each service has performed cleanup, now signal that the run_service
        # wrapper threads can now die:
        if not self.done.ready():
            self.done.send()

        # reap threads:
        self.tg.stop()

    def wait(self):
        self.tg.wait()

    def restart(self):
        """Stop everything, then restart all known services."""
        self.stop()
        # A fresh event is required: the previous one was already sent.
        self.done = event.Event()
        for restart_service in self.services:
            restart_service.reset()
            self.tg.add_thread(self.run_service, restart_service, self.done)

    @staticmethod
    def run_service(service, done):
        """Service start wrapper.

        :param service: service to run
        :param done: event to wait on until a shutdown is triggered
        :returns: None

        """
        service.start()
        done.wait()
def launch(service, workers=1):
    """Run *service*, in-process or across worker processes.

    :param service: the service to start
    :param workers: number of worker processes; None or 1 means in-process
    :returns: the launcher that was started
    """
    single_process = workers is None or workers == 1
    if single_process:
        launcher = ServiceLauncher()
        launcher.launch_service(service)
        return launcher

    launcher = ProcessLauncher()
    launcher.launch_service(service, workers=workers)
    return launcher
| |
# Copyright (c) 2008 The Board of Trustees of The Leland Stanford Junior University
# Copyright (c) 2011, 2012 Open Networking Foundation
# Copyright (c) 2012, 2013 Big Switch Networks, Inc.
# See the file LICENSE.pyloxi which should have been included in the source distribution
# Automatically generated by LOXI from template module.py
# Do not modify
import struct
import loxi
from . import util
import functools
import loxi.generic_util
import sys
# Late-bound handle to the loxi.of15 protocol module (avoids a circular
# import at load time).
ofp = sys.modules['loxi.of15']
class action(loxi.OFObject):
    """Base OpenFlow 1.5 action; unpack dispatches on the 16-bit type code."""
    subtypes = {}

    def __init__(self, type=None):
        if type != None:
            self.type = type
        else:
            self.type = 0
        return

    def pack(self):
        """Serialize to wire format: type, then total length back-patched."""
        packed = []
        packed.append(struct.pack("!H", self.type))
        packed.append(struct.pack("!H", 0))  # placeholder for len at index 1
        length = sum([len(x) for x in packed])
        packed[1] = struct.pack("!H", length)
        return functools.reduce(lambda x,y: x+y, packed)

    @staticmethod
    def unpack(reader):
        """Deserialize; delegates to the registered subclass when known."""
        subtype, = reader.peek('!H', 0)
        subclass = action.subtypes.get(subtype)
        if subclass:
            return subclass.unpack(reader)

        obj = action()
        obj.type = reader.read("!H")[0]
        _len = reader.read("!H")[0]
        # Restrict further reads to this action's declared length.
        orig_reader = reader
        reader = orig_reader.slice(_len, 4)
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.type != other.type: return False
        return True

    def pretty_print(self, q):
        q.text("action {")
        with q.group():
            with q.indent(2):
                q.breakable()
            q.breakable()
        q.text('}')
class experimenter(action):
    """Experimenter action (type 65535); dispatches on the experimenter id."""
    subtypes = {}

    type = 65535

    def __init__(self, experimenter=None):
        if experimenter != None:
            self.experimenter = experimenter
        else:
            self.experimenter = 0
        return

    def pack(self):
        packed = []
        packed.append(struct.pack("!H", self.type))
        packed.append(struct.pack("!H", 0))  # placeholder for len at index 1
        packed.append(struct.pack("!L", self.experimenter))
        length = sum([len(x) for x in packed])
        # Pad the action out to an 8-byte boundary, as the wire format requires.
        packed.append(loxi.generic_util.pad_to(8, length))
        length += len(packed[-1])
        packed[1] = struct.pack("!H", length)
        return functools.reduce(lambda x,y: x+y, packed)

    @staticmethod
    def unpack(reader):
        subtype, = reader.peek('!L', 4)
        subclass = experimenter.subtypes.get(subtype)
        if subclass:
            return subclass.unpack(reader)

        obj = experimenter()
        _type = reader.read("!H")[0]
        assert(_type == 65535)
        _len = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_len, 4)
        obj.experimenter = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.experimenter != other.experimenter: return False
        return True

    def pretty_print(self, q):
        q.text("experimenter {")
        with q.group():
            with q.indent(2):
                q.breakable()
            q.breakable()
        q.text('}')

action.subtypes[65535] = experimenter
class bsn(experimenter):
    """Big Switch Networks experimenter action; dispatches on subtype."""
    subtypes = {}

    type = 65535
    experimenter = 6035143

    def __init__(self, subtype=None):
        if subtype != None:
            self.subtype = subtype
        else:
            self.subtype = 0
        return

    def pack(self):
        packed = []
        packed.append(struct.pack("!H", self.type))
        packed.append(struct.pack("!H", 0))  # placeholder for len at index 1
        packed.append(struct.pack("!L", self.experimenter))
        packed.append(struct.pack("!L", self.subtype))
        packed.append(b'\x00' * 4)  # trailing pad to 8-byte alignment
        length = sum([len(x) for x in packed])
        packed[1] = struct.pack("!H", length)
        return functools.reduce(lambda x,y: x+y, packed)

    @staticmethod
    def unpack(reader):
        subtype, = reader.peek('!L', 8)
        subclass = bsn.subtypes.get(subtype)
        if subclass:
            return subclass.unpack(reader)

        obj = bsn()
        _type = reader.read("!H")[0]
        assert(_type == 65535)
        _len = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_len, 4)
        _experimenter = reader.read("!L")[0]
        assert(_experimenter == 6035143)
        obj.subtype = reader.read("!L")[0]
        reader.skip(4)
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.subtype != other.subtype: return False
        return True

    def pretty_print(self, q):
        q.text("bsn {")
        with q.group():
            with q.indent(2):
                q.breakable()
            q.breakable()
        q.text('}')

experimenter.subtypes[6035143] = bsn
class bsn_checksum(bsn):
    """BSN checksum action (subtype 4) carrying a 128-bit checksum."""
    type = 65535
    experimenter = 6035143
    subtype = 4

    def __init__(self, checksum=None):
        if checksum != None:
            self.checksum = checksum
        else:
            self.checksum = 0
        return

    def pack(self):
        packed = []
        packed.append(struct.pack("!H", self.type))
        packed.append(struct.pack("!H", 0))  # placeholder for len at index 1
        packed.append(struct.pack("!L", self.experimenter))
        packed.append(struct.pack("!L", self.subtype))
        packed.append(util.pack_checksum_128(self.checksum))
        length = sum([len(x) for x in packed])
        packed[1] = struct.pack("!H", length)
        return functools.reduce(lambda x,y: x+y, packed)

    @staticmethod
    def unpack(reader):
        obj = bsn_checksum()
        _type = reader.read("!H")[0]
        assert(_type == 65535)
        _len = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_len, 4)
        _experimenter = reader.read("!L")[0]
        assert(_experimenter == 6035143)
        _subtype = reader.read("!L")[0]
        assert(_subtype == 4)
        obj.checksum = util.unpack_checksum_128(reader)
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.checksum != other.checksum: return False
        return True

    def pretty_print(self, q):
        q.text("bsn_checksum {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("checksum = ");
                q.pp(self.checksum)
            q.breakable()
        q.text('}')

bsn.subtypes[4] = bsn_checksum
class bsn_gentable(bsn):
    """BSN gentable action (subtype 5): a table id plus a TLV key list."""
    type = 65535
    experimenter = 6035143
    subtype = 5

    def __init__(self, table_id=None, key=None):
        if table_id != None:
            self.table_id = table_id
        else:
            self.table_id = 0
        if key != None:
            self.key = key
        else:
            self.key = []
        return

    def pack(self):
        packed = []
        packed.append(struct.pack("!H", self.type))
        packed.append(struct.pack("!H", 0))  # placeholder for len at index 1
        packed.append(struct.pack("!L", self.experimenter))
        packed.append(struct.pack("!L", self.subtype))
        packed.append(struct.pack("!L", self.table_id))
        packed.append(loxi.generic_util.pack_list(self.key))
        length = sum([len(x) for x in packed])
        packed[1] = struct.pack("!H", length)
        return functools.reduce(lambda x,y: x+y, packed)

    @staticmethod
    def unpack(reader):
        obj = bsn_gentable()
        _type = reader.read("!H")[0]
        assert(_type == 65535)
        _len = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_len, 4)
        _experimenter = reader.read("!L")[0]
        assert(_experimenter == 6035143)
        _subtype = reader.read("!L")[0]
        assert(_subtype == 5)
        obj.table_id = reader.read("!L")[0]
        # The key consumes the remainder of this action as BSN TLVs.
        obj.key = loxi.generic_util.unpack_list(reader, ofp.bsn_tlv.bsn_tlv.unpack)
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.table_id != other.table_id: return False
        if self.key != other.key: return False
        return True

    def pretty_print(self, q):
        q.text("bsn_gentable {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("table_id = ");
                q.text("%#x" % self.table_id)
                q.text(","); q.breakable()
                q.text("key = ");
                q.pp(self.key)
            q.breakable()
        q.text('}')

bsn.subtypes[5] = bsn_gentable
class bsn_mirror(bsn):
    """BSN mirror action (subtype 1): destination port, vlan tag, copy stage."""
    type = 65535
    experimenter = 6035143
    subtype = 1

    def __init__(self, dest_port=None, vlan_tag=None, copy_stage=None):
        if dest_port != None:
            self.dest_port = dest_port
        else:
            self.dest_port = 0
        if vlan_tag != None:
            self.vlan_tag = vlan_tag
        else:
            self.vlan_tag = 0
        if copy_stage != None:
            self.copy_stage = copy_stage
        else:
            self.copy_stage = 0
        return

    def pack(self):
        packed = []
        packed.append(struct.pack("!H", self.type))
        packed.append(struct.pack("!H", 0))  # placeholder for len at index 1
        packed.append(struct.pack("!L", self.experimenter))
        packed.append(struct.pack("!L", self.subtype))
        packed.append(struct.pack("!L", self.dest_port))
        packed.append(struct.pack("!L", self.vlan_tag))
        packed.append(struct.pack("!B", self.copy_stage))
        packed.append(b'\x00' * 3)  # pad to 4-byte boundary
        length = sum([len(x) for x in packed])
        packed[1] = struct.pack("!H", length)
        return functools.reduce(lambda x,y: x+y, packed)

    @staticmethod
    def unpack(reader):
        obj = bsn_mirror()
        _type = reader.read("!H")[0]
        assert(_type == 65535)
        _len = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_len, 4)
        _experimenter = reader.read("!L")[0]
        assert(_experimenter == 6035143)
        _subtype = reader.read("!L")[0]
        assert(_subtype == 1)
        obj.dest_port = reader.read("!L")[0]
        obj.vlan_tag = reader.read("!L")[0]
        obj.copy_stage = reader.read("!B")[0]
        reader.skip(3)
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.dest_port != other.dest_port: return False
        if self.vlan_tag != other.vlan_tag: return False
        if self.copy_stage != other.copy_stage: return False
        return True

    def pretty_print(self, q):
        q.text("bsn_mirror {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("dest_port = ");
                q.text("%#x" % self.dest_port)
                q.text(","); q.breakable()
                q.text("vlan_tag = ");
                q.text("%#x" % self.vlan_tag)
                q.text(","); q.breakable()
                q.text("copy_stage = ");
                q.text("%#x" % self.copy_stage)
            q.breakable()
        q.text('}')

bsn.subtypes[1] = bsn_mirror
class bsn_set_tunnel_dst(bsn):
    """BSN set-tunnel-destination action (subtype 2) carrying a 32-bit dst."""
    type = 65535
    experimenter = 6035143
    subtype = 2

    def __init__(self, dst=None):
        if dst != None:
            self.dst = dst
        else:
            self.dst = 0
        return

    def pack(self):
        packed = []
        packed.append(struct.pack("!H", self.type))
        packed.append(struct.pack("!H", 0))  # placeholder for len at index 1
        packed.append(struct.pack("!L", self.experimenter))
        packed.append(struct.pack("!L", self.subtype))
        packed.append(struct.pack("!L", self.dst))
        length = sum([len(x) for x in packed])
        packed[1] = struct.pack("!H", length)
        return functools.reduce(lambda x,y: x+y, packed)

    @staticmethod
    def unpack(reader):
        obj = bsn_set_tunnel_dst()
        _type = reader.read("!H")[0]
        assert(_type == 65535)
        _len = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_len, 4)
        _experimenter = reader.read("!L")[0]
        assert(_experimenter == 6035143)
        _subtype = reader.read("!L")[0]
        assert(_subtype == 2)
        obj.dst = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.dst != other.dst: return False
        return True

    def pretty_print(self, q):
        q.text("bsn_set_tunnel_dst {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("dst = ");
                q.text("%#x" % self.dst)
            q.breakable()
        q.text('}')

bsn.subtypes[2] = bsn_set_tunnel_dst
class copy_ttl_in(action):
    """OFPAT_COPY_TTL_IN (type 12): fixed-size action with no fields."""
    type = 12

    def __init__(self):
        return

    def pack(self):
        packed = []
        packed.append(struct.pack("!H", self.type))
        packed.append(struct.pack("!H", 0))  # placeholder for len at index 1
        packed.append(b'\x00' * 4)  # pad to the mandatory 8-byte size
        length = sum([len(x) for x in packed])
        packed[1] = struct.pack("!H", length)
        return functools.reduce(lambda x,y: x+y, packed)

    @staticmethod
    def unpack(reader):
        obj = copy_ttl_in()
        _type = reader.read("!H")[0]
        assert(_type == 12)
        _len = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_len, 4)
        reader.skip(4)
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        return True

    def pretty_print(self, q):
        q.text("copy_ttl_in {")
        with q.group():
            with q.indent(2):
                q.breakable()
            q.breakable()
        q.text('}')

action.subtypes[12] = copy_ttl_in
class copy_ttl_out(action):
    """OFPAT_COPY_TTL_OUT (type 11): fixed-size action with no fields."""
    type = 11

    def __init__(self):
        return

    def pack(self):
        packed = []
        packed.append(struct.pack("!H", self.type))
        packed.append(struct.pack("!H", 0))  # placeholder for len at index 1
        packed.append(b'\x00' * 4)  # pad to the mandatory 8-byte size
        length = sum([len(x) for x in packed])
        packed[1] = struct.pack("!H", length)
        return functools.reduce(lambda x,y: x+y, packed)

    @staticmethod
    def unpack(reader):
        obj = copy_ttl_out()
        _type = reader.read("!H")[0]
        assert(_type == 11)
        _len = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_len, 4)
        reader.skip(4)
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        return True

    def pretty_print(self, q):
        q.text("copy_ttl_out {")
        with q.group():
            with q.indent(2):
                q.breakable()
            q.breakable()
        q.text('}')

action.subtypes[11] = copy_ttl_out
class dec_mpls_ttl(action):
    """OFPAT_DEC_MPLS_TTL (type 16): fixed-size action with no fields."""
    type = 16

    def __init__(self):
        return

    def pack(self):
        packed = []
        packed.append(struct.pack("!H", self.type))
        packed.append(struct.pack("!H", 0))  # placeholder for len at index 1
        packed.append(b'\x00' * 4)  # pad to the mandatory 8-byte size
        length = sum([len(x) for x in packed])
        packed[1] = struct.pack("!H", length)
        return functools.reduce(lambda x,y: x+y, packed)

    @staticmethod
    def unpack(reader):
        obj = dec_mpls_ttl()
        _type = reader.read("!H")[0]
        assert(_type == 16)
        _len = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_len, 4)
        reader.skip(4)
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        return True

    def pretty_print(self, q):
        q.text("dec_mpls_ttl {")
        with q.group():
            with q.indent(2):
                q.breakable()
            q.breakable()
        q.text('}')

action.subtypes[16] = dec_mpls_ttl
class dec_nw_ttl(action):
    """OFPAT_DEC_NW_TTL (type 24): fixed-size action with no fields."""
    type = 24

    def __init__(self):
        return

    def pack(self):
        packed = []
        packed.append(struct.pack("!H", self.type))
        packed.append(struct.pack("!H", 0))  # placeholder for len at index 1
        packed.append(b'\x00' * 4)  # pad to the mandatory 8-byte size
        length = sum([len(x) for x in packed])
        packed[1] = struct.pack("!H", length)
        return functools.reduce(lambda x,y: x+y, packed)

    @staticmethod
    def unpack(reader):
        obj = dec_nw_ttl()
        _type = reader.read("!H")[0]
        assert(_type == 24)
        _len = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_len, 4)
        reader.skip(4)
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        return True

    def pretty_print(self, q):
        q.text("dec_nw_ttl {")
        with q.group():
            with q.indent(2):
                q.breakable()
            q.breakable()
        q.text('}')

action.subtypes[24] = dec_nw_ttl
class group(action):
    """OFPAT_GROUP action (type 22): process the packet through a group."""
    type = 22

    def __init__(self, group_id=None):
        # Default the group id to 0 when not supplied.
        self.group_id = 0 if group_id is None else group_id

    def pack(self):
        """Serialize to wire format: type, length, 32-bit group_id."""
        parts = [
            struct.pack("!H", self.type),
            struct.pack("!H", 0),  # length placeholder, patched below
            struct.pack("!L", self.group_id),
        ]
        parts[1] = struct.pack("!H", sum(len(p) for p in parts))
        return b''.join(parts)

    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*; asserts the wire type is 22."""
        obj = group()
        _type = reader.read("!H")[0]
        assert(_type == 22)
        _len = reader.read("!H")[0]
        reader = reader.slice(_len, 4)
        obj.group_id = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.group_id == other.group_id

    def pretty_print(self, q):
        q.text("group {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("group_id = ")
                q.text("%#x" % self.group_id)
            q.breakable()
        q.text('}')

action.subtypes[22] = group
class meter(action):
    """OFPAT_METER action (type 29): apply the given meter to the packet."""
    type = 29

    def __init__(self, meter_id=None):
        # Default the meter id to 0 when not supplied.
        self.meter_id = 0 if meter_id is None else meter_id

    def pack(self):
        """Serialize to wire format: type, length, 32-bit meter_id."""
        parts = [
            struct.pack("!H", self.type),
            struct.pack("!H", 0),  # length placeholder, patched below
            struct.pack("!L", self.meter_id),
        ]
        parts[1] = struct.pack("!H", sum(len(p) for p in parts))
        return b''.join(parts)

    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*; asserts the wire type is 29."""
        obj = meter()
        _type = reader.read("!H")[0]
        assert(_type == 29)
        _len = reader.read("!H")[0]
        reader = reader.slice(_len, 4)
        obj.meter_id = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.meter_id == other.meter_id

    def pretty_print(self, q):
        q.text("meter {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("meter_id = ")
                q.text("%#x" % self.meter_id)
            q.breakable()
        q.text('}')

action.subtypes[29] = meter
class nicira(experimenter):
    """Base class for Nicira experimenter actions (experimenter id 8992).

    Concrete NX actions register themselves in ``subtypes`` keyed by the
    16-bit subtype so ``unpack`` can dispatch.
    """
    subtypes = {}

    type = 65535
    experimenter = 8992

    def __init__(self, subtype=None):
        self.subtype = 0 if subtype is None else subtype

    def pack(self):
        """Serialize: type, length, experimenter id, subtype, 6 pad bytes."""
        parts = [
            struct.pack("!H", self.type),
            struct.pack("!H", 0),  # length placeholder, patched below
            struct.pack("!L", self.experimenter),
            struct.pack("!H", self.subtype),
            b'\x00' * 2,
            b'\x00' * 4,
        ]
        parts[1] = struct.pack("!H", sum(len(p) for p in parts))
        return b''.join(parts)

    @staticmethod
    def unpack(reader):
        """Deserialize, dispatching to a registered subtype class if any."""
        # The subtype field lives 8 bytes in (after type/len/experimenter).
        subtype, = reader.peek('!H', 8)
        subclass = nicira.subtypes.get(subtype)
        if subclass:
            return subclass.unpack(reader)
        obj = nicira()
        _type = reader.read("!H")[0]
        assert(_type == 65535)
        _len = reader.read("!H")[0]
        reader = reader.slice(_len, 4)
        _experimenter = reader.read("!L")[0]
        assert(_experimenter == 8992)
        obj.subtype = reader.read("!H")[0]
        reader.skip(2)
        reader.skip(4)
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.subtype == other.subtype

    def pretty_print(self, q):
        q.text("nicira {")
        with q.group():
            with q.indent(2):
                q.breakable()
            q.breakable()
        q.text('}')

experimenter.subtypes[8992] = nicira
class nicira_dec_ttl(nicira):
    """Nicira experimenter action, subtype 18 (decrement TTL); no arguments."""
    type = 65535
    experimenter = 8992
    subtype = 18

    def __init__(self):
        pass

    def pack(self):
        """Serialize: type, length, experimenter id, subtype, 6 pad bytes."""
        parts = [
            struct.pack("!H", self.type),
            struct.pack("!H", 0),  # length placeholder, patched below
            struct.pack("!L", self.experimenter),
            struct.pack("!H", self.subtype),
            b'\x00' * 2,
            b'\x00' * 4,
        ]
        parts[1] = struct.pack("!H", sum(len(p) for p in parts))
        return b''.join(parts)

    @staticmethod
    def unpack(reader):
        """Deserialize; asserts type 65535 / experimenter 8992 / subtype 18."""
        obj = nicira_dec_ttl()
        _type = reader.read("!H")[0]
        assert(_type == 65535)
        _len = reader.read("!H")[0]
        reader = reader.slice(_len, 4)
        _experimenter = reader.read("!L")[0]
        assert(_experimenter == 8992)
        _subtype = reader.read("!H")[0]
        assert(_subtype == 18)
        reader.skip(2)
        reader.skip(4)
        return obj

    def __eq__(self, other):
        # Stateless action: equality is purely a type check.
        return type(self) == type(other)

    def pretty_print(self, q):
        q.text("nicira_dec_ttl {")
        with q.group():
            with q.indent(2):
                q.breakable()
            q.breakable()
        q.text('}')

nicira.subtypes[18] = nicira_dec_ttl
class output(action):
    """OFPAT_OUTPUT action (type 0): forward the packet to *port*.

    max_len caps the number of bytes sent to the controller; its exact
    semantics are defined by the OpenFlow spec for OFPP_CONTROLLER output.
    """
    type = 0

    def __init__(self, port=None, max_len=None):
        self.port = 0 if port is None else port
        self.max_len = 0 if max_len is None else max_len

    def pack(self):
        """Serialize: type, length, port, max_len, 6 pad bytes."""
        parts = [
            struct.pack("!H", self.type),
            struct.pack("!H", 0),  # length placeholder, patched below
            util.pack_port_no(self.port),
            struct.pack("!H", self.max_len),
            b'\x00' * 6,
        ]
        parts[1] = struct.pack("!H", sum(len(p) for p in parts))
        return b''.join(parts)

    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*; asserts the wire type is 0."""
        obj = output()
        _type = reader.read("!H")[0]
        assert(_type == 0)
        _len = reader.read("!H")[0]
        reader = reader.slice(_len, 4)
        obj.port = util.unpack_port_no(reader)
        obj.max_len = reader.read("!H")[0]
        reader.skip(6)
        return obj

    def __eq__(self, other):
        return (type(self) == type(other)
                and self.port == other.port
                and self.max_len == other.max_len)

    def pretty_print(self, q):
        q.text("output {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("port = ")
                q.text(util.pretty_port(self.port))
                q.text(",")
                q.breakable()
                q.text("max_len = ")
                q.text("%#x" % self.max_len)
            q.breakable()
        q.text('}')

action.subtypes[0] = output
class pop_mpls(action):
    """OFPAT_POP_MPLS action (type 20): pop the outer MPLS tag."""
    type = 20

    def __init__(self, ethertype=None):
        # Ethertype of the payload that follows the popped tag; defaults to 0.
        self.ethertype = 0 if ethertype is None else ethertype

    def pack(self):
        """Serialize: type, length, ethertype, 2 pad bytes."""
        parts = [
            struct.pack("!H", self.type),
            struct.pack("!H", 0),  # length placeholder, patched below
            struct.pack("!H", self.ethertype),
            b'\x00' * 2,
        ]
        parts[1] = struct.pack("!H", sum(len(p) for p in parts))
        return b''.join(parts)

    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*; asserts the wire type is 20."""
        obj = pop_mpls()
        _type = reader.read("!H")[0]
        assert(_type == 20)
        _len = reader.read("!H")[0]
        reader = reader.slice(_len, 4)
        obj.ethertype = reader.read("!H")[0]
        reader.skip(2)
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.ethertype == other.ethertype

    def pretty_print(self, q):
        q.text("pop_mpls {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("ethertype = ")
                q.text("%#x" % self.ethertype)
            q.breakable()
        q.text('}')

action.subtypes[20] = pop_mpls
class pop_pbb(action):
    """OFPAT_POP_PBB action (type 27): takes no arguments."""
    type = 27

    def __init__(self):
        pass

    def pack(self):
        """Serialize to wire format: type, length, 4 bytes of padding."""
        parts = [
            struct.pack("!H", self.type),
            struct.pack("!H", 0),  # length placeholder, patched below
            b'\x00' * 4,
        ]
        parts[1] = struct.pack("!H", sum(len(p) for p in parts))
        return b''.join(parts)

    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*; asserts the wire type is 27."""
        obj = pop_pbb()
        _type = reader.read("!H")[0]
        assert(_type == 27)
        _len = reader.read("!H")[0]
        reader = reader.slice(_len, 4)
        reader.skip(4)
        return obj

    def __eq__(self, other):
        # Stateless action: equality is purely a type check.
        return type(self) == type(other)

    def pretty_print(self, q):
        q.text("pop_pbb {")
        with q.group():
            with q.indent(2):
                q.breakable()
            q.breakable()
        q.text('}')

action.subtypes[27] = pop_pbb
class pop_vlan(action):
    """OFPAT_POP_VLAN action (type 18): takes no arguments."""
    type = 18

    def __init__(self):
        pass

    def pack(self):
        """Serialize to wire format: type, length, 4 bytes of padding."""
        parts = [
            struct.pack("!H", self.type),
            struct.pack("!H", 0),  # length placeholder, patched below
            b'\x00' * 4,
        ]
        parts[1] = struct.pack("!H", sum(len(p) for p in parts))
        return b''.join(parts)

    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*; asserts the wire type is 18."""
        obj = pop_vlan()
        _type = reader.read("!H")[0]
        assert(_type == 18)
        _len = reader.read("!H")[0]
        reader = reader.slice(_len, 4)
        reader.skip(4)
        return obj

    def __eq__(self, other):
        # Stateless action: equality is purely a type check.
        return type(self) == type(other)

    def pretty_print(self, q):
        q.text("pop_vlan {")
        with q.group():
            with q.indent(2):
                q.breakable()
            q.breakable()
        q.text('}')

action.subtypes[18] = pop_vlan
class push_mpls(action):
    """OFPAT_PUSH_MPLS action (type 19): push a new MPLS tag."""
    type = 19

    def __init__(self, ethertype=None):
        # Ethertype of the pushed tag; defaults to 0.
        self.ethertype = 0 if ethertype is None else ethertype

    def pack(self):
        """Serialize: type, length, ethertype, 2 pad bytes."""
        parts = [
            struct.pack("!H", self.type),
            struct.pack("!H", 0),  # length placeholder, patched below
            struct.pack("!H", self.ethertype),
            b'\x00' * 2,
        ]
        parts[1] = struct.pack("!H", sum(len(p) for p in parts))
        return b''.join(parts)

    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*; asserts the wire type is 19."""
        obj = push_mpls()
        _type = reader.read("!H")[0]
        assert(_type == 19)
        _len = reader.read("!H")[0]
        reader = reader.slice(_len, 4)
        obj.ethertype = reader.read("!H")[0]
        reader.skip(2)
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.ethertype == other.ethertype

    def pretty_print(self, q):
        q.text("push_mpls {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("ethertype = ")
                q.text("%#x" % self.ethertype)
            q.breakable()
        q.text('}')

action.subtypes[19] = push_mpls
class push_pbb(action):
    """OFPAT_PUSH_PBB action (type 26): push a new PBB service tag."""
    type = 26

    def __init__(self, ethertype=None):
        # Ethertype of the pushed tag; defaults to 0.
        self.ethertype = 0 if ethertype is None else ethertype

    def pack(self):
        """Serialize: type, length, ethertype, 2 pad bytes."""
        parts = [
            struct.pack("!H", self.type),
            struct.pack("!H", 0),  # length placeholder, patched below
            struct.pack("!H", self.ethertype),
            b'\x00' * 2,
        ]
        parts[1] = struct.pack("!H", sum(len(p) for p in parts))
        return b''.join(parts)

    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*; asserts the wire type is 26."""
        obj = push_pbb()
        _type = reader.read("!H")[0]
        assert(_type == 26)
        _len = reader.read("!H")[0]
        reader = reader.slice(_len, 4)
        obj.ethertype = reader.read("!H")[0]
        reader.skip(2)
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.ethertype == other.ethertype

    def pretty_print(self, q):
        q.text("push_pbb {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("ethertype = ")
                q.text("%#x" % self.ethertype)
            q.breakable()
        q.text('}')

action.subtypes[26] = push_pbb
class push_vlan(action):
    """OFPAT_PUSH_VLAN action (type 17): push a new VLAN tag."""
    type = 17

    def __init__(self, ethertype=None):
        # Ethertype of the pushed tag; defaults to 0.
        self.ethertype = 0 if ethertype is None else ethertype

    def pack(self):
        """Serialize: type, length, ethertype, 2 pad bytes."""
        parts = [
            struct.pack("!H", self.type),
            struct.pack("!H", 0),  # length placeholder, patched below
            struct.pack("!H", self.ethertype),
            b'\x00' * 2,
        ]
        parts[1] = struct.pack("!H", sum(len(p) for p in parts))
        return b''.join(parts)

    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*; asserts the wire type is 17."""
        obj = push_vlan()
        _type = reader.read("!H")[0]
        assert(_type == 17)
        _len = reader.read("!H")[0]
        reader = reader.slice(_len, 4)
        obj.ethertype = reader.read("!H")[0]
        reader.skip(2)
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.ethertype == other.ethertype

    def pretty_print(self, q):
        q.text("push_vlan {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("ethertype = ")
                q.text("%#x" % self.ethertype)
            q.breakable()
        q.text('}')

action.subtypes[17] = push_vlan
class set_field(action):
    """OFPAT_SET_FIELD action (type 25): set a header field from an OXM TLV."""
    type = 25

    def __init__(self, field=None):
        # *field* is an OXM object (or None); no meaningful default exists.
        self.field = field

    def pack(self):
        """Serialize: type, length, packed OXM field, padded to 8 bytes."""
        parts = [
            struct.pack("!H", self.type),
            struct.pack("!H", 0),  # length placeholder, patched below
            self.field.pack(),
        ]
        body_len = sum(len(p) for p in parts)
        padding = loxi.generic_util.pad_to(8, body_len)
        parts.append(padding)
        # The reported length includes the padding.
        parts[1] = struct.pack("!H", body_len + len(padding))
        return b''.join(parts)

    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*; asserts the wire type is 25."""
        obj = set_field()
        _type = reader.read("!H")[0]
        assert(_type == 25)
        _len = reader.read("!H")[0]
        reader = reader.slice(_len, 4)
        obj.field = ofp.oxm.oxm.unpack(reader)
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.field == other.field

    def pretty_print(self, q):
        q.text("set_field {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("field = ")
                q.pp(self.field)
            q.breakable()
        q.text('}')

action.subtypes[25] = set_field
class set_mpls_ttl(action):
    """OFPAT_MPLS_TTL action (type 15): set the MPLS TTL to a given value."""
    type = 15

    def __init__(self, mpls_ttl=None):
        # 8-bit TTL value; defaults to 0.
        self.mpls_ttl = 0 if mpls_ttl is None else mpls_ttl

    def pack(self):
        """Serialize: type, length, 8-bit TTL, 3 pad bytes."""
        parts = [
            struct.pack("!H", self.type),
            struct.pack("!H", 0),  # length placeholder, patched below
            struct.pack("!B", self.mpls_ttl),
            b'\x00' * 3,
        ]
        parts[1] = struct.pack("!H", sum(len(p) for p in parts))
        return b''.join(parts)

    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*; asserts the wire type is 15."""
        obj = set_mpls_ttl()
        _type = reader.read("!H")[0]
        assert(_type == 15)
        _len = reader.read("!H")[0]
        reader = reader.slice(_len, 4)
        obj.mpls_ttl = reader.read("!B")[0]
        reader.skip(3)
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.mpls_ttl == other.mpls_ttl

    def pretty_print(self, q):
        q.text("set_mpls_ttl {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("mpls_ttl = ")
                q.text("%#x" % self.mpls_ttl)
            q.breakable()
        q.text('}')

action.subtypes[15] = set_mpls_ttl
class set_nw_ttl(action):
    """OFPAT_NW_TTL action (type 23): set the IP TTL to a given value."""
    type = 23

    def __init__(self, nw_ttl=None):
        # 8-bit TTL value; defaults to 0.
        self.nw_ttl = 0 if nw_ttl is None else nw_ttl

    def pack(self):
        """Serialize: type, length, 8-bit TTL, 3 pad bytes."""
        parts = [
            struct.pack("!H", self.type),
            struct.pack("!H", 0),  # length placeholder, patched below
            struct.pack("!B", self.nw_ttl),
            b'\x00' * 3,
        ]
        parts[1] = struct.pack("!H", sum(len(p) for p in parts))
        return b''.join(parts)

    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*; asserts the wire type is 23."""
        obj = set_nw_ttl()
        _type = reader.read("!H")[0]
        assert(_type == 23)
        _len = reader.read("!H")[0]
        reader = reader.slice(_len, 4)
        obj.nw_ttl = reader.read("!B")[0]
        reader.skip(3)
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.nw_ttl == other.nw_ttl

    def pretty_print(self, q):
        q.text("set_nw_ttl {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("nw_ttl = ")
                q.text("%#x" % self.nw_ttl)
            q.breakable()
        q.text('}')

action.subtypes[23] = set_nw_ttl
class set_queue(action):
    """OFPAT_SET_QUEUE action (type 21): map the packet to the given queue."""
    type = 21

    def __init__(self, queue_id=None):
        # Default the queue id to 0 when not supplied.
        self.queue_id = 0 if queue_id is None else queue_id

    def pack(self):
        """Serialize to wire format: type, length, 32-bit queue_id."""
        parts = [
            struct.pack("!H", self.type),
            struct.pack("!H", 0),  # length placeholder, patched below
            struct.pack("!L", self.queue_id),
        ]
        parts[1] = struct.pack("!H", sum(len(p) for p in parts))
        return b''.join(parts)

    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*; asserts the wire type is 21."""
        obj = set_queue()
        _type = reader.read("!H")[0]
        assert(_type == 21)
        _len = reader.read("!H")[0]
        reader = reader.slice(_len, 4)
        obj.queue_id = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.queue_id == other.queue_id

    def pretty_print(self, q):
        q.text("set_queue {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("queue_id = ")
                q.text("%#x" % self.queue_id)
            q.breakable()
        q.text('}')

action.subtypes[21] = set_queue
| |
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for user dashboard computations."""
import collections
from core import jobs_registry
from core.domain import collection_services
from core.domain import event_services
from core.domain import exp_domain
from core.domain import exp_jobs_one_off
from core.domain import exp_services
from core.domain import feedback_services
from core.domain import rating_services
from core.domain import rights_manager
from core.domain import stats_domain
from core.domain import stats_services
from core.domain import user_jobs_continuous
from core.domain import user_services
from core.platform import models
from core.tests import test_utils
import feconf
import utils
# Concrete storage model classes and the taskqueue service are resolved
# lazily through the platform registry.
(exp_models, stats_models, user_models,) = models.Registry.import_models([
    models.NAMES.exploration, models.NAMES.statistics, models.NAMES.user])
taskqueue_services = models.Registry.import_taskqueue_services()
# Shared fixture ids and titles used by the tests in this module.
COLLECTION_ID = 'cid'
COLLECTION_TITLE = 'Title'
EXP_ID = 'eid'
EXP_TITLE = 'Title'
EXP_1_ID = 'eid1'
EXP_1_TITLE = 'Title1'
EXP_2_ID = 'eid2'
EXP_2_TITLE = 'Title2'
FEEDBACK_THREAD_SUBJECT = 'feedback thread subject'
USER_ID = 'user_id'
ANOTHER_USER_ID = 'another_user_id'
USER_A_EMAIL = 'user_a@example.com'
USER_A_USERNAME = 'a'
USER_B_EMAIL = 'user_b@example.com'
USER_B_USERNAME = 'b'
class ModifiedRecentUpdatesAggregator(
        user_jobs_continuous.DashboardRecentUpdatesAggregator):
    """Test-only aggregator that runs exactly one batch round: it never
    kicks off a new batch job when the previous one finishes.
    """

    @classmethod
    def _kickoff_batch_job_after_previous_one_ends(cls):
        # Deliberate no-op: tests drive each batch run explicitly.
        pass

    @classmethod
    def _get_batch_job_manager_class(cls):
        return ModifiedRecentUpdatesMRJobManager
class ModifiedRecentUpdatesMRJobManager(
        user_jobs_continuous.RecentUpdatesMRJobManager):
    """MapReduce job manager bound to ModifiedRecentUpdatesAggregator."""

    @classmethod
    def _get_continuous_computation_class(cls):
        return ModifiedRecentUpdatesAggregator
class RecentUpdatesAggregatorUnitTests(test_utils.GenericTestBase):
"""Tests for computations involving the 'recent notifications' section of
the user dashboard.
"""
ALL_CC_MANAGERS_FOR_TESTS = [
ModifiedRecentUpdatesAggregator]
def _get_expected_activity_created_dict(
self, user_id, activity_id, activity_title, activity_type,
commit_type, last_updated_ms):
return {
'activity_id': activity_id,
'activity_title': activity_title,
'author_id': user_id,
'last_updated_ms': last_updated_ms,
'subject': (
'New %s created with title \'%s\'.' % (
activity_type, activity_title)),
'type': commit_type,
}
def _get_most_recent_exp_snapshot_created_on_ms(self, exp_id):
most_recent_snapshot = exp_services.get_exploration_snapshots_metadata(
exp_id)[-1]
return most_recent_snapshot['created_on_ms']
def _get_most_recent_collection_snapshot_created_on_ms(
self, collection_id):
most_recent_snapshot = (
collection_services.get_collection_snapshots_metadata(
collection_id)[-1])
return most_recent_snapshot['created_on_ms']
    def _get_test_context(self):
        # Swap in the test-only aggregator so no real continuous
        # computations are scheduled during the test.
        return self.swap(
            jobs_registry, 'ALL_CONTINUOUS_COMPUTATION_MANAGERS',
            self.ALL_CC_MANAGERS_FOR_TESTS)
    def test_basic_computation_for_explorations(self):
        """A single exploration creation yields one 'created' notification."""
        with self._get_test_context():
            self.save_new_valid_exploration(
                EXP_ID, USER_ID, title=EXP_TITLE, category='Category')
            expected_last_updated_ms = (
                self._get_most_recent_exp_snapshot_created_on_ms(EXP_ID))
            # Run one aggregation batch.
            ModifiedRecentUpdatesAggregator.start_computation()
            self.assertEqual(
                self.count_jobs_in_taskqueue(
                    taskqueue_services.QUEUE_NAME_CONTINUOUS_JOBS), 1)
            self.process_and_flush_pending_tasks()
            recent_notifications = (
                ModifiedRecentUpdatesAggregator.get_recent_notifications(
                    USER_ID)[1])
            self.assertEqual(len(recent_notifications), 1)
            self.assertEqual(
                recent_notifications[0],
                self._get_expected_activity_created_dict(
                    USER_ID, EXP_ID, EXP_TITLE, 'exploration',
                    feconf.UPDATE_TYPE_EXPLORATION_COMMIT,
                    expected_last_updated_ms))
    def test_basic_computation_ignores_automated_exploration_commits(self):
        """Migration-job commits do not appear in the dashboard, while a
        subsequent human commit does.
        """
        with self._get_test_context():
            self.save_new_exp_with_states_schema_v0(EXP_ID, USER_ID, EXP_TITLE)
            # Confirm that the exploration is at version 1.
            exploration = exp_services.get_exploration_by_id(EXP_ID)
            self.assertEqual(exploration.version, 1)
            v1_last_updated_ms = (
                self._get_most_recent_exp_snapshot_created_on_ms(EXP_ID))
            # Start migration job on all explorations, including this one.
            job_id = (
                exp_jobs_one_off.ExplorationMigrationJobManager.create_new())
            exp_jobs_one_off.ExplorationMigrationJobManager.enqueue(job_id)
            self.process_and_flush_pending_tasks()
            # Confirm that the exploration is at version 2.
            exploration = exp_services.get_exploration_by_id(EXP_ID)
            self.assertEqual(exploration.version, 2)
            v2_last_updated_ms = (
                self._get_most_recent_exp_snapshot_created_on_ms(EXP_ID))
            # Run the aggregator.
            ModifiedRecentUpdatesAggregator.start_computation()
            self.assertEqual(
                self.count_jobs_in_taskqueue(
                    taskqueue_services.QUEUE_NAME_CONTINUOUS_JOBS), 1)
            self.process_and_flush_pending_tasks()
            ModifiedRecentUpdatesAggregator.stop_computation(USER_ID)
            recent_notifications = (
                ModifiedRecentUpdatesAggregator.get_recent_notifications(
                    USER_ID)[1])
            # Only the original creation commit is reported, with its
            # (v1) timestamp predating the automated migration commit.
            self.assertEqual(len(recent_notifications), 1)
            self.assertEqual(
                recent_notifications[0],
                self._get_expected_activity_created_dict(
                    USER_ID, EXP_ID, EXP_TITLE, 'exploration',
                    feconf.UPDATE_TYPE_EXPLORATION_COMMIT, v1_last_updated_ms))
            self.assertLess(
                recent_notifications[0]['last_updated_ms'], v2_last_updated_ms)
            # Another user makes a commit; this one should now show up in the
            # original user's dashboard.
            exp_services.update_exploration(
                ANOTHER_USER_ID, EXP_ID, [], 'Update exploration')
            v3_last_updated_ms = (
                self._get_most_recent_exp_snapshot_created_on_ms(EXP_ID))
            ModifiedRecentUpdatesAggregator.start_computation()
            self.assertEqual(
                self.count_jobs_in_taskqueue(
                    taskqueue_services.QUEUE_NAME_CONTINUOUS_JOBS), 1)
            self.process_and_flush_pending_tasks()
            recent_notifications = (
                ModifiedRecentUpdatesAggregator.get_recent_notifications(
                    USER_ID)[1])
            self.assertEqual([{
                'type': feconf.UPDATE_TYPE_EXPLORATION_COMMIT,
                'last_updated_ms': v3_last_updated_ms,
                'activity_id': EXP_ID,
                'activity_title': EXP_TITLE,
                'author_id': ANOTHER_USER_ID,
                'subject': 'Update exploration',
            }], recent_notifications)
    def test_basic_computation_with_an_update_after_exploration_is_created(
            self):
        """A later commit by another user supersedes the creation event."""
        with self._get_test_context():
            self.save_new_valid_exploration(
                EXP_ID, USER_ID, title=EXP_TITLE, category='Category')
            # Another user makes a commit; this, too, shows up in the
            # original user's dashboard.
            exp_services.update_exploration(
                ANOTHER_USER_ID, EXP_ID, [], 'Update exploration')
            expected_last_updated_ms = (
                self._get_most_recent_exp_snapshot_created_on_ms(EXP_ID))
            ModifiedRecentUpdatesAggregator.start_computation()
            self.assertEqual(
                self.count_jobs_in_taskqueue(
                    taskqueue_services.QUEUE_NAME_CONTINUOUS_JOBS), 1)
            self.process_and_flush_pending_tasks()
            recent_notifications = (
                ModifiedRecentUpdatesAggregator.get_recent_notifications(
                    USER_ID)[1])
            self.assertEqual([{
                'type': feconf.UPDATE_TYPE_EXPLORATION_COMMIT,
                'last_updated_ms': expected_last_updated_ms,
                'activity_id': EXP_ID,
                'activity_title': EXP_TITLE,
                'author_id': ANOTHER_USER_ID,
                'subject': 'Update exploration',
            }], recent_notifications)
    def test_basic_computation_works_if_exploration_is_deleted(self):
        """Deleting an exploration still produces a notification, dated
        after the last pre-deletion snapshot.
        """
        with self._get_test_context():
            self.save_new_valid_exploration(
                EXP_ID, USER_ID, title=EXP_TITLE, category='Category')
            last_updated_ms_before_deletion = (
                self._get_most_recent_exp_snapshot_created_on_ms(EXP_ID))
            exp_services.delete_exploration(USER_ID, EXP_ID)
            ModifiedRecentUpdatesAggregator.start_computation()
            self.assertEqual(
                self.count_jobs_in_taskqueue(
                    taskqueue_services.QUEUE_NAME_CONTINUOUS_JOBS), 1)
            self.process_and_flush_pending_tasks()
            recent_notifications = (
                ModifiedRecentUpdatesAggregator.get_recent_notifications(
                    USER_ID)[1])
            self.assertEqual(len(recent_notifications), 1)
            self.assertEqual(sorted(recent_notifications[0].keys()), [
                'activity_id', 'activity_title', 'author_id',
                'last_updated_ms', 'subject', 'type'])
            self.assertDictContainsSubset({
                'type': feconf.UPDATE_TYPE_EXPLORATION_COMMIT,
                'activity_id': EXP_ID,
                'activity_title': EXP_TITLE,
                'author_id': USER_ID,
                'subject': feconf.COMMIT_MESSAGE_EXPLORATION_DELETED,
            }, recent_notifications[0])
            # The deletion commit is newer than the pre-deletion snapshot.
            self.assertLess(
                last_updated_ms_before_deletion,
                recent_notifications[0]['last_updated_ms'])
    def test_multiple_exploration_commits_and_feedback_messages(self):
        """Two creations plus a feedback thread yield three notifications,
        listed newest-first.
        """
        with self._get_test_context():
            self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
            editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
            # User creates an exploration.
            self.save_new_valid_exploration(
                EXP_1_ID, editor_id, title=EXP_1_TITLE,
                category='Category')
            exp1_last_updated_ms = (
                self._get_most_recent_exp_snapshot_created_on_ms(EXP_1_ID))
            # User gives feedback on it.
            feedback_services.create_thread(
                EXP_1_ID, None, editor_id, FEEDBACK_THREAD_SUBJECT,
                'text')
            thread_id = feedback_services.get_all_threads(EXP_1_ID, False)[0].id
            message = feedback_services.get_messages(thread_id)[0]
            # User creates another exploration.
            self.save_new_valid_exploration(
                EXP_2_ID, editor_id, title=EXP_2_TITLE,
                category='Category')
            exp2_last_updated_ms = (
                self._get_most_recent_exp_snapshot_created_on_ms(EXP_2_ID))
            ModifiedRecentUpdatesAggregator.start_computation()
            self.assertEqual(
                self.count_jobs_in_taskqueue(
                    taskqueue_services.QUEUE_NAME_CONTINUOUS_JOBS), 1)
            self.process_and_flush_pending_tasks()
            recent_notifications = (
                ModifiedRecentUpdatesAggregator.get_recent_notifications(
                    editor_id)[1])
            # Expected order: exp 2 creation, feedback message, exp 1 creation.
            self.assertEqual([(
                self._get_expected_activity_created_dict(
                    editor_id, EXP_2_ID, EXP_2_TITLE, 'exploration',
                    feconf.UPDATE_TYPE_EXPLORATION_COMMIT,
                    exp2_last_updated_ms)
            ), {
                'activity_id': EXP_1_ID,
                'activity_title': EXP_1_TITLE,
                'author_id': editor_id,
                'last_updated_ms': utils.get_time_in_millisecs(
                    message.created_on),
                'subject': FEEDBACK_THREAD_SUBJECT,
                'type': feconf.UPDATE_TYPE_FEEDBACK_MESSAGE,
            }, (
                self._get_expected_activity_created_dict(
                    editor_id, EXP_1_ID, EXP_1_TITLE, 'exploration',
                    feconf.UPDATE_TYPE_EXPLORATION_COMMIT,
                    exp1_last_updated_ms)
            )], recent_notifications)
    def test_making_feedback_thread_does_not_subscribe_to_exploration(self):
        """Opening a feedback thread subscribes the author only to the
        thread, not to the exploration's commits.
        """
        with self._get_test_context():
            self.signup(USER_A_EMAIL, USER_A_USERNAME)
            user_a_id = self.get_user_id_from_email(USER_A_EMAIL)
            self.signup(USER_B_EMAIL, USER_B_USERNAME)
            user_b_id = self.get_user_id_from_email(USER_B_EMAIL)
            # User A creates an exploration.
            self.save_new_valid_exploration(
                EXP_ID, user_a_id, title=EXP_TITLE, category='Category')
            exp_last_updated_ms = (
                self._get_most_recent_exp_snapshot_created_on_ms(EXP_ID))
            # User B starts a feedback thread.
            feedback_services.create_thread(
                EXP_ID, None, user_b_id, FEEDBACK_THREAD_SUBJECT, 'text')
            thread_id = feedback_services.get_all_threads(EXP_ID, False)[0].id
            message = feedback_services.get_messages(thread_id)[0]
            ModifiedRecentUpdatesAggregator.start_computation()
            self.assertEqual(
                self.count_jobs_in_taskqueue(
                    taskqueue_services.QUEUE_NAME_CONTINUOUS_JOBS), 1)
            self.process_and_flush_pending_tasks()
            recent_notifications_for_user_a = (
                ModifiedRecentUpdatesAggregator.get_recent_notifications(
                    user_a_id)[1])
            recent_notifications_for_user_b = (
                ModifiedRecentUpdatesAggregator.get_recent_notifications(
                    user_b_id)[1])
            expected_thread_notification = {
                'activity_id': EXP_ID,
                'activity_title': EXP_TITLE,
                'author_id': user_b_id,
                'last_updated_ms': utils.get_time_in_millisecs(
                    message.created_on),
                'subject': FEEDBACK_THREAD_SUBJECT,
                'type': feconf.UPDATE_TYPE_FEEDBACK_MESSAGE,
            }
            expected_creation_notification = (
                self._get_expected_activity_created_dict(
                    user_a_id, EXP_ID, EXP_TITLE, 'exploration',
                    feconf.UPDATE_TYPE_EXPLORATION_COMMIT,
                    exp_last_updated_ms))
            # User A sees A's commit and B's feedback thread.
            self.assertEqual(
                recent_notifications_for_user_a, [
                    expected_thread_notification,
                    expected_creation_notification
                ])
            # User B sees only her feedback thread, but no commits.
            self.assertEqual(
                recent_notifications_for_user_b, [
                    expected_thread_notification,
                ])
    def test_subscribing_to_exploration_subscribes_to_its_feedback_threads(
            self):
        """Becoming an editor of an exploration also subscribes the user to
        the exploration's existing feedback threads.
        """
        with self._get_test_context():
            self.signup(USER_A_EMAIL, USER_A_USERNAME)
            user_a_id = self.get_user_id_from_email(USER_A_EMAIL)
            self.signup(USER_B_EMAIL, USER_B_USERNAME)
            user_b_id = self.get_user_id_from_email(USER_B_EMAIL)
            user_a = user_services.UserActionsInfo(user_a_id)
            # User A creates an exploration.
            self.save_new_valid_exploration(
                EXP_ID, user_a_id, title=EXP_TITLE, category='Category')
            exp_last_updated_ms = (
                self._get_most_recent_exp_snapshot_created_on_ms(EXP_ID))
            # User B starts a feedback thread.
            feedback_services.create_thread(
                EXP_ID, None, user_b_id, FEEDBACK_THREAD_SUBJECT, 'text')
            thread_id = feedback_services.get_all_threads(EXP_ID, False)[0].id
            message = feedback_services.get_messages(thread_id)[0]
            # User A adds user B as an editor of the exploration.
            rights_manager.assign_role_for_exploration(
                user_a, EXP_ID, user_b_id, rights_manager.ROLE_EDITOR)
            ModifiedRecentUpdatesAggregator.start_computation()
            self.assertEqual(
                self.count_jobs_in_taskqueue(
                    taskqueue_services.QUEUE_NAME_CONTINUOUS_JOBS), 1)
            self.process_and_flush_pending_tasks()
            recent_notifications_for_user_a = (
                ModifiedRecentUpdatesAggregator.get_recent_notifications(
                    user_a_id)[1])
            recent_notifications_for_user_b = (
                ModifiedRecentUpdatesAggregator.get_recent_notifications(
                    user_b_id)[1])
            expected_thread_notification = {
                'activity_id': EXP_ID,
                'activity_title': EXP_TITLE,
                'author_id': user_b_id,
                'last_updated_ms': utils.get_time_in_millisecs(
                    message.created_on),
                'subject': FEEDBACK_THREAD_SUBJECT,
                'type': feconf.UPDATE_TYPE_FEEDBACK_MESSAGE,
            }
            expected_creation_notification = (
                self._get_expected_activity_created_dict(
                    user_a_id, EXP_ID, EXP_TITLE, 'exploration',
                    feconf.UPDATE_TYPE_EXPLORATION_COMMIT,
                    exp_last_updated_ms))
            # User A sees A's commit and B's feedback thread.
            self.assertEqual(
                recent_notifications_for_user_a, [
                    expected_thread_notification,
                    expected_creation_notification
                ])
            # User B sees A's commit and B's feedback thread.
            self.assertEqual(
                recent_notifications_for_user_b, [
                    expected_thread_notification,
                    expected_creation_notification,
                ])
    def test_basic_computation_for_collections(self):
        """A single collection creation yields one 'created' notification."""
        with self._get_test_context():
            self.save_new_default_collection(
                COLLECTION_ID, USER_ID, title=COLLECTION_TITLE)
            expected_last_updated_ms = (
                self._get_most_recent_collection_snapshot_created_on_ms(
                    COLLECTION_ID))
            ModifiedRecentUpdatesAggregator.start_computation()
            self.assertEqual(
                self.count_jobs_in_taskqueue(
                    taskqueue_services.QUEUE_NAME_CONTINUOUS_JOBS), 1)
            self.process_and_flush_pending_tasks()
            recent_notifications = (
                ModifiedRecentUpdatesAggregator.get_recent_notifications(
                    USER_ID)[1])
            self.assertEqual(len(recent_notifications), 1)
            self.assertEqual(
                recent_notifications[0],
                self._get_expected_activity_created_dict(
                    USER_ID, COLLECTION_ID, COLLECTION_TITLE, 'collection',
                    feconf.UPDATE_TYPE_COLLECTION_COMMIT,
                    expected_last_updated_ms))
def test_basic_computation_with_an_update_after_collection_is_created(self):
with self._get_test_context():
self.save_new_default_collection(
COLLECTION_ID, USER_ID, title=COLLECTION_TITLE)
# Another user makes a commit; this, too, shows up in the
# original user's dashboard.
collection_services.update_collection(
ANOTHER_USER_ID, COLLECTION_ID, [{
'cmd': 'edit_collection_property',
'property_name': 'title',
'new_value': 'A new title'
}], 'Update collection')
expected_last_updated_ms = (
self._get_most_recent_collection_snapshot_created_on_ms(
COLLECTION_ID))
ModifiedRecentUpdatesAggregator.start_computation()
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_CONTINUOUS_JOBS), 1)
self.process_and_flush_pending_tasks()
recent_notifications = (
ModifiedRecentUpdatesAggregator.get_recent_notifications(
USER_ID)[1])
self.assertEqual([{
'type': feconf.UPDATE_TYPE_COLLECTION_COMMIT,
'last_updated_ms': expected_last_updated_ms,
'activity_id': COLLECTION_ID,
'activity_title': 'A new title',
'author_id': ANOTHER_USER_ID,
'subject': 'Update collection',
}], recent_notifications)
def test_basic_computation_works_if_collection_is_deleted(self):
with self._get_test_context():
self.save_new_default_collection(
COLLECTION_ID, USER_ID, title=COLLECTION_TITLE)
last_updated_ms_before_deletion = (
self._get_most_recent_collection_snapshot_created_on_ms(
COLLECTION_ID))
collection_services.delete_collection(USER_ID, COLLECTION_ID)
ModifiedRecentUpdatesAggregator.start_computation()
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_CONTINUOUS_JOBS), 1)
self.process_and_flush_pending_tasks()
recent_notifications = (
ModifiedRecentUpdatesAggregator.get_recent_notifications(
USER_ID)[1])
self.assertEqual(len(recent_notifications), 1)
self.assertEqual(sorted(recent_notifications[0].keys()), [
'activity_id', 'activity_title', 'author_id',
'last_updated_ms', 'subject', 'type'])
self.assertDictContainsSubset({
'type': feconf.UPDATE_TYPE_COLLECTION_COMMIT,
'activity_id': COLLECTION_ID,
'activity_title': COLLECTION_TITLE,
'author_id': USER_ID,
'subject': feconf.COMMIT_MESSAGE_COLLECTION_DELETED,
}, recent_notifications[0])
self.assertLess(
last_updated_ms_before_deletion,
recent_notifications[0]['last_updated_ms'])
class ModifiedUserStatsAggregator(
        user_jobs_continuous.UserStatsAggregator):
    """A modified UserStatsAggregator that does not start a new
    batch job when the previous one has finished.
    """

    @classmethod
    def _get_batch_job_manager_class(cls):
        # Route batch jobs through the modified (test-only) MR manager.
        return ModifiedUserStatsMRJobManager

    @classmethod
    def _kickoff_batch_job_after_previous_one_ends(cls):
        # Deliberate no-op: tests control exactly when batch jobs run.
        pass
class ModifiedUserStatsMRJobManager(
        user_jobs_continuous.UserStatsMRJobManager):
    """Batch job manager that reports ModifiedUserStatsAggregator as its
    continuous computation, keeping test runs under the non-restarting
    aggregator above.
    """

    @classmethod
    def _get_continuous_computation_class(cls):
        return ModifiedUserStatsAggregator
class UserStatsAggregatorTest(test_utils.GenericTestBase):
    """ Tests the calculation of a user's statistics -
    impact score, average ratings, total plays
    from the continuous computation of UserStatsAggregator.
    """

    EXP_ID_1 = 'exp_id_1'
    EXP_ID_2 = 'exp_id_2'
    EXP_ID_3 = 'exp_id_3'
    # Explorations are exercised at their initial version in these tests.
    EXP_DEFAULT_VERSION = 1
    USER_SESSION_ID = 'session1'
    USER_A_EMAIL = 'a@example.com'
    USER_B_EMAIL = 'b@example.com'
    USER_A_USERNAME = 'a'
    USER_B_USERNAME = 'b'
    # NOTE(review): not referenced by the visible tests -- presumably the
    # minimum-completions threshold of the impact formula; confirm.
    MIN_NUM_COMPLETIONS = 2
    # Exponent applied when computing reach and the final impact score
    # (see the expected-value computations in the tests below).
    EXPONENT = 2.0 / 3
    def setUp(self):
        """Registers users A and B and caches their ids."""
        super(UserStatsAggregatorTest, self).setUp()
        # Per-exploration event tallies, keyed by exploration id.
        self.num_completions = collections.defaultdict(int)
        self.num_starts = collections.defaultdict(int)
        self.signup(self.USER_A_EMAIL, self.USER_A_USERNAME)
        self.signup(self.USER_B_EMAIL, self.USER_B_USERNAME)
        self.user_a_id = self.get_user_id_from_email(self.USER_A_EMAIL)
        self.user_b_id = self.get_user_id_from_email(self.USER_B_EMAIL)
        # Actions wrapper used when assigning exploration rights to user B.
        self.user_a = user_services.UserActionsInfo(self.user_a_id)
    def _mock_get_statistics(self, exp_id, unused_version):
        """Stub for stats_services.get_exploration_stats.

        Returns canned ExplorationStats per exploration id: EXP_ID_1 and
        EXP_ID_2 carry identical two-state stats; EXP_ID_3 has no states.
        The expected counts asserted in the tests below (e.g. the answer
        count of 15) derive from these values.
        Raises KeyError for any other exp_id.
        """
        current_completions = {
            self.EXP_ID_1: stats_domain.ExplorationStats(
                self.EXP_ID_1, self.EXP_DEFAULT_VERSION, 5, 2, 0, 0, 0, 0, {
                    'state1': stats_domain.StateStats(
                        0, 0, 0, 0, 0, 0, 3, 1, 0, 0, 0),
                    'state2': stats_domain.StateStats(
                        0, 0, 0, 0, 0, 0, 7, 1, 0, 0, 0),
                }
            ),
            self.EXP_ID_2: stats_domain.ExplorationStats(
                self.EXP_ID_2, self.EXP_DEFAULT_VERSION, 5, 2, 0, 0, 0, 0, {
                    'state1': stats_domain.StateStats(
                        0, 0, 0, 0, 0, 0, 3, 1, 0, 0, 0),
                    'state2': stats_domain.StateStats(
                        0, 0, 0, 0, 0, 0, 7, 1, 0, 0, 0),
                }
            ),
            # EXP_ID_3: an exploration with no plays and no states.
            self.EXP_ID_3: stats_domain.ExplorationStats(
                self.EXP_ID_3, self.EXP_DEFAULT_VERSION, 0, 0, 0, 0, 0, 0, {})
        }
        return current_completions[exp_id]
    @classmethod
    def _mock_get_zero_impact_score(cls, unused_exploration_id):
        """Stub impact function returning zero impact."""
        return 0

    @classmethod
    def _mock_get_below_zero_impact_score(cls, unused_exploration_id):
        """Stub impact function returning a negative impact."""
        return -1

    @classmethod
    def _mock_get_positive_impact_score(cls, unused_exploration_id):
        """Stub impact function returning a positive impact."""
        return 1
    def _run_computation(self):
        """Runs the MapReduce job after running the continuous
        statistics aggregator for explorations to get the correct num
        completion events.
        """
        # Patch the stats lookup with canned data so results are
        # deterministic, then run the aggregator to completion.
        with self.swap(
            stats_services, 'get_exploration_stats', self._mock_get_statistics):
            ModifiedUserStatsAggregator.start_computation()
            self.process_and_flush_pending_tasks()
def _generate_user_ids(self, count):
"""Generate unique user ids to rate an exploration. Each user id needs
to be unique since each user can only give an exploration one rating.
"""
return ['user%d' % i for i in range(count)]
    def _create_exploration(self, exp_id, user_id):
        """Create and persist a default exploration owned by user_id."""
        exploration = exp_domain.Exploration.create_default_exploration(exp_id)
        exp_services.save_new_exploration(user_id, exploration)
        return exploration
    def _record_start(self, exp_id, exp_version, state):
        """Record start event to an exploration.

        Completing the exploration is not necessary here since the total_plays
        are currently being counted taking into account only the # of starts.
        """
        event_services.StartExplorationEventHandler.record(
            exp_id, exp_version, state, self.USER_SESSION_ID, {},
            feconf.PLAY_TYPE_NORMAL)
def _rate_exploration(self, exp_id, num_ratings, rating):
"""Create num_ratings ratings for exploration with exp_id,
of value rating.
"""
# Each user id needs to be unique since each user can only give an
# exploration one rating.
user_ids = self._generate_user_ids(num_ratings)
for user_id in user_ids:
rating_services.assign_rating_to_exploration(
user_id, exp_id, rating)
def _record_exploration_rating(self, exp_id, ratings):
user_ids = self._generate_user_ids(len(ratings))
self.process_and_flush_pending_tasks()
for ind, user_id in enumerate(user_ids):
event_services.RateExplorationEventHandler.record(
exp_id, user_id, ratings[ind], None)
self.process_and_flush_pending_tasks()
    def _record_exploration_rating_for_user(
            self, exp_id, user_id, rating, old_rating=None):
        """Record a rating event from one specific user.

        old_rating: the user's previous rating of this exploration, or
            None if this is their first rating.
        """
        self.process_and_flush_pending_tasks()
        event_services.RateExplorationEventHandler.record(
            exp_id, user_id, rating, old_rating)
        self.process_and_flush_pending_tasks()
def test_stats_for_user_with_no_explorations(self):
"""Test that a user who is not a contributor on any exploration
is not assigned value of impact score, total plays and average ratings.
"""
self._run_computation()
user_stats_model = user_models.UserStatsModel.get(
self.user_a_id, strict=False)
self.assertIsNone(user_stats_model)
def test_standard_user_stats_calculation_one_exploration(self):
exploration = self._create_exploration(self.EXP_ID_1, self.user_a_id)
# Give this exploration an average rating of 4.
avg_rating = 4
self._rate_exploration(exploration.id, 5, avg_rating)
# The expected answer count is the sum of the first hit counts in the
# statistics defined in _get_mock_statistics() method above.
expected_answer_count = 15
reach = expected_answer_count ** self.EXPONENT
expected_user_impact_score = round(
((avg_rating - 2) * reach) ** self.EXPONENT)
# Verify that the impact score matches the expected.
self._run_computation()
user_stats_model = user_models.UserStatsModel.get(self.user_a_id)
self.assertEqual(
user_stats_model.impact_score, expected_user_impact_score)
def test_exploration_multiple_contributors(self):
exploration = self._create_exploration(self.EXP_ID_1, self.user_a_id)
# Give this exploration an average rating of 4.
avg_rating = 4
self._rate_exploration(exploration.id, 5, avg_rating)
exp_services.update_exploration(self.user_b_id, self.EXP_ID_1, [], '')
# The expected answer count is the sum of the first hit counts in the
# statistics defined in _get_mock_statistics() method above.
expected_answer_count = 15
reach = expected_answer_count ** self.EXPONENT
contrib = 0.5
expected_user_impact_score = round(
((avg_rating - 2) * reach * contrib) ** self.EXPONENT)
# Verify that the impact score matches the expected.
self._run_computation()
user_stats_model = user_models.UserStatsModel.get(self.user_a_id)
self.assertEqual(
user_stats_model.impact_score, expected_user_impact_score)
user_stats_model = user_models.UserStatsModel.get(self.user_b_id)
self.assertEqual(
user_stats_model.impact_score, expected_user_impact_score)
def test_standard_user_stats_calculation_multiple_explorations(self):
exploration_1 = self._create_exploration(self.EXP_ID_1, self.user_a_id)
exploration_2 = self._create_exploration(self.EXP_ID_2, self.user_a_id)
avg_rating = 4
self._rate_exploration(exploration_1.id, 2, avg_rating)
self._rate_exploration(exploration_2.id, 2, avg_rating)
# The expected answer count is the sum of the first hit counts in the
# statistics defined in _get_mock_statistics() method above.
expected_answer_count = 15
reach = expected_answer_count ** self.EXPONENT
impact_per_exp = ((avg_rating - 2) * reach) # * 1 for contribution
expected_user_impact_score = round(
(impact_per_exp * 2) ** self.EXPONENT)
# Verify that the impact score matches the expected.
self._run_computation()
user_stats_model = user_models.UserStatsModel.get(self.user_a_id)
self.assertEqual(
user_stats_model.impact_score, expected_user_impact_score)
def test_only_yield_when_rating_greater_than_two(self):
"""Tests that map only yields an impact score for an
exploration when the impact score is greater than 0.
"""
self._create_exploration(self.EXP_ID_1, self.user_a_id)
# Give two ratings of 1.
self._rate_exploration(self.EXP_ID_1, 2, 1)
self._run_computation()
user_stats_model = user_models.UserStatsModel.get(
self.user_a_id, strict=False)
self.assertEqual(user_stats_model.impact_score, 0)
ModifiedUserStatsAggregator.stop_computation(self.user_a_id)
# Give two ratings of 2.
self._rate_exploration(self.EXP_ID_1, 2, 2)
self._run_computation()
user_stats_model = user_models.UserStatsModel.get(
self.user_a_id, strict=False)
self.assertEqual(user_stats_model.impact_score, 0)
ModifiedUserStatsAggregator.stop_computation(self.user_a_id)
# Give two ratings of 3. The impact score should now be nonzero.
self._rate_exploration(self.EXP_ID_1, 2, 3)
self._run_computation()
user_stats_model = user_models.UserStatsModel.get(
self.user_a_id, strict=False)
self.assertIsNotNone(user_stats_model)
self.assertGreater(user_stats_model.impact_score, 0)
def test_impact_for_exp_with_no_answers(self):
"""Test that when an exploration has no answers, it is considered to
have no reach.
"""
exploration = self._create_exploration(self.EXP_ID_3, self.user_a_id)
self._rate_exploration(exploration.id, 5, 3)
self._run_computation()
user_stats_model = user_models.UserStatsModel.get(self.user_a_id)
self.assertEqual(user_stats_model.impact_score, 0)
def test_impact_for_exp_with_no_ratings(self):
"""Test that when an exploration has no ratings, the impact returned
from the impact function is 0.
"""
self._create_exploration(self.EXP_ID_1, self.user_a_id)
user_stats_model = user_models.UserStatsModel.get(
self.user_a_id, strict=False)
self.assertEqual(user_stats_model, None)
def test_realtime_layer_batch_job_no_ratings_plays(self):
self._create_exploration(
self.EXP_ID_1, self.user_a_id)
user_stats = (
user_jobs_continuous.UserStatsAggregator.get_dashboard_stats(
self.user_a_id))
self.assertEquals(
user_stats['total_plays'], 0)
self.assertEquals(
user_stats['num_ratings'], 0)
self.assertEquals(
user_stats['average_ratings'], None)
def test_realtime_layer_batch_job_single_rating(self):
self._create_exploration(
self.EXP_ID_1, self.user_a_id)
self._record_exploration_rating(self.EXP_ID_1, [4])
user_stats = (
user_jobs_continuous.UserStatsAggregator.get_dashboard_stats(
self.user_a_id))
self.assertEquals(user_stats['total_plays'], 0)
self.assertEquals(user_stats['num_ratings'], 1)
self.assertEquals(user_stats['average_ratings'], 4)
def test_realtime_layer_batch_job_single_exploration_one_owner(self):
exploration = self._create_exploration(
self.EXP_ID_1, self.user_a_id)
exp_id = self.EXP_ID_1
exp_version = self.EXP_DEFAULT_VERSION
state = exploration.init_state_name
self._record_start(exp_id, exp_version, state)
self._record_start(exp_id, exp_version, state)
self._record_exploration_rating(exp_id, [2, 5])
user_stats = (
user_jobs_continuous.UserStatsAggregator.get_dashboard_stats(
self.user_a_id))
self.assertEquals(user_stats['total_plays'], 2)
self.assertEquals(user_stats['num_ratings'], 2)
self.assertEquals(user_stats['average_ratings'], 3.5)
def test_realtime_layer_batch_job_single_exploration_multiple_owners(self):
exploration = self._create_exploration(
self.EXP_ID_1, self.user_a_id)
rights_manager.assign_role_for_exploration(
self.user_a, self.EXP_ID_1, self.user_b_id,
rights_manager.ROLE_OWNER)
exp_version = self.EXP_DEFAULT_VERSION
exp_id = self.EXP_ID_1
state = exploration.init_state_name
self._record_start(exp_id, exp_version, state)
self._record_start(exp_id, exp_version, state)
self._record_exploration_rating(exp_id, [3, 4, 5])
self._record_exploration_rating(exp_id, [1, 5, 4])
expected_results = {
'total_plays': 2,
'num_ratings': 6,
'average_ratings': 22 / 6.0
}
user_stats_1 = (
user_jobs_continuous.UserStatsAggregator.get_dashboard_stats(
self.user_a_id))
self.assertEquals(
user_stats_1['total_plays'], expected_results['total_plays'])
self.assertEquals(
user_stats_1['num_ratings'], expected_results['num_ratings'])
self.assertEquals(
user_stats_1['average_ratings'],
expected_results['average_ratings'])
user_stats_2 = (
user_jobs_continuous.UserStatsAggregator.get_dashboard_stats(
self.user_b_id))
self.assertEquals(
user_stats_2['total_plays'], expected_results['total_plays'])
self.assertEquals(
user_stats_2['num_ratings'], expected_results['num_ratings'])
self.assertEquals(
user_stats_2['average_ratings'],
expected_results['average_ratings'])
def test_realtime_layer_batch_job_multiple_explorations_one_owner(self):
self._create_exploration(
self.EXP_ID_1, self.user_a_id)
self._create_exploration(
self.EXP_ID_2, self.user_a_id)
self._record_exploration_rating(self.EXP_ID_1, [4, 5, 2])
self._record_exploration_rating(self.EXP_ID_2, [5, 2])
user_stats = (
user_jobs_continuous.UserStatsAggregator.get_dashboard_stats(
self.user_a_id))
self.assertEquals(user_stats['total_plays'], 0)
self.assertEquals(user_stats['num_ratings'], 5)
self.assertEquals(user_stats['average_ratings'], 18 / 5.0)
def test_realtime_layer_batch_job_user_rate_same_exp_multiple_times(self):
self._create_exploration(
self.EXP_ID_1, self.user_a_id)
exp_id_1 = self.EXP_ID_1
self._record_exploration_rating_for_user(exp_id_1, self.user_b_id, 5)
user_stats = (
user_jobs_continuous.UserStatsAggregator.get_dashboard_stats(
self.user_a_id))
self.assertEquals(user_stats['total_plays'], 0)
self.assertEquals(user_stats['num_ratings'], 1)
self.assertEquals(user_stats['average_ratings'], 5)
self._record_exploration_rating_for_user(
exp_id_1, self.user_b_id, 3, old_rating=5)
user_stats = (
user_jobs_continuous.UserStatsAggregator.get_dashboard_stats(
self.user_a_id))
self.assertEquals(user_stats['total_plays'], 0)
self.assertEquals(user_stats['num_ratings'], 1)
self.assertEquals(user_stats['average_ratings'], 3)
def test_both_realtime_layer_and_batch_data(self):
exploration_1 = self._create_exploration(self.EXP_ID_1, self.user_a_id)
exploration_2 = self._create_exploration(self.EXP_ID_2, self.user_a_id)
exp_id_1 = self.EXP_ID_1
exp_id_2 = self.EXP_ID_2
exp_version = self.EXP_DEFAULT_VERSION
state_1 = exploration_1.init_state_name
state_2 = exploration_2.init_state_name
self._rate_exploration(exp_id_1, 2, 4)
self._rate_exploration(exp_id_2, 4, 3)
# Run the computation and check data from batch job.
self._run_computation()
user_stats_model = user_models.UserStatsModel.get(self.user_a_id)
# The total plays is the sum of the number of starts of both the
# exploration_1 and exploration_2 as defined in the
# _mock_get_statistics() method above.
self.assertEqual(user_stats_model.total_plays, 14)
self.assertEqual(user_stats_model.num_ratings, 6)
self.assertEqual(user_stats_model.average_ratings, 20 / 6.0)
# Stop the batch job. Fire up a few events and check data from realtime
# job.
ModifiedUserStatsAggregator.stop_computation(self.user_a_id)
self._record_start(exp_id_1, exp_version, state_1)
self._record_start(exp_id_2, exp_version, state_2)
self._record_exploration_rating(exp_id_1, [2, 5])
self._record_exploration_rating(exp_id_2, [4, 1])
user_stats = (
user_jobs_continuous.UserStatsAggregator.get_dashboard_stats(
self.user_a_id))
# After recording two start events, the total plays is now increased by
# two.
self.assertEquals(user_stats['total_plays'], 16)
self.assertEquals(user_stats['num_ratings'], 10)
self.assertEquals(user_stats['average_ratings'], 32 / 10.0)
# ---------------------------------------------------------------------------
import os
import sys
import re
import math
import shutil
import base64
import wx
import sqlite3
import logging, logging.handlers
from wx.lib.mixins.listctrl import ColumnSorterMixin
import wx.lib.delayedresult as DR
from ABEsafe_gen import ABEsafe_generator as GEN
# Colour palette used by the admin UI widgets.
TITLE_COLOR = wx.Colour(19, 57, 204)
LABEL_COLOR = wx.Colour(104, 94, 255)
USUAL_COLOR = wx.Colour(103, 195, 7)
LESS_COLOR = wx.Colour(255, 242, 35)
IGNORE_COLOR = wx.Colour(84, 88, 80)
FUN1_COLOR = wx.Colour(178, 77, 0)
FUN2_COLOR = wx.Colour(255, 181, 125)
FUN3_COLOR = wx.Colour(255, 154, 77)
FUN4_COLOR = wx.Colour(18, 160, 178)
FUN5_COLOR = wx.Colour(180, 246, 255)
FUN6_COLOR = wx.Colour(0, 200, 50)
BLACK_COLOR = wx.Colour(40, 40, 20)
# Default window size, in pixels.
SCREEN_SIZE = (640, 480)
# Cached department/position name lists, (re)loaded from the database by
# MainFrame.loadDepartmentList() / loadPositionList().
departmentList = []
positionList = []
class AdminApp(wx.App):
    """Top-level wx application.

    Opens the login window immediately and keeps the top window raised
    on activation / Mac dock reopen.
    """

    def __init__(self,log):
        wx.App.__init__(self)
        self.log = log
        # The login window drives the rest of the UI.
        LoginWindows(self,log)

    def OnInit(self):
        self.frame = None
        return True

    def BringWindowToFront(self):
        # Best effort: there may be no top window yet (startup/teardown).
        # Catch Exception rather than a bare except so SystemExit and
        # KeyboardInterrupt still propagate.
        try:
            self.GetTopWindow().Raise()
        except Exception:
            pass

    def OnActivate(self,ev):
        if ev.GetActive():
            self.BringWindowToFront()
        else:
            ev.Skip()

    def MacReopenApp(self):
        # Invoked on OS X when the dock icon is clicked with no window up.
        self.BringWindowToFront()
class MainFrame(wx.Frame):
    def __init__(self,log,titlename,framesize):
        """Main admin window.

        log: logging.Logger used for warnings/errors.
        titlename: window title string.
        framesize: (width, height) for the frame.
        """
        wx.Frame.__init__(self,None,title=titlename,size=framesize)
        self.log = log
        # Mirrors of the "create user" form fields; kept current by the
        # checkNewGroup poller and read by the on*Clicked handlers.
        self.staffId = ""
        self.username = ""
        self.department = ""
        self.position = ""
    def setupPanel(self):
        """Build the admin panel: the 'create user' form row, the sortable
        user list and its action buttons.
        """
        global departmentList, positionList
        self.panel = wx.Panel(self)
        # Sizer hierarchy: mainSizer > (createUserSizer > per-column
        # sizers), then the list buttons and the list control itself.
        self.mainSizer = wx.BoxSizer(wx.VERTICAL)
        self.createUserSizer = wx.BoxSizer(wx.HORIZONTAL)
        self.staffIdSizer = wx.BoxSizer(wx.VERTICAL)
        self.usernameSizer = wx.BoxSizer(wx.VERTICAL)
        self.departmentSizer = wx.BoxSizer(wx.VERTICAL)
        self.positionSizer = wx.BoxSizer(wx.VERTICAL)
        self.lastColumnSizer = wx.BoxSizer(wx.VERTICAL)
        self.userListButtonSizer = wx.BoxSizer(wx.HORIZONTAL)
        # -- Staff ID column --
        self.createUserStaffIdLabel = wx.StaticText(self.panel,label="Staff ID")
        self.createStaffIdTextBox = wx.TextCtrl(self.panel,size=(70,22),style=wx.TE_NOHIDESEL)
        self.staffIdSizer.Add(self.createUserStaffIdLabel,0,wx.ALL,5)
        self.staffIdSizer.Add(self.createStaffIdTextBox,1,wx.ALL,5)
        # -- User name column --
        self.createUsernameLabel = wx.StaticText(self.panel,label="User name")
        self.createUsernameTextBox = wx.TextCtrl(self.panel,style=wx.TE_NOHIDESEL)
        self.usernameSizer.Add(self.createUsernameLabel,0,wx.ALL,5)
        self.usernameSizer.Add(self.createUsernameTextBox,1,wx.ALL,5)
        # -- Department column (choices from the departmentList cache) --
        self.loadDepartmentList()
        self.createDepartmentLabel = wx.StaticText(self.panel,label="Department")
        self.createUserDepartmentCombobox = wx.ComboBox(self.panel,size=(120,25),choices=departmentList)
        self.addDepartmentButton = wx.Button(self.panel,label="Add Department")
        self.addDepartmentButton.Bind(wx.EVT_BUTTON,self.onAddDepartmentClicked)
        # Disabled until checkNewGroup() sees an unknown department name.
        self.addDepartmentButton.Disable()
        self.departmentSizer.Add(self.createDepartmentLabel,0,wx.ALL,5)
        self.departmentSizer.Add(self.createUserDepartmentCombobox,1,wx.ALL,5)
        self.departmentSizer.Add(self.addDepartmentButton,0,wx.ALL,5)
        # -- Position column (choices from the positionList cache) --
        self.loadPositionList()
        self.createPositionLabel = wx.StaticText(self.panel,label="Position")
        self.createUserPositionCombobox = wx.ComboBox(self.panel,size=(120,25),choices=positionList)
        self.addPositionButton = wx.Button(self.panel,label="Add Position")
        self.addPositionButton.Bind(wx.EVT_BUTTON,self.onAddPositionClicked)
        self.addPositionButton.Disable()
        self.positionSizer.Add(self.createPositionLabel,0,wx.ALL,5)
        self.positionSizer.Add(self.createUserPositionCombobox,1,wx.ALL,5)
        self.positionSizer.Add(self.addPositionButton,0,wx.ALL,5)
        # -- Create-user button column --
        self.createUserButton = wx.Button(self.panel,label="Create user")
        self.createUserButton.Bind(wx.EVT_BUTTON,self.onCreateNewUserClicked)
        self.createUserButton.Disable()
        self.lastColumnSizer.Add(self.createUserButton,0,wx.ALL,5)
        self.createUserSizer.Add(self.staffIdSizer,0,wx.ALL,5)
        self.createUserSizer.Add(self.usernameSizer,0,wx.ALL,5)
        self.createUserSizer.Add(self.departmentSizer,0,wx.ALL,5)
        self.createUserSizer.Add(self.positionSizer,0,wx.ALL,5)
        self.createUserSizer.Add(self.lastColumnSizer,0,wx.ALL|wx.EXPAND,5)
        self.mainSizer.Add(self.createUserSizer,0,wx.ALL,10)
        # NOTE(review): panel_pos/panel_size are computed but never used.
        panel_pos = self.panel.GetPosition()
        panel_size = self.panel.GetSize()
        # -- User list: sortable report control fed from the database --
        self.userdata = self.getUserList()
        self.userList = self.SortedListCtrl(self.panel,data=self.userdata)
        self.userList.InsertColumn(0,"Staff ID",width=80)
        self.userList.InsertColumn(1,"User name",width=220)
        self.userList.InsertColumn(2,"Department",width=150)
        self.userList.InsertColumn(3,"Position",width=150)
        dict_index = range(len(self.userdata))
        zipped = zip(dict_index,self.userdata)
        userDictionary = dict(zipped)
        items = userDictionary.items()
        for key,data in items:
            # sys.maxint as the index appends the row at the end (Python 2).
            index = self.userList.InsertItem(sys.maxint,str(data[0]))
            self.userList.SetItem(index,1,str(data[1]))
            self.userList.SetItem(index,2,str(data[2]))
            self.userList.SetItem(index,3,str(data[3]))
            self.userList.SetItemData(index,key)
        self.userList.Bind(wx.EVT_LIST_ITEM_SELECTED,self.onUserListItemSelected)
        self.refreshUserListButton = wx.Button(self.panel,label="Refresh List")
        self.refreshUserListButton.Bind(wx.EVT_BUTTON,self.onRefreshButtonClicked)
        self.removeUserButton = wx.Button(self.panel,label="Remove User")
        self.removeUserButton.Bind(wx.EVT_BUTTON,self.onRemoveUserClicked)
        # Remove/Passphrase stay disabled until a row is selected.
        self.removeUserButton.Disable()
        self.passphraseUserButton = wx.Button(self.panel,label="Passphrase")
        self.passphraseUserButton.Bind(wx.EVT_BUTTON,self.showPassphrase)
        self.passphraseUserButton.Disable()
        self.userListButtonSizer.Add(self.refreshUserListButton,0,wx.ALL,5)
        self.userListButtonSizer.Add(self.removeUserButton,0,wx.ALL,5)
        self.userListButtonSizer.Add(self.passphraseUserButton,0,wx.ALL,5)
        self.mainSizer.Add(self.userListButtonSizer,0,wx.ALL,5)
        self.mainSizer.Add(self.userList,1,wx.ALL|wx.EXPAND,10)
        self.panel.SetSizer(self.mainSizer)
        # Poll the comboboxes for not-yet-saved department/position names;
        # enables the corresponding Add buttons.
        self.checkNewGroupTimer = wx.CallLater(500,self.checkNewGroup)
    def updateUserList(self):
        """Reload the Users table from the database and repopulate the
        list control.
        """
        self.userdata = self.getUserList()
        self.userList.DeleteAllItems()
        # NOTE(review): ColumnSorterMixin conventionally expects
        # itemDataMap to be a dict keyed by item data; a list is assigned
        # here (matching how the control was constructed) -- confirm that
        # column sorting behaves as intended.
        self.userList.itemDataMap = self.userdata
        dict_index = range(len(self.userdata))
        zipped = zip(dict_index,self.userdata)
        userDictionary = dict(zipped)
        items = userDictionary.items()
        for key,data in items:
            # sys.maxint as the index appends the row at the end (Python 2).
            index = self.userList.InsertItem(sys.maxint,str(data[0]))
            self.userList.SetItem(index,1,data[1])
            self.userList.SetItem(index,2,data[2])
            self.userList.SetItem(index,3,data[3])
            self.userList.SetItemData(index,key)
    def onRefreshButtonClicked(self,e):
        """Handler for the 'Refresh List' button."""
        self.updateUserList()
    class SortedListCtrl(wx.ListCtrl,ColumnSorterMixin):
        """Report-style list control with click-to-sort columns."""
        def __init__(self,parent,data):
            wx.ListCtrl.__init__(self,parent,style=wx.LC_REPORT|wx.LC_AUTOARRANGE|wx.LC_SORT_ASCENDING)
            ColumnSorterMixin.__init__(self,len(data))
            # Backing data consulted by the sorter mixin.
            self.itemDataMap = data
        def GetListCtrl(self):
            # Required by ColumnSorterMixin.
            return self
def getUserList(self,no_id=True):
connection = None
users = None
mappedUsers = []
depart = None
pos = None
try:
connection = sqlite3.connect(GEN.DATABASE)
with connection:
cursor = connection.cursor()
cursor.execute("SELECT * FROM Users")
users = cursor.fetchall()
for (a_id,a_staff_id,a_name,a_departmentId,a_positionId,sec) in users:
try:
cursor.execute('SELECT DepartmentName FROM department WHERE Department_Id="%d"'%a_departmentId)
data = cursor.fetchall()
if data:
depart = data[0][0]
else:
self.log.warning("Invalid department name, " + str((a_id,a_staff_id,a_name,a_departmentId,a_positionId,sec)) + " is not added")
continue
except Exception, e:
logging.error(e)
try:
cursor.execute('SELECT PositionName FROM position WHERE Position_Id="%d"'%a_positionId)
data = cursor.fetchall()
if data:
pos = data[0][0]
else:
self.log.warning("Invalid position name, " + str((a_id,a_staff_id,a_name,a_departmentId,a_positionId,sec)) + " is not added")
continue
except Exception, e:
self.log.error(e)
if no_id:
mappedUsers += [(a_staff_id,a_name,depart,pos,sec)]
else:
mappedUsers += [(a_id,a_staff_id,a_name,depart,pos,sec)]
except sqlite3.Error, e:
if connection:
connection.rollback()
self.log.error(e.args[0])
finally:
if connection:
connection.close()
return mappedUsers
def loadDepartmentList(self):
global departmentList
connection = None
try:
connection = sqlite3.connect(GEN.DATABASE)
with connection:
cursor = connection.cursor()
cursor.execute("SELECT DepartmentName FROM department")
data = cursor.fetchall()
departmentList = []
if data:
departmentList = [a_data[0] for a_data in data]
except sqlite3.Error, e:
if connection:
connection.rollback()
self.log.error(e.args[0])
finally:
if connection:
connection.close()
def loadPositionList(self):
global positionList
connection = None
try:
connection = sqlite3.connect(GEN.DATABASE)
with connection:
cursor = connection.cursor()
cursor.execute("SELECT PositionName FROM position")
data = cursor.fetchall()
positionList = []
if data:
positionList = [a_data[0] for a_data in data]
except sqlite3.Error, e:
if connection:
connection.rollback()
self.log.error(e.args[0])
if connection:
self.log.warning("Still have connection")
finally:
if connection:
connection.close()
def departmentValidation(self):
departmentValidation = self.department.replace("_","")
if not departmentValidation.isalnum():
wx.MessageBox("Department contains special characters or spaces","Create New User failed",wx.OK)
return False
return True
def positionValidation(self):
positionValidation = self.position.replace("_","")
if not positionValidation.isalnum():
wx.MessageBox("Position contains special characters or spaces","Create New User failed",wx.OK)
return False
return True
def onAddDepartmentClicked(self,e):
if self.departmentValidation():
confirm = wx.MessageDialog(self,'Are you sure to add new department "%s" ?'%self.department,"Confirm to add new department",wx.YES_NO|wx.NO_DEFAULT|wx.ICON_NONE)
if confirm.ShowModal() != wx.ID_YES:
return
connection = None
try:
connection = sqlite3.connect(GEN.DATABASE)
with connection:
cursor = connection.cursor()
cursor.execute("SELECT DepartmentName FROM department")
data = cursor.fetchall()
if data:
data = set([a_data[0] for a_data in data])
if self.department in data:
return "department already existed"
else:
cursor.execute("INSERT INTO department VALUES(NULL,'%s')"%self.department)
wx.MessageBox("Department '%s' has been successfully created"%self.department)
else:
cursor.execute("INSERT INTO department VALUES(NULL,'%s')"%self.department)
wx.MessageBox("Department '%s' has been successfully created"%self.department)
except sqlite3.Error, e:
if connection:
connection.rollback()
self.log.error(e.args[0])
finally:
if connection:
connection.close()
self.loadDepartmentList()
self.createUserDepartmentCombobox.Clear()
for a_department in departmentList:
self.createUserDepartmentCombobox.Append(a_department)
def onAddPositionClicked(self,e):
if self.positionValidation():
confirm = wx.MessageDialog(self,'Are you sure to add new position "%s" ?'%self.position,"Confirm to add new position",wx.YES_NO|wx.NO_DEFAULT|wx.ICON_NONE)
if confirm.ShowModal() != wx.ID_YES:
return
connection = None
try:
connection = sqlite3.connect(GEN.DATABASE)
with connection:
cursor = connection.cursor()
cursor.execute("SELECT PositionName FROM position")
data = cursor.fetchall()
if data:
data = {a_data[0] for a_data in data}
if self.position in data:
return "position already existed"
else:
cursor.execute("INSERT INTO position VALUES(NULL,'%s')"%self.position)
wx.MessageBox("Position '%s' has been successfully created"%self.position)
else:
cursor.execute("INSERT INTO position VALUES(NULL,'%s')"%self.position)
wx.MessageBox("Position '%s' has been successfully created"%self.position)
except sqlite3.Error, e:
if connection:
connection.rollback()
self.log.error(e.args[0])
finally:
if connection:
connection.close()
self.loadPositionList()
self.createUserDepartmentCombobox.Clear()
for a_department in departmentList:
self.createUserDepartmentCombobox.Append(a_department)
def onCreateNewUserClicked(self,e):
try:
staffId = int(self.createStaffIdTextBox.GetValue())
self.updateUserList()
existing_staffId = set([user[0] for user in self.userdata])
if staffId in existing_staffId:
wx.MessageBox("Staff ID already existed.","Create New User failed",wx.OK)
return
if staffId<=0:
wx.MessageBox("Staff ID should be a positive integer","Create New User failed",wx.OK)
return
except ValueError:
wx.MessageBox("Staff ID should be a positive integer","Create New User failed",wx.OK)
return
except Exception as e:
self.log.error(e)
usernameValidation = self.username.replace("_","")#''.join(self.username.split())
if not usernameValidation.isalnum():
wx.MessageBox("Username contains special characters or spaces","Create New User failed",wx.OK)
return
if not (self.departmentValidation() and self.positionValidation()):
return
connection = None
try:
connection = sqlite3.connect(GEN.DATABASE)
with connection:
cursor = connection.cursor()
cursor.execute("SELECT Department_Id FROM department WHERE DepartmentName='%s'"%self.department)
data = cursor.fetchall()
depart_id = data[0][0]
cursor.execute("SELECT Position_Id FROM position WHERE PositionName='%s'"%self.position)
data = cursor.fetchall()
pos_id = data[0][0]
cursor.execute("SELECT * FROM Users WHERE Staff_Id='%d' AND Name='%s' AND department='%d' AND position='%d'"%(staffId,self.username,depart_id,pos_id))
data = cursor.fetchall()
if data:
wx.MessageBox("This user already has already been created before.")
return False
cursor.execute("INSERT INTO Users VALUES(NULL,%d, '%s', '%d', '%d', NULL)"%(staffId,self.username,depart_id,pos_id))
if GEN.generateKey(staffId,self.username,depart_id,pos_id,None):
if not os.path.exists(os.path.join(GEN.LOCAL_PATH,"userImages/default.jpg")):
print "default image " + os.path.join(GEN.LOCAL_PATH,"userImages/default.jpg") + " not found"
shutil.copyfile(os.path.join(GEN.LOCAL_PATH,"userImages/default.jpg"),os.path.join(GEN.IMG_PATH,self.username+"_"+str(staffId)+".jpg"))
f = None
code = None
with open(GEN.PRIV_NAME,mode='rb') as f:
src = f.read()
code = src.encode('base64')
oncreated = wx.Frame(self,title="User created",style=wx.DEFAULT_FRAME_STYLE|wx.STAY_ON_TOP,size=(300,220))
boxsizer = wx.BoxSizer(wx.VERTICAL)
createdlabel = wx.StaticText(oncreated,label="User '%s' has been successfully created."%self.username)
passphraselabel = wx.StaticText(oncreated,label="The secret passphrase for '%s' is:\n"%self.username)
passphrase = wx.TextCtrl(oncreated,value=code,size=(250,25),style=wx.HSCROLL)
copyButton = wx.Button(oncreated,label="Copy Passphrase")
self.Bind(wx.EVT_BUTTON,lambda event: self.onCopyPassphrase(event,passphrase.GetValue()),copyButton)
boxsizer.Add(createdlabel,0,wx.ALL|wx.ALIGN_CENTER,10)
boxsizer.Add(passphraselabel,0,wx.ALL|wx.ALIGN_CENTER,10)
boxsizer.Add(passphrase,0,wx.ALIGN_CENTER)
boxsizer.Add(copyButton,0,wx.ALL|wx.ALIGN_CENTER,10)
oncreated.SetSizer(boxsizer)
oncreated.Show(True)
else:
cursor.execute("SELECT Id FROM Users WHERE Name='%s'"%self.username)
data = cursor.fetchall()
cursor.execute("DELETE FROM Users WHERE Id='%s'"%data[-1][0])
GEN.PRIV_NAME = ""
except sqlite3.Error, e:
if connection:
connection.rollback()
self.log.error(e.args[0])
finally:
if connection:
connection.close()
GEN.PRIV_NAME = ""
wx.CallLater(1000,self.updateUserList)
def onRemoveUserClicked(self,e):
selected_index = self.userList.GetNextSelected(-1)
selected_item = self.userdata[selected_index]
confirm = wx.MessageDialog(self,"Are you sure you want to delete user '%s'"%selected_item[1],"Deleting User",wx.YES_NO|wx.NO_DEFAULT|wx.ICON_NONE)
if confirm.ShowModal() != wx.ID_YES:
return False
connection = None
try:
connection = sqlite3.connect(GEN.DATABASE)
with connection:
cursor = connection.cursor()
cursor.execute("SELECT Department_Id FROM department WHERE DepartmentName='%s'"%selected_item[2])
depart = cursor.fetchall()[0][0]
cursor.execute("SELECT Position_Id FROM position WHERE PositionName='%s'"%selected_item[3])
pos = cursor.fetchall()[0][0]
cursor.execute("DELETE FROM Users WHERE Staff_Id='%d' AND Name='%s' AND Department='%d' AND Position='%d'"%(int(selected_item[0]),selected_item[1],depart,pos))
try:
os.remove(os.path.join(GEN.KEYS_PATH,"%s_priv_key"%(selected_item[1]+"_"+str(selected_item[0]))))
except:
pass
try:
os.remove(os.path.join(GEN.KEYS_PATH,"%s_priv_key_meta"%(selected_item[1]+"_"+str(selected_item[0]))))
except:
pass
wx.MessageBox("User '%s' has been successfully deleted"%selected_item[1])
except sqlite3.Error, e:
if connection:
connection.rollback()
wx.MessageBox("User '%s' deletion failed due to data inconsistency"%selected_item[1])
self.log(e.args[0])
except ValueError as e:
wx.MessageBox("The selected user account contains some poor values.")
finally:
if connection:
connection.close()
wx.CallLater(1000,self.updateUserList)
self.removeUserButton.Disable()
self.passphraseUserButton.Disable()
def onUserListItemSelected(self,e):
self.removeUserButton.Enable()
self.passphraseUserButton.Enable()
def checkNewGroup(self):
self.department = self.createUserDepartmentCombobox.GetValue()
self.position = self.createUserPositionCombobox.GetValue()
if self.department and not self.department in departmentList:
self.addDepartmentButton.Enable()
else:
self.addDepartmentButton.Disable()
if self.position and not self.position in positionList:
self.addPositionButton.Enable()
else:
self.addPositionButton.Disable()
self.checkCreateUser()
self.checkNewGroupTimer.Restart(500)
def checkCreateUser(self):
self.staffId = self.createStaffIdTextBox.GetValue()
self.username = self.createUsernameTextBox.GetValue()
if self.staffId and self.username and self.department and self.position and not self.addDepartmentButton.IsEnabled() and not self.addPositionButton.IsEnabled():
self.createUserButton.Enable()
else:
self.createUserButton.Disable()
def showPassphrase(self,e):
f = None
code = None
selected_index = self.userList.GetNextSelected(-1)
selected_item = self.userdata[selected_index]
user_id = selected_item[0]
user_name = selected_item[1]
department = selected_item[2]
position = selected_item[3]
privkey_filename = user_name + "_" +str(user_id)
GEN.PRIV_NAME = os.path.join(GEN.KEYS_PATH,"%s_priv_key"%(privkey_filename))
try:
with open(GEN.PRIV_NAME,mode='rb') as f:
import base64
src = f.read()
code = src.encode('base64')
if code is None:
self.log.warning("Key cannot be encoded")
except:
wx.MessageBox("Key for '%s' does not exist"%user_name,"Key does not exist")
return
oncreated = wx.Frame(self,title="User passphrase",style=wx.DEFAULT_FRAME_STYLE|wx.STAY_ON_TOP,size=(300,170))
boxsizer = wx.BoxSizer(wx.VERTICAL)
passphraselabel = wx.StaticText(oncreated,label="The secret passphrase for '%s' is:\n"%user_name)
passphrase = wx.TextCtrl(oncreated,value=code,size=(250,25))
copyButton = wx.Button(oncreated,label="Copy Passphrase")
self.Bind(wx.EVT_BUTTON,lambda event: self.onCopyPassphrase(event,passphrase.GetValue()),copyButton)
boxsizer.Add(passphraselabel,0,wx.ALL|wx.ALIGN_CENTER,10)
boxsizer.Add(passphrase,0,wx.ALIGN_CENTER)
boxsizer.Add(copyButton,0,wx.ALL|wx.ALIGN_CENTER,10)
oncreated.SetSizer(boxsizer)
oncreated.Show(True)
def onCopyPassphrase(self,e,code):
clipdata = wx.TextDataObject()
clipdata.SetText(code)
wx.TheClipboard.Open()
wx.TheClipboard.SetData(clipdata)
wx.TheClipboard.Close()
class LoginWindows(wx.Frame):
    """Initial window: pick the shared ABEsafe folder and enter the system.

    Offers "Create" when the chosen folder holds no ABEsafe tree yet, and
    "Select" when one exists; opens the main admin frame on success.
    """
    def __init__(self,parent,log):
        # parent is the wx.App; log is the shared logger instance.
        self.parent = parent
        self.log = log
        self.userSelected = False
        # nl: displayed "Name (id: N)" strings; ndict: display string -> staff id.
        self.nl = []
        self.ndict = {}
        wx.Frame.__init__(self,None,size=(400,200),title="ABEsafe Admin Tool",style=wx.DEFAULT_FRAME_STYLE)
        self.Bind(wx.EVT_CLOSE,self.OnClose)
        self.panel = wx.Panel(self,size=(400,200))
        login_font = wx.Font(16,wx.FONTFAMILY_SWISS,wx.FONTSTYLE_NORMAL,wx.FONTWEIGHT_BOLD,False)
        login_label = wx.StaticText(self.panel,label="Select your admin account",pos=(100,50))
        login_label.SetFont(login_font)
        login_label.SetForegroundColour(wx.BLUE)
        # login_font is deliberately rebound for the second, smaller label.
        login_font = wx.Font(14,wx.FONTFAMILY_SWISS,wx.FONTSTYLE_SLANT,wx.FONTWEIGHT_NORMAL,False)
        self.sharedFolderLabel = wx.StaticText(self.panel,label="Shared Folder",pos=(20,100))
        self.sharedFolderLabel.SetFont(login_font)
        self.sharedFolderLabel.SetForegroundColour(wx.BLUE)
        self.sharedFolderPathSelection = wx.DirPickerCtrl(self.panel,message="Select ABEsafe folder directory",pos=(145,100))
        self.sharedFolderPathSelection.Bind(wx.EVT_DIRPICKER_CHANGED,self.OnSharedFolderSelected)
        self.selectButton = wx.Button(self.panel,label="Select",pos=(150,140))
        self.selectButton.Bind(wx.EVT_BUTTON,self.OnSelectUserAccount)
        self.Show(True)
        self.getDefaultSharedFolderPath()
        # "Create" is offered when no ABEsafe tree exists under the folder yet.
        self.selectButton.SetLabel("Select" if self.checkSystemExist() else "Create")
    def checkSystemExist(self):
        """Recompute the GEN.* paths from the picked folder and return True
        when a complete ABEsafe tree (folders + database) already exists.

        Side effect: creates GEN.KEYS_PATH when missing.
        """
        if self.sharedFolderPathSelection.GetPath():
            GEN.SHARED_FOLDER_PATH = self.sharedFolderPathSelection.GetPath()
        else:
            GEN.SHARED_FOLDER_PATH = ""
        GEN.ABEsafe_PATH = os.path.join(GEN.SHARED_FOLDER_PATH,"ABEsafe")
        # NOTE(review): ".keys" comes first, so keys are stored under a local
        # ".keys/<shared path>" tree rather than inside the shared folder --
        # confirm this argument order is intentional.
        GEN.KEYS_PATH = os.path.join(".keys",GEN.SHARED_FOLDER_PATH)
        GEN.CONFIG_PATH = os.path.join(GEN.ABEsafe_PATH,".configs")
        GEN.IMG_PATH = os.path.join(GEN.ABEsafe_PATH,"userImages")
        GEN.DATABASE = os.path.join(GEN.CONFIG_PATH,GEN.DATABASE_file)
        if not os.path.exists(GEN.KEYS_PATH):
            os.makedirs(GEN.KEYS_PATH)
        if not(os.path.exists(GEN.ABEsafe_PATH) and os.path.exists(GEN.CONFIG_PATH) and os.path.exists(GEN.IMG_PATH) and os.path.exists(GEN.DATABASE)):
            return False
        else:
            return True
    def OnSharedFolderSelected(self,e):
        """Reload the user list from the selected folder's database (if any)
        and refresh the Select/Create button label."""
        if not self.checkSystemExist():
            self.nl=[]
            self.ndict = {}
        else:
            connection = None
            try:
                connection = sqlite3.connect(GEN.DATABASE)
                with connection:
                    cursor = connection.cursor()
                    cursor.execute("SELECT Staff_Id, Name FROM Users")
                    users = cursor.fetchall()
                    self.nl = [user[1]+" (id: "+str(user[0])+")" for user in users]
                    self.ndict = {user[1]+" (id: "+str(user[0])+")":user[0] for user in users}
            except sqlite3.Error, e:
                if connection:
                    connection.rollback()
                self.log.error(e.args[0])
            except Exception as e:
                self.log.error(e)
            finally:
                if connection:
                    connection.close()
        self.selectButton.SetLabel("Select" if self.checkSystemExist() else "Create")
    def saveSelectedPath(self):
        """Persist the currently selected shared folder into the local .path
        file, rewriting it only when the stored value differs."""
        pathexist = True if os.path.exists(os.path.join(GEN.LOCAL_PATH,".path")) else False
        f = None
        tmp = None
        tpath = None
        if pathexist:
            with open(os.path.join(GEN.LOCAL_PATH,".path"),'r') as f:
                tmp = f.read()
            # Canonicalize: drop a single trailing "/" before comparing/storing.
            tpath = "" if len(GEN.SHARED_FOLDER_PATH)==0 else GEN.SHARED_FOLDER_PATH[:-1] if GEN.SHARED_FOLDER_PATH[-1]=="/" else GEN.SHARED_FOLDER_PATH
        else:
            tpath = ""
        if tmp != tpath or pathexist==False:
            with open(os.path.join(GEN.LOCAL_PATH,".path"),'w+') as f:
                f.write(tpath)
    def OnSelectUserAccount(self,e):
        """
        Login window
        Need to check the username and password before access to the account
        """
        # When no ABEsafe tree exists yet, offer to deploy one (optionally
        # with sample data) and remember the chosen folder in .path.
        if not self.checkSystemExist():
            createdABEsafeConfirm = wx.MessageDialog(self,"ABEsafe System is currently not built on this folder,\nDo you want to build ABEsafe System on this folder?","Build a new ABEsafe System",wx.YES_NO|wx.YES_DEFAULT|wx.ICON_NONE)
            if createdABEsafeConfirm.ShowModal() == wx.ID_YES:
                sampleUserSetCofirm = wx.MessageDialog(self,"Do you want to add sample users, department and position?","Sample dataset",wx.YES_NO|wx.YES_DEFAULT|wx.ICON_NONE)
                if sampleUserSetCofirm.ShowModal() == wx.ID_YES:
                    GEN.ABEsafe_gensystem(self.log,GEN.SHARED_FOLDER_PATH,True)
                else:
                    GEN.ABEsafe_gensystem(self.log,GEN.SHARED_FOLDER_PATH,False)
                wx.MessageBox("ABEsafe has been successfully deployed.")
                with open(os.path.join(GEN.LOCAL_PATH,".path"),'w+') as f:
                    tpath = GEN.SHARED_FOLDER_PATH[:-1] if GEN.SHARED_FOLDER_PATH[-1]=="/" else GEN.SHARED_FOLDER_PATH
                    f.write(tpath)
                self.getDefaultSharedFolderPath()
        # NOTE(review): when the user declines to build, execution still falls
        # through and opens the main frame -- confirm this is intended.
        self.parent.frame = MainFrame(self.log,"ABEsafe admin tools",SCREEN_SIZE)
        self.parent.frame.setupPanel()
        self.parent.SetTopWindow(self.parent.frame)
        self.parent.frame.Centre()
        self.parent.frame.Show(True)
        self.parent.frame.Raise()
        self.Destroy()
    def getDefaultSharedFolderPath(self):
        """Pre-select the shared folder stored in the local .path file when it
        still points at a valid ABEsafe tree."""
        if os.path.exists(os.path.join(GEN.LOCAL_PATH,".path")):
            f = open(os.path.join(GEN.LOCAL_PATH,".path"),'r')
            tmp = f.readline()
            f.close()
            if os.path.exists(tmp):
                abesafep = os.path.join(tmp,"ABEsafe")
                cp = os.path.join(abesafep,".configs")
                d = os.path.join(cp,GEN.DATABASE_file)
                if os.path.exists(abesafep) and os.path.exists(cp) and os.path.exists(d):
                    self.sharedFolderPathSelection.SetPath(tmp)
                    self.OnSharedFolderSelected(self.sharedFolderPathSelection)
        else:
            self.log.info(".path does not exist")
    def OnClose(self,e):
        """Close handler: tear down (or re-show) the main frame as needed."""
        if self.userSelected == False:
            try:
                self.parent.frame.Close()
            except:
                pass
        else:
            if self.parent.frame:
                self.parent.frame.Show()
        self.Destroy()
if __name__=='__main__':
    # os.putenv does not update os.environ and its effect is platform
    # dependent; mutate os.environ directly instead. Using .get() also
    # tolerates an unset PATH (os.getenv(...)+':.' would raise TypeError).
    os.environ['PATH'] = os.environ.get('PATH', '') + ':.'
    logger = logging.getLogger('history')
    logger.setLevel(logging.INFO)
    # Rotate the history log at 4 KiB, keeping five backups.
    handler = logging.handlers.RotatingFileHandler('history.log', maxBytes=1<<12, backupCount=5)
    handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)-8s %(funcName)s %(message)s'))
    logger.addHandler(handler)
    AdminApp(logger).MainLoop()
| |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow import keras
def json_serving_input_fn(feat_names):
  """Build the serving input function.

  Fixes the previous docstring, which misnamed the argument ("feat_name")
  and the return type ("ServingInputReceive").

  Args:
    feat_names - list, Names of the features used in the prediction model.

  Returns:
    A zero-argument callable producing a
    tf.estimator.export.ServingInputReceiver, suitable as the
    serving_input_receiver_fn of an export.
  """
  def serving_input_fn():
    feat_cols = [tf.feature_column.numeric_column(name) for name in feat_names]
    # One rank-1 placeholder per feature; the raw placeholders are fed
    # straight through as the model features.
    inputs = {col.name: tf.placeholder(shape=[None], dtype=col.dtype)
              for col in feat_cols}
    return tf.estimator.export.ServingInputReceiver(inputs, inputs)
  return serving_input_fn
def make_input_fn(data_file,
                  seq_len,
                  batch_size,
                  cols=None,
                  num_epochs=None,
                  shuffle=False,
                  train_flag=False,
                  filter_prob=1.0):
  """Input function for estimator.

  Args:
    data_file - string, Path to input csv file.
    seq_len - int, Length of time sequence.
    batch_size - int, Mini-batch size.
    cols - list, Columns to extract from the csv file; the first one is the
      gross power reading, the remaining ones are the appliance flags.
    num_epochs - int, Number of epochs.
    shuffle - bool, Whether to shuffle the data.
    train_flag - bool, Whether in the training phase, in which we may
      ignore sequences when all appliances are off.
    filter_prob - float, The probability to pass data sequences with all
      appliances being 'off', only valid when train_flag is True.

  Returns:
    The next-element op of a one-shot tf.data.Iterator.

  Raises:
    ValueError: If cols is not given.
  """
  if cols is None:
    # Fail fast with a clear message; previously a missing cols crashed
    # further down with an opaque TypeError at len(cols).
    raise ValueError('cols must list the csv columns to read.')

  def _mk_data(*argv):
    """Format data for further processing.

    Slices data into subsequences, extracts the flags from the last time
    step and treats each as the target for its subsequence.
    """
    data = {'ActivePower_{}'.format(i + 1): x
            for i, x in enumerate(tf.split(argv[0], seq_len))}
    # Only take the label of the last time step in the sequence as target
    flags = [tf.split(x, seq_len)[-1][0] for x in argv[1:]]
    return data, tf.cast(tf.stack(flags), dtype=tf.uint8)

  def _filter_data(data, labels):
    """Filter those sequences with all appliances 'off'.

    However, with probability filter_prob the sequence passes anyway.
    """
    rand_num = tf.random_uniform([], 0, 1, dtype=tf.float64)
    thresh = tf.constant(filter_prob, dtype=tf.float64, shape=[])
    is_all_zero = tf.equal(tf.reduce_sum(labels), 0)
    return tf.logical_or(tf.logical_not(is_all_zero),
                         tf.less(rand_num, thresh))

  # First column (power) is float, the remaining label columns are ints.
  record_defaults = [tf.float64, ] + [tf.int32] * (len(cols) - 1)
  dataset = tf.contrib.data.CsvDataset([data_file, ],
                                       record_defaults,
                                       header=True,
                                       select_cols=cols)
  # Turn the per-row stream into overlapping windows of length seq_len.
  dataset = dataset.apply(
      tf.contrib.data.sliding_window_batch(window_size=seq_len))
  dataset = dataset.map(_mk_data)
  if train_flag:
    # Drop (most) all-off sequences; shuffle with a week-of-seconds buffer.
    dataset = dataset.filter(_filter_data).shuffle(60 * 60 * 24 * 7)
  if shuffle:
    dataset = dataset.shuffle(buffer_size=batch_size * 10)
  dataset = dataset.repeat(num_epochs)
  dataset = dataset.batch(batch_size)
  dataset = dataset.prefetch(buffer_size=1)
  iterator = dataset.make_one_shot_iterator()
  return iterator.get_next()
def model_fn(features, labels, mode, params):
  """Build a customized model for energy disaggregation.

  The model authoring uses pure tensorflow.layers.
  Denote gross energy in the house as a sequence
  $(x_t, x_{t+1}, \cdots, x_{t+n-1}) \in \mathcal{R}^n$,
  and the on/off states of appliances at time $t$ as
  $y_{t} = \{y^i_t \mid y^i_t = [{appliance}\ i\ is\ on\ at\ time\ t ]\}$,
  then we are learning a function
  $f(x_t, x_{t+1}, \cdots, x_{t+n-1}) \mapsto \hat{y}_{t+n-1}$.

  Args:
    features: dict(str, tf.data.Dataset)
    labels: tf.data.Dataset
    mode: One of {tf.estimator.ModeKeys.EVAL,
                  tf.estimator.ModeKeys.TRAIN,
                  tf.estimator.ModeKeys.PREDICT}
    params: Other related parameters (expects keys: 'feat_cols',
      'use_keras', 'lstm_size', 'dropout_rate', 'num_layers',
      'num_appliances', 'learning_rate')
  Returns:
    tf.estimator.EstimatorSpec.
  """
  if mode == tf.estimator.ModeKeys.TRAIN:
    tf.logging.info('TRAIN')
  else:
    tf.logging.info('EVAL | PREDICT')
  feat_cols = [tf.feature_column.numeric_column(x) for x in params['feat_cols']]
  seq_data = tf.feature_column.input_layer(features, feat_cols)
  # Two equivalent authorings of the same architecture, selected by
  # params['use_keras']: raw tf.nn RNN cells vs. a Keras Sequential model.
  if not params['use_keras']:
    tf.logging.info('Tensorflow authoring')
    seq_data_shape = tf.shape(seq_data)
    batch_size = seq_data_shape[0]
    # RNN network using multilayer LSTM
    cells = [tf.nn.rnn_cell.DropoutWrapper(
        tf.nn.rnn_cell.LSTMCell(params['lstm_size']), input_keep_prob=1 - params['dropout_rate'])
        for _ in range(params['num_layers'])]
    lstm = tf.nn.rnn_cell.MultiRNNCell(cells)
    # Initialize the state of each LSTM cell to zero
    state = lstm.zero_state(batch_size, dtype=tf.float32)
    # Unroll multiple time steps and the output size is:
    # [batch_size, max_time, cell.output_size]
    outputs, states = tf.nn.dynamic_rnn(cell=lstm,
                                        inputs=tf.expand_dims(seq_data, -1),
                                        initial_state=state,
                                        dtype=tf.float32)
    # Flatten the 3D output to 2D as [batch_size, max_time * cell.output_size]
    flatten_outputs = tf.layers.Flatten()(outputs)
    # A fully connected layer. The number of output equals the number of target appliances
    logits = tf.layers.Dense(params['num_appliances'])(flatten_outputs)
  else:
    tf.logging.info('Keras authoring')
    # RNN network using multilayer LSTM with the help of Keras
    model = keras.Sequential()
    for _ in range(params['num_layers']):
      model.add(
          keras.layers.LSTM(params['lstm_size'],
                            dropout=params['dropout_rate'],
                            return_sequences=True)
      )
    # Flatten the 3D output to 2D as [batch_size, max_time * cell.output_size]
    model.add(keras.layers.Flatten())
    # A fully connected layer. The number of output equals the number of target appliances
    model.add(keras.layers.Dense(params['num_appliances']))
    # Logits can be easily computed using Keras functional API
    logits = model(tf.expand_dims(seq_data, -1))
  # Probability of turning-on of each appliances corresponding output are computed by applying a sigmoid function
  probs = tf.nn.sigmoid(logits)
  predictions = {
      'probabilities': probs,
      'logits': logits
  }
  if mode == tf.estimator.ModeKeys.PREDICT:
    return tf.estimator.EstimatorSpec(mode, predictions=predictions)
  # Binary cross entropy is used as loss function
  loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=labels, logits=logits)
  # NOTE(review): tf.losses.sigmoid_cross_entropy returns an already-reduced
  # scalar by default, so loss_avg should equal loss; the TRAIN branch
  # minimizes loss_avg while reporting loss -- confirm this is intended.
  loss_avg = tf.reduce_mean(loss)
  # Threshold probabilities at 0.5 (via round) for the binary metrics below.
  predicted_classes = tf.cast(tf.round(probs), tf.uint8)
  precision = tf.metrics.precision(labels=labels,
                                   predictions=predicted_classes)
  recall = tf.metrics.recall(labels=labels,
                             predictions=predicted_classes)
  f1_score = tf.contrib.metrics.f1_score(labels=labels,
                                         predictions=predicted_classes)
  metrics = {'precision': precision,
             'recall': recall,
             'f_measure': f1_score}
  if mode == tf.estimator.ModeKeys.EVAL:
    return tf.estimator.EstimatorSpec(mode, loss=loss, eval_metric_ops=metrics)
  optimizer = tf.train.AdamOptimizer(learning_rate=params['learning_rate'])
  train_op = optimizer.minimize(loss_avg, global_step=tf.train.get_global_step())
  return tf.estimator.EstimatorSpec(mode,
                                    loss=loss,
                                    train_op=train_op)
| |
#!/usr/bin/env python
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Global system tests for V8 test runners and fuzzers.
This hooks up the framework under tools/testrunner testing high-level scenarios
with different test suite extensions and build configurations.
"""
# TODO(machenbach): Mock out util.GuessOS to make these tests really platform
# independent.
# TODO(machenbach): Move coverage recording to a global test entry point to
# include other unittest suites in the coverage report.
# TODO(machenbach): Coverage data from multiprocessing doesn't work.
# TODO(majeski): Add some tests for the fuzzers.
# for py2/py3 compatibility
from __future__ import print_function
import collections
import contextlib
import json
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from cStringIO import StringIO
# Repository locations derived from this file's position in the tree.
TOOLS_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEST_DATA_ROOT = os.path.join(TOOLS_ROOT, 'unittests', 'testdata')
RUN_TESTS_PY = os.path.join(TOOLS_ROOT, 'run-tests.py')

# Outcome of one test-runner invocation: captured streams plus exit code.
Result = collections.namedtuple(
    'Result', ['stdout', 'stderr', 'returncode'])


def _result_str(self):
  # Readable rendering used as the assertion message in the tests below.
  return ('\nReturncode: %s\nStdout:\n%s\nStderr:\n%s\n'
          % (self.returncode, self.stdout, self.stderr))

Result.__str__ = _result_str
@contextlib.contextmanager
def temp_dir():
  """Yield a freshly created temporary directory, removed on exit."""
  tmp_path = tempfile.mkdtemp('v8_test_')
  try:
    yield tmp_path
  finally:
    shutil.rmtree(tmp_path)
@contextlib.contextmanager
def temp_base(baseroot='testroot1'):
  """Yield a temporary V8 test root copied from a blueprint folder.

  Args:
    baseroot: The folder with the test root blueprint. Relevant files will be
      copied to the temporary test root, to guarantee a fresh setup with no
      dirty state.
  """
  blueprint = os.path.join(TEST_DATA_ROOT, baseroot)
  with temp_dir() as tempbase:
    builddir = os.path.join(tempbase, 'out', 'build')
    testroot = os.path.join(tempbase, 'test')
    os.makedirs(builddir)
    # Fake build products consumed by the runner.
    for name in ('v8_build_config.json', 'd8_mocked.py'):
      shutil.copy(os.path.join(blueprint, name), builddir)
    # Copy every suite and its files into the fresh test root.
    for suite in os.listdir(os.path.join(blueprint, 'test')):
      suite_src = os.path.join(blueprint, 'test', suite)
      suite_dst = os.path.join(testroot, suite)
      os.makedirs(suite_dst)
      for entry in os.listdir(suite_src):
        shutil.copy(os.path.join(suite_src, entry), suite_dst)
    yield tempbase
@contextlib.contextmanager
def capture():
  """Temporarily swap sys.stdout/sys.stderr for in-memory buffers.

  Yields the (stdout, stderr) buffer pair; the real streams are restored
  on exit no matter what the body raised.
  """
  saved_out, saved_err = sys.stdout, sys.stderr
  buf_out, buf_err = StringIO(), StringIO()
  sys.stdout, sys.stderr = buf_out, buf_err
  try:
    yield buf_out, buf_err
  finally:
    sys.stdout = saved_out
    sys.stderr = saved_err
def run_tests(basedir, *args, **kwargs):
  """Run the standard test runner with captured output.

  Returns a Result holding the captured stdout/stderr and the exit code.
  """
  with capture() as (out, err):
    staging_flag = ('--infra-staging' if kwargs.get('infra_staging', False)
                    else '--no-infra-staging')
    argv = ['--command-prefix', sys.executable] + list(args) + [staging_flag]
    code = standard_runner.StandardTestRunner(basedir=basedir).execute(argv)
    return Result(out.getvalue(), err.getvalue(), code)
def override_build_config(basedir, **kwargs):
  """Merge the given key/value overrides into the fake build config file."""
  config_path = os.path.join(basedir, 'out', 'build', 'v8_build_config.json')
  with open(config_path) as infile:
    build_config = json.load(infile)
  build_config.update(kwargs)
  with open(config_path, 'w') as outfile:
    json.dump(build_config, outfile)
class SystemTest(unittest.TestCase):
  """High-level tests driving the real test runner against fake test roots."""
  @classmethod
  def setUpClass(cls):
    # Try to set up python coverage and run without it if not available.
    cls._cov = None
    try:
      import coverage
      if int(coverage.__version__.split('.')[0]) < 4:
        cls._cov = None
        print('Python coverage version >= 4 required.')
        raise ImportError()
      cls._cov = coverage.Coverage(
          source=([os.path.join(TOOLS_ROOT, 'testrunner')]),
          omit=['*unittest*', '*__init__.py'],
      )
      # Exclude boilerplate lines that would skew the coverage numbers.
      cls._cov.exclude('raise NotImplementedError')
      cls._cov.exclude('if __name__ == .__main__.:')
      cls._cov.exclude('except TestRunnerError:')
      cls._cov.exclude('except KeyboardInterrupt:')
      cls._cov.exclude('if options.verbose:')
      cls._cov.exclude('if verbose:')
      cls._cov.exclude('pass')
      cls._cov.exclude('assert False')
      cls._cov.start()
    except ImportError:
      print('Running without python coverage.')
    # The testrunner modules must be imported after coverage has started (and
    # after TOOLS_ROOT is on sys.path); they are stored as module globals for
    # use by run_tests().
    sys.path.append(TOOLS_ROOT)
    global standard_runner
    from testrunner import standard_runner
    global num_fuzzer
    from testrunner import num_fuzzer
    from testrunner.local import command
    from testrunner.local import pool
    command.setup_testing()
    pool.setup_testing()
  @classmethod
  def tearDownClass(cls):
    # Stop coverage recording (if it was active) and print the report.
    if cls._cov:
      cls._cov.stop()
      print('')
      print(cls._cov.report(show_missing=True))
  def testPass(self):
    """Test running only passing tests in two variants.

    Also test printing durations.
    """
    with temp_base() as basedir:
      result = run_tests(
          basedir,
          '--progress=verbose',
          '--variants=default,stress',
          '--time',
          'sweet/bananas',
          'sweet/raspberries',
      )
      self.assertIn('sweet/bananas default: pass', result.stdout, result)
      # TODO(majeski): Implement for test processors
      # self.assertIn('Total time:', result.stderr, result)
      # self.assertIn('sweet/bananas', result.stderr, result)
      self.assertEqual(0, result.returncode, result)
  def testShardedProc(self):
    """Test sharding with test processors: each shard runs half the work."""
    with temp_base() as basedir:
      for shard in [1, 2]:
        result = run_tests(
            basedir,
            '--progress=verbose',
            '--variants=default,stress',
            '--shard-count=2',
            '--shard-run=%d' % shard,
            'sweet/blackberries',
            'sweet/raspberries',
            infra_staging=False,
        )
        # One of the shards gets one variant of each test.
        self.assertIn('2 tests ran', result.stdout, result)
        if shard == 1:
          self.assertIn('sweet/raspberries default', result.stdout, result)
          self.assertIn('sweet/raspberries stress', result.stdout, result)
          self.assertEqual(0, result.returncode, result)
        else:
          self.assertIn(
            'sweet/blackberries default: FAIL', result.stdout, result)
          self.assertIn(
            'sweet/blackberries stress: FAIL', result.stdout, result)
          self.assertEqual(1, result.returncode, result)
  @unittest.skip("incompatible with test processors")
  def testSharded(self):
    """Test running a particular shard."""
    with temp_base() as basedir:
      for shard in [1, 2]:
        result = run_tests(
            basedir,
            '--progress=verbose',
            '--variants=default,stress',
            '--shard-count=2',
            '--shard-run=%d' % shard,
            'sweet/bananas',
            'sweet/raspberries',
        )
        # One of the shards gets one variant of each test.
        self.assertIn('Running 2 tests', result.stdout, result)
        self.assertIn('sweet/bananas', result.stdout, result)
        self.assertIn('sweet/raspberries', result.stdout, result)
        self.assertEqual(0, result.returncode, result)
  def testFail(self):
    """Test running only failing tests in two variants."""
    with temp_base() as basedir:
      result = run_tests(
          basedir,
          '--progress=verbose',
          '--variants=default,stress',
          'sweet/strawberries',
          infra_staging=False,
      )
      self.assertIn('sweet/strawberries default: FAIL', result.stdout, result)
      self.assertEqual(1, result.returncode, result)
  def check_cleaned_json_output(
      self, expected_results_name, actual_json, basedir):
    """Compare the runner's json output against golden data after stripping
    the non-deterministic fields (durations, absolute command paths)."""
    # Check relevant properties of the json output.
    with open(actual_json) as f:
      json_output = json.load(f)
    # Replace duration in actual output as it's non-deterministic. Also
    # replace the python executable prefix as it has a different absolute
    # path dependent on where this runs.
    def replace_variable_data(data):
      data['duration'] = 1
      data['command'] = ' '.join(
          ['/usr/bin/python'] + data['command'].split()[1:])
      data['command'] = data['command'].replace(basedir + '/', '')
    for data in json_output['slowest_tests']:
      replace_variable_data(data)
    for data in json_output['results']:
      replace_variable_data(data)
    json_output['duration_mean'] = 1
    # We need lexicographic sorting here to avoid non-deterministic behaviour
    # The original sorting key is duration, but in our fake test we have
    # non-deterministic durations before we reset them to 1
    json_output['slowest_tests'].sort(key= lambda x: str(x))
    with open(os.path.join(TEST_DATA_ROOT, expected_results_name)) as f:
      expected_test_results = json.load(f)
    pretty_json = json.dumps(json_output, indent=2, sort_keys=True)
    msg = None # Set to pretty_json for bootstrapping.
    self.assertDictEqual(json_output, expected_test_results, msg)
  def testFailWithRerunAndJSON(self):
    """Test re-running a failing test and output to json."""
    with temp_base() as basedir:
      json_path = os.path.join(basedir, 'out.json')
      result = run_tests(
          basedir,
          '--progress=verbose',
          '--variants=default',
          '--rerun-failures-count=2',
          '--random-seed=123',
          '--json-test-results', json_path,
          'sweet/strawberries',
          infra_staging=False,
      )
      self.assertIn('sweet/strawberries default: FAIL', result.stdout, result)
      # With test processors we don't count reruns as separated failures.
      # TODO(majeski): fix it?
      self.assertIn('1 tests failed', result.stdout, result)
      self.assertEqual(0, result.returncode, result)
      # TODO(majeski): Previously we only reported the variant flags in the
      # flags field of the test result.
      # After recent changes we report all flags, including the file names.
      # This is redundant to the command. Needs investigation.
      self.maxDiff = None
      self.check_cleaned_json_output(
          'expected_test_results1.json', json_path, basedir)
  def testFlakeWithRerunAndJSON(self):
    """Test re-running a flaky test (fails once, then passes) with json output."""
    with temp_base(baseroot='testroot2') as basedir:
      json_path = os.path.join(basedir, 'out.json')
      result = run_tests(
          basedir,
          '--progress=verbose',
          '--variants=default',
          '--rerun-failures-count=2',
          '--random-seed=123',
          '--json-test-results', json_path,
          'sweet',
          infra_staging=False,
      )
      self.assertIn('sweet/bananaflakes default: pass', result.stdout, result)
      self.assertIn('All tests succeeded', result.stdout, result)
      self.assertEqual(0, result.returncode, result)
      self.maxDiff = None
      self.check_cleaned_json_output(
          'expected_test_results2.json', json_path, basedir)
  def testAutoDetect(self):
    """Fake a build with several auto-detected options.

    Using all those options at once doesn't really make much sense. This is
    merely for getting coverage.
    """
    with temp_base() as basedir:
      override_build_config(
          basedir, dcheck_always_on=True, is_asan=True, is_cfi=True,
          is_msan=True, is_tsan=True, is_ubsan_vptr=True, target_cpu='x86',
          v8_enable_i18n_support=False, v8_target_cpu='x86',
          v8_enable_verify_csa=False, v8_enable_lite_mode=False,
          v8_enable_pointer_compression=False)
      result = run_tests(
          basedir,
          '--progress=verbose',
          '--variants=default',
          'sweet/bananas',
      )
      expect_text = (
          '>>> Autodetected:\n'
          'asan\n'
          'cfi_vptr\n'
          'dcheck_always_on\n'
          'msan\n'
          'no_i18n\n'
          'tsan\n'
          'ubsan_vptr\n'
          '>>> Running tests for ia32.release')
      self.assertIn(expect_text, result.stdout, result)
      self.assertEqual(0, result.returncode, result)
      # TODO(machenbach): Test some more implications of the auto-detected
      # options, e.g. that the right env variables are set.
  def testSkips(self):
    """Test skipping tests in status file for a specific variant."""
    with temp_base() as basedir:
      result = run_tests(
          basedir,
          '--progress=verbose',
          '--variants=nooptimization',
          'sweet/strawberries',
          infra_staging=False,
      )
      self.assertIn('0 tests ran', result.stdout, result)
      # Returncode 2 means no tests were run.
      self.assertEqual(2, result.returncode, result)
  def testRunSkips(self):
    """Inverse the above. Test parameter to keep running skipped tests."""
    with temp_base() as basedir:
      result = run_tests(
          basedir,
          '--progress=verbose',
          '--variants=nooptimization',
          '--run-skipped',
          'sweet/strawberries',
      )
      self.assertIn('1 tests failed', result.stdout, result)
      self.assertIn('1 tests ran', result.stdout, result)
      self.assertEqual(1, result.returncode, result)
  def testDefault(self):
    """Test using default test suites, though no tests are run since they don't
    exist in a test setting.
    """
    with temp_base() as basedir:
      result = run_tests(
          basedir,
          infra_staging=False,
      )
      self.assertIn('0 tests ran', result.stdout, result)
      self.assertEqual(2, result.returncode, result)
def testNoBuildConfig(self):
"""Test failing run when build config is not found."""
with temp_dir() as basedir:
result = run_tests(basedir)
self.assertIn('Failed to load build config', result.stdout, result)
self.assertEqual(5, result.returncode, result)
def testInconsistentArch(self):
"""Test failing run when attempting to wrongly override the arch."""
with temp_base() as basedir:
result = run_tests(basedir, '--arch=ia32')
self.assertIn(
'--arch value (ia32) inconsistent with build config (x64).',
result.stdout, result)
self.assertEqual(5, result.returncode, result)
def testWrongVariant(self):
"""Test using a bogus variant."""
with temp_base() as basedir:
result = run_tests(basedir, '--variants=meh')
self.assertEqual(5, result.returncode, result)
def testModeFromBuildConfig(self):
"""Test auto-detection of mode from build config."""
with temp_base() as basedir:
result = run_tests(basedir, '--outdir=out/build', 'sweet/bananas')
self.assertIn('Running tests for x64.release', result.stdout, result)
self.assertEqual(0, result.returncode, result)
@unittest.skip("not available with test processors")
def testReport(self):
"""Test the report feature.
This also exercises various paths in statusfile logic.
"""
with temp_base() as basedir:
result = run_tests(
basedir,
'--variants=default',
'sweet',
'--report',
)
self.assertIn(
'3 tests are expected to fail that we should fix',
result.stdout, result)
self.assertEqual(1, result.returncode, result)
@unittest.skip("not available with test processors")
def testWarnUnusedRules(self):
"""Test the unused-rules feature."""
with temp_base() as basedir:
result = run_tests(
basedir,
'--variants=default,nooptimization',
'sweet',
'--warn-unused',
)
self.assertIn( 'Unused rule: carrots', result.stdout, result)
self.assertIn( 'Unused rule: regress/', result.stdout, result)
self.assertEqual(1, result.returncode, result)
@unittest.skip("not available with test processors")
def testCatNoSources(self):
"""Test printing sources, but the suite's tests have none available."""
with temp_base() as basedir:
result = run_tests(
basedir,
'--variants=default',
'sweet/bananas',
'--cat',
)
self.assertIn('begin source: sweet/bananas', result.stdout, result)
self.assertIn('(no source available)', result.stdout, result)
self.assertEqual(0, result.returncode, result)
def testPredictable(self):
"""Test running a test in verify-predictable mode.
The test will fail because of missing allocation output. We verify that and
that the predictable flags are passed and printed after failure.
"""
with temp_base() as basedir:
override_build_config(basedir, v8_enable_verify_predictable=True)
result = run_tests(
basedir,
'--progress=verbose',
'--variants=default',
'sweet/bananas',
infra_staging=False,
)
self.assertIn('1 tests ran', result.stdout, result)
self.assertIn('sweet/bananas default: FAIL', result.stdout, result)
self.assertIn('Test had no allocation output', result.stdout, result)
self.assertIn('--predictable --verify-predictable', result.stdout, result)
self.assertEqual(1, result.returncode, result)
def testSlowArch(self):
"""Test timeout factor manipulation on slow architecture."""
with temp_base() as basedir:
override_build_config(basedir, v8_target_cpu='arm64')
result = run_tests(
basedir,
'--progress=verbose',
'--variants=default',
'sweet/bananas',
)
# TODO(machenbach): We don't have a way for testing if the correct
# timeout was used.
self.assertEqual(0, result.returncode, result)
def testRandomSeedStressWithDefault(self):
"""Test using random-seed-stress feature has the right number of tests."""
with temp_base() as basedir:
result = run_tests(
basedir,
'--progress=verbose',
'--variants=default',
'--random-seed-stress-count=2',
'sweet/bananas',
infra_staging=False,
)
self.assertIn('2 tests ran', result.stdout, result)
self.assertEqual(0, result.returncode, result)
  def testRandomSeedStressWithSeed(self):
    """Test using random-seed-stress feature passing a random seed."""
    with temp_base() as basedir:
      result = run_tests(
          basedir,
          '--progress=verbose',
          '--variants=default',
          '--random-seed-stress-count=2',
          '--random-seed=123',
          'sweet/strawberries',
      )
      # One test, executed twice because of the stress count.
      self.assertIn('2 tests ran', result.stdout, result)
      # We use a failing test so that the command is printed and we can verify
      # that the right random seed was passed.
      self.assertIn('--random-seed=123', result.stdout, result)
      self.assertEqual(1, result.returncode, result)
  def testSpecificVariants(self):
    """Test using NO_VARIANTS modifiers in status files skips the desired tests.

    The test runner cmd line configures 4 tests to run (2 tests * 2 variants).
    But the status file applies a modifier to each skipping one of the
    variants.
    """
    with temp_base() as basedir:
      # The status-file modifiers in the fixture apply on asan builds.
      override_build_config(basedir, is_asan=True)
      result = run_tests(
          basedir,
          '--progress=verbose',
          '--variants=default,stress',
          'sweet/bananas',
          'sweet/raspberries',
      )
      # Both tests are either marked as running in only default or only
      # slow variant.
      self.assertIn('2 tests ran', result.stdout, result)
      self.assertEqual(0, result.returncode, result)
def testStatusFilePresubmit(self):
"""Test that the fake status file is well-formed."""
with temp_base() as basedir:
from testrunner.local import statusfile
self.assertTrue(statusfile.PresubmitCheck(
os.path.join(basedir, 'test', 'sweet', 'sweet.status')))
def testDotsProgress(self):
with temp_base() as basedir:
result = run_tests(
basedir,
'--progress=dots',
'sweet/cherries',
'sweet/bananas',
'--no-sorting', '-j1', # make results order deterministic
infra_staging=False,
)
self.assertIn('2 tests ran', result.stdout, result)
self.assertIn('F.', result.stdout, result)
self.assertEqual(1, result.returncode, result)
  def testMonoProgress(self):
    """Exercise the compact progress indicator in monochrome mode."""
    self._testCompactProgress('mono')
  def testColorProgress(self):
    """Exercise the compact progress indicator in color mode."""
    self._testCompactProgress('color')
  def _testCompactProgress(self, name):
    """Shared body for the compact progress indicators ('mono'/'color')."""
    with temp_base() as basedir:
      result = run_tests(
          basedir,
          '--progress=%s' % name,
          'sweet/cherries',
          'sweet/bananas',
          infra_staging=False,
      )
      if name == 'color':
        # The color indicator wraps the counters in ANSI escapes:
        # blue percentage, green pass count, red fail count.
        expected = ('\033[34m% 28\033[0m|'
                    '\033[32m+ 1\033[0m|'
                    '\033[31m- 1\033[0m]: Done')
      else:
        expected = '% 28|+ 1|- 1]: Done'
      self.assertIn(expected, result.stdout)
      self.assertIn('sweet/cherries', result.stdout)
      self.assertIn('sweet/bananas', result.stdout)
      self.assertEqual(1, result.returncode, result)
  def testExitAfterNFailures(self):
    """Test that the run aborts after --exit-after-n-failures failures."""
    with temp_base() as basedir:
      result = run_tests(
          basedir,
          '--progress=verbose',
          '--exit-after-n-failures=2',
          '-j1',
          'sweet/mangoes',       # PASS
          'sweet/strawberries',  # FAIL
          'sweet/blackberries',  # FAIL
          'sweet/raspberries',   # should not run
      )
      self.assertIn('sweet/mangoes default: pass', result.stdout, result)
      self.assertIn('sweet/strawberries default: FAIL', result.stdout, result)
      self.assertIn('Too many failures, exiting...', result.stdout, result)
      self.assertIn('sweet/blackberries default: FAIL', result.stdout, result)
      # The fourth test must not have been started at all.
      self.assertNotIn('sweet/raspberries', result.stdout, result)
      self.assertIn('2 tests failed', result.stdout, result)
      self.assertIn('3 tests ran', result.stdout, result)
      self.assertEqual(1, result.returncode, result)
  def testNumFuzzer(self):
    """Smoke test for the num-fuzzer entry point."""
    sys_args = ['--command-prefix', sys.executable, '--outdir', 'out/build']
    with temp_base() as basedir:
      with capture() as (stdout, stderr):
        code = num_fuzzer.NumFuzzer(basedir=basedir).execute(sys_args)
        result = Result(stdout.getvalue(), stderr.getvalue(), code)
      self.assertEqual(0, result.returncode, result)
def testRunnerFlags(self):
"""Test that runner-specific flags are passed to tests."""
with temp_base() as basedir:
result = run_tests(
basedir,
'--progress=verbose',
'--variants=default',
'--random-seed=42',
'sweet/bananas',
'-v',
)
self.assertIn(
'--test bananas --random-seed=42 --nohard-abort --testing-d8-test-runner',
result.stdout, result)
self.assertEqual(0, result.returncode, result)
# Allow running this test suite directly.
if __name__ == '__main__':
  unittest.main()
| |
# -*- coding: utf-8 -*-
########################################################################
#
# License: BSD
# Created: 2009-11-24
# Author: Francesc Alted - faltet@pytables.org
#
# $Id$
#
########################################################################
"""Test module for diferent kind of links under PyTables"""
import os
import unittest
import tempfile
import shutil
import tables as t
from tables.tests import common
from tables.link import ExternalLink
# Test for hard links
class HardLinkTestCase(common.TempFileMixin, common.PyTablesTestCase):
    """Tests for hard links between nodes of a PyTables file."""

    def _createFile(self):
        """Populate the temp file with two arrays, a group and three hard links."""
        self.h5file.createArray('/', 'arr1', [1, 2])
        group1 = self.h5file.createGroup('/', 'group1')
        arr2 = self.h5file.createArray(group1, 'arr2', [1, 2, 3])
        # Hard link to a group, target given by path
        lgroup1 = self.h5file.createHardLink(
            '/', 'lgroup1', '/group1')
        self.assertTrue(lgroup1 is not None)
        # Hard link to a leaf, target given by path
        larr1 = self.h5file.createHardLink(
            group1, 'larr1', '/arr1')
        self.assertTrue(larr1 is not None)
        # Hard link to a leaf, target given by node object
        larr2 = self.h5file.createHardLink(
            '/', 'larr2', arr2)
        self.assertTrue(larr2 is not None)

    def test00_create(self):
        """Creating hard links"""
        self._createFile()
        self._checkEqualityGroup(self.h5file.root.group1,
                                 self.h5file.root.lgroup1,
                                 hardlink=True)
        self._checkEqualityLeaf(self.h5file.root.arr1,
                                self.h5file.root.group1.larr1,
                                hardlink=True)
        self._checkEqualityLeaf(self.h5file.root.lgroup1.arr2,
                                self.h5file.root.larr2,
                                hardlink=True)

    def test01_open(self):
        """Opening a file with hard links"""
        self._createFile()
        # Hard links must survive a close/reopen cycle.
        self._reopen()
        self._checkEqualityGroup(self.h5file.root.group1,
                                 self.h5file.root.lgroup1,
                                 hardlink=True)
        self._checkEqualityLeaf(self.h5file.root.arr1,
                                self.h5file.root.group1.larr1,
                                hardlink=True)
        self._checkEqualityLeaf(self.h5file.root.lgroup1.arr2,
                                self.h5file.root.larr2,
                                hardlink=True)

    def test02_removeLeaf(self):
        """Removing a hard link to a Leaf"""
        self._createFile()
        # First delete the initial link
        self.h5file.root.arr1.remove()
        self.assertTrue('/arr1' not in self.h5file)
        # The second link should still be there
        if common.verbose:
            print "Remaining link:", self.h5file.root.group1.larr1
        self.assertTrue('/group1/larr1' in self.h5file)
        # Remove the second link
        self.h5file.root.group1.larr1.remove()
        self.assertTrue('/group1/larr1' not in self.h5file)

    def test03_removeGroup(self):
        """Removing a hard link to a Group"""
        self._createFile()
        if common.verbose:
            print "Original object tree:", self.h5file
        # First delete the initial link
        self.h5file.root.group1._f_remove(force=True)
        self.assertTrue('/group1' not in self.h5file)
        # The second link should still be there
        if common.verbose:
            print "Remaining link:", self.h5file.root.lgroup1
            print "Object tree:", self.h5file
        self.assertTrue('/lgroup1' in self.h5file)
        # Remove the second link
        self.h5file.root.lgroup1._g_remove(recursive=True)
        self.assertTrue('/lgroup1' not in self.h5file)
        if common.verbose:
            print "Final object tree:", self.h5file
# Test for soft links
class SoftLinkTestCase(common.TempFileMixin, common.PyTablesTestCase):
    """Tests for soft (symbolic) links between nodes of a PyTables file.

    Soft links are dereferenced by calling them, e.g. ``root.lgroup1()``.
    """

    def _createFile(self):
        """Populate the temp file with two arrays, a group and three soft links."""
        self.h5file.createArray('/', 'arr1', [1, 2])
        group1 = self.h5file.createGroup('/', 'group1')
        arr2 = self.h5file.createArray(group1, 'arr2', [1, 2, 3])
        lgroup1 = self.h5file.createSoftLink(
            '/', 'lgroup1', '/group1')
        self.assertTrue(lgroup1 is not None)
        larr1 = self.h5file.createSoftLink(
            group1, 'larr1', '/arr1')
        self.assertTrue(larr1 is not None)
        # Target may also be given as a node object
        larr2 = self.h5file.createSoftLink(
            '/', 'larr2', arr2)
        self.assertTrue(larr2 is not None)

    def test00_create(self):
        """Creating soft links"""
        self._createFile()
        self._checkEqualityGroup(self.h5file.root.group1,
                                 self.h5file.root.lgroup1())
        self._checkEqualityLeaf(self.h5file.root.arr1,
                                self.h5file.root.group1.larr1())
        self._checkEqualityLeaf(self.h5file.root.lgroup1().arr2,
                                self.h5file.root.larr2())

    def test01_open(self):
        """Opening a file with soft links"""
        self._createFile()
        # Soft links must survive a close/reopen cycle.
        self._reopen()
        self._checkEqualityGroup(self.h5file.root.group1,
                                 self.h5file.root.lgroup1())
        self._checkEqualityLeaf(self.h5file.root.arr1,
                                self.h5file.root.group1.larr1())
        self._checkEqualityLeaf(self.h5file.root.lgroup1().arr2,
                                self.h5file.root.larr2())

    def test02_remove(self):
        """Removing a soft link."""
        self._createFile()
        # First delete the referred link
        self.h5file.root.arr1.remove()
        self.assertTrue('/arr1' not in self.h5file)
        # The soft link should still be there (but dangling)
        if common.verbose:
            print "Dangling link:", self.h5file.root.group1.larr1
        self.assertTrue('/group1/larr1' in self.h5file)
        # Remove the soft link itself
        self.h5file.root.group1.larr1.remove()
        self.assertTrue('/group1/larr1' not in self.h5file)

    def test03_copy(self):
        """Copying a soft link."""
        self._createFile()
        # Copy the link into another location
        root = self.h5file.root
        lgroup1 = root.lgroup1
        lgroup2 = lgroup1.copy('/', 'lgroup2')
        self.assertTrue('/lgroup1' in self.h5file)
        self.assertTrue('/lgroup2' in self.h5file)
        self.assertTrue('lgroup2' in root._v_children)
        self.assertTrue('lgroup2' in root._v_links)
        if common.verbose:
            print "Copied link:", lgroup2
        # Remove the first link
        lgroup1.remove()
        # The copy must still dereference to the original target
        self._checkEqualityGroup(self.h5file.root.group1,
                                 self.h5file.root.lgroup2())

    def test03_overwrite(self):
        """Overwrite a soft link."""
        self._createFile()
        # Copy the link into another location
        root = self.h5file.root
        lgroup1 = root.lgroup1
        lgroup2 = lgroup1.copy('/', 'lgroup2')
        # Copying again over the same name must succeed with overwrite=True
        lgroup2 = lgroup1.copy('/', 'lgroup2', overwrite=True)
        self.assertTrue('/lgroup1' in self.h5file)
        self.assertTrue('/lgroup2' in self.h5file)
        self.assertTrue('lgroup2' in root._v_children)
        self.assertTrue('lgroup2' in root._v_links)
        if common.verbose:
            print "Copied link:", lgroup2
        # Remove the first link
        lgroup1.remove()
        self._checkEqualityGroup(self.h5file.root.group1,
                                 self.h5file.root.lgroup2())

    def test04_move(self):
        """Moving a soft link."""
        self._createFile()
        # Move the link into another location
        lgroup1 = self.h5file.root.lgroup1
        group2 = self.h5file.createGroup('/', 'group2')
        lgroup1.move(group2, 'lgroup2')
        lgroup2 = self.h5file.root.group2.lgroup2
        if common.verbose:
            print "Moved link:", lgroup2
        self.assertTrue('/lgroup1' not in self.h5file)
        self.assertTrue('/group2/lgroup2' in self.h5file)
        self._checkEqualityGroup(self.h5file.root.group1,
                                 self.h5file.root.group2.lgroup2())

    def test05_rename(self):
        """Renaming a soft link."""
        self._createFile()
        # Rename the link
        lgroup1 = self.h5file.root.lgroup1
        lgroup1.rename('lgroup2')
        lgroup2 = self.h5file.root.lgroup2
        if common.verbose:
            print "Moved link:", lgroup2
        self.assertTrue('/lgroup1' not in self.h5file)
        self.assertTrue('/lgroup2' in self.h5file)
        self._checkEqualityGroup(self.h5file.root.group1,
                                 self.h5file.root.lgroup2())

    def test06a_relative_path(self):
        """Using soft links with relative paths."""
        self._createFile()
        # Create new group
        self.h5file.createGroup('/group1', 'group3')
        # ... and relative link
        lgroup3 = self.h5file.createSoftLink(
            '/group1', 'lgroup3', 'group3')
        if common.verbose:
            print "Relative path link:", lgroup3
        self.assertTrue('/group1/lgroup3' in self.h5file)
        self._checkEqualityGroup(self.h5file.root.group1.group3,
                                 self.h5file.root.group1.lgroup3())

    def test06b_relative_path(self):
        """Using soft links with relative paths (./ version)"""
        self._createFile()
        # Create new group
        self.h5file.createGroup('/group1', 'group3')
        # ... and relative link
        lgroup3 = self.h5file.createSoftLink(
            '/group1', 'lgroup3', './group3')
        if common.verbose:
            print "Relative path link:", lgroup3
        self.assertTrue('/group1/lgroup3' in self.h5file)
        self._checkEqualityGroup(self.h5file.root.group1.group3,
                                 self.h5file.root.group1.lgroup3())

    def test07_walkNodes(self):
        """Checking `walkNodes` with `classname` option."""
        self._createFile()
        links = [node._v_pathname for node in
                 self.h5file.walkNodes('/', classname="Link")]
        if common.verbose:
            print "detected links (classname='Link'):", links
        self.assertEqual(links, ['/larr2', '/lgroup1', '/group1/larr1'])
        links = [node._v_pathname for node in
                 self.h5file.walkNodes('/', classname="SoftLink")]
        if common.verbose:
            print "detected links (classname='SoftLink'):", links
        self.assertEqual(links, ['/larr2', '/lgroup1', '/group1/larr1'])

    def test08__v_links(self):
        """Checking `Group._v_links`."""
        self._createFile()
        links = [node for node in self.h5file.root._v_links]
        if common.verbose:
            print "detected links (under root):", links
        self.assertEqual(len(links), 2)
        links = [node for node in self.h5file.root.group1._v_links]
        if common.verbose:
            print "detected links (under /group1):", links
        self.assertEqual(links, ['larr1'])

    def test09_link_to_link(self):
        """Checking linked links."""
        self._createFile()
        # Create a link to another existing link
        lgroup2 = self.h5file.createSoftLink(
            '/', 'lgroup2', '/lgroup1')
        # Dereference it once:
        self.assertTrue(lgroup2() is self.h5file.getNode('/lgroup1'))
        if common.verbose:
            print "First dereference is correct:", lgroup2()
        # Dereference it twice:
        self.assertTrue(lgroup2()() is self.h5file.getNode('/group1'))
        if common.verbose:
            print "Second dereference is correct:", lgroup2()()

    def test10_copy_link_to_file(self):
        """Checking copying a link to another file."""
        self._createFile()
        fname = tempfile.mktemp(".h5")
        h5f = t.openFile(fname, "a")
        h5f.createArray('/', 'arr1', [1, 2])
        h5f.createGroup('/', 'group1')
        lgroup1 = self.h5file.root.lgroup1
        lgroup1_ = lgroup1.copy(h5f.root, 'lgroup1')
        self.assertTrue('/lgroup1' in self.h5file)
        self.assertTrue('/lgroup1' in h5f)
        self.assertTrue(lgroup1_ in h5f)
        if common.verbose:
            print "Copied link:", lgroup1_, 'in:', lgroup1_._v_file.filename
        h5f.close()
        os.remove(fname)
# Test for external links
class ExternalLinkTestCase(common.TempFileMixin, common.PyTablesTestCase):
    """Tests for external links, i.e. links pointing into another HDF5 file."""

    def tearDown(self):
        """Remove ``extfname``."""
        self.exth5file.close()
        os.remove(self.extfname)  # comment this for debugging purposes only
        super(ExternalLinkTestCase, self).tearDown()

    def _createFile(self):
        """Populate the temp file and a second (external) file, then link them."""
        self.h5file.createArray('/', 'arr1', [1, 2])
        group1 = self.h5file.createGroup('/', 'group1')
        self.h5file.createArray(group1, 'arr2', [1, 2, 3])
        # The external file
        self.extfname = tempfile.mktemp(".h5")
        self.exth5file = t.openFile(self.extfname, "w")
        extarr1 = self.exth5file.createArray('/', 'arr1', [1, 2])
        self.assertTrue(extarr1 is not None)
        extgroup1 = self.exth5file.createGroup('/', 'group1')
        extarr2 = self.exth5file.createArray(extgroup1, 'arr2', [1, 2, 3])
        # Create external links
        lgroup1 = self.h5file.createExternalLink(
            '/', 'lgroup1', '%s:/group1'%self.extfname)
        self.assertTrue(lgroup1 is not None)
        larr1 = self.h5file.createExternalLink(
            group1, 'larr1', '%s:/arr1'%self.extfname)
        self.assertTrue(larr1 is not None)
        # Target may also be given as a node object of the external file
        larr2 = self.h5file.createExternalLink('/', 'larr2', extarr2)
        self.assertTrue(larr2 is not None)
        # Re-open the external file in 'r'ead-only mode
        self.exth5file.close()
        self.exth5file = t.openFile(self.extfname, "r")

    def test00_create(self):
        """Creating soft links"""
        self._createFile()
        self._checkEqualityGroup(self.exth5file.root.group1,
                                 self.h5file.root.lgroup1())
        self._checkEqualityLeaf(self.exth5file.root.arr1,
                                self.h5file.root.group1.larr1())
        self._checkEqualityLeaf(self.h5file.root.lgroup1().arr2,
                                self.h5file.root.larr2())

    def test01_open(self):
        """Opening a file with soft links"""
        self._createFile()
        # External links must survive a close/reopen cycle.
        self._reopen()
        self._checkEqualityGroup(self.exth5file.root.group1,
                                 self.h5file.root.lgroup1())
        self._checkEqualityLeaf(self.exth5file.root.arr1,
                                self.h5file.root.group1.larr1())
        self._checkEqualityLeaf(self.h5file.root.lgroup1().arr2,
                                self.h5file.root.larr2())

    def test02_remove(self):
        """Removing an external link."""
        self._createFile()
        # Re-open the external file in 'a'ppend mode
        self.exth5file.close()
        self.exth5file = t.openFile(self.extfname, "a")
        # First delete the referred link
        self.exth5file.root.arr1.remove()
        self.assertTrue('/arr1' not in self.exth5file)
        # The external link should still be there (but dangling)
        if common.verbose:
            print "Dangling link:", self.h5file.root.group1.larr1
        self.assertTrue('/group1/larr1' in self.h5file)
        # Remove the external link itself
        self.h5file.root.group1.larr1.remove()
        self.assertTrue('/group1/larr1' not in self.h5file)

    def test03_copy(self):
        """Copying an external link."""
        self._createFile()
        # Copy the link into another location
        root = self.h5file.root
        lgroup1 = root.lgroup1
        lgroup2 = lgroup1.copy('/', 'lgroup2')
        self.assertTrue('/lgroup1' in self.h5file)
        self.assertTrue('/lgroup2' in self.h5file)
        self.assertTrue('lgroup2' in root._v_children)
        self.assertTrue('lgroup2' in root._v_links)
        if common.verbose:
            print "Copied link:", lgroup2
        # Remove the first link
        lgroup1.remove()
        # The copy must still dereference to the external target
        self._checkEqualityGroup(self.exth5file.root.group1,
                                 self.h5file.root.lgroup2())

    def test03_overwrite(self):
        """Overwrite an external link."""
        self._createFile()
        # Copy the link into another location
        root = self.h5file.root
        lgroup1 = root.lgroup1
        lgroup2 = lgroup1.copy('/', 'lgroup2')
        # Copying again over the same name must succeed with overwrite=True
        lgroup2 = lgroup1.copy('/', 'lgroup2', overwrite=True)
        self.assertTrue('/lgroup1' in self.h5file)
        self.assertTrue('/lgroup2' in self.h5file)
        self.assertTrue('lgroup2' in root._v_children)
        self.assertTrue('lgroup2' in root._v_links)
        if common.verbose:
            print "Copied link:", lgroup2
        # Remove the first link
        lgroup1.remove()
        self._checkEqualityGroup(self.exth5file.root.group1,
                                 self.h5file.root.lgroup2())

    def test04_move(self):
        """Moving an external link."""
        self._createFile()
        # Move the link into another location
        lgroup1 = self.h5file.root.lgroup1
        group2 = self.h5file.createGroup('/', 'group2')
        lgroup1.move(group2, 'lgroup2')
        lgroup2 = self.h5file.root.group2.lgroup2
        if common.verbose:
            print "Moved link:", lgroup2
        self.assertTrue('/lgroup1' not in self.h5file)
        self.assertTrue('/group2/lgroup2' in self.h5file)
        self._checkEqualityGroup(self.exth5file.root.group1,
                                 self.h5file.root.group2.lgroup2())

    def test05_rename(self):
        """Renaming an external link."""
        self._createFile()
        # Rename the link
        lgroup1 = self.h5file.root.lgroup1
        lgroup1.rename('lgroup2')
        lgroup2 = self.h5file.root.lgroup2
        if common.verbose:
            print "Moved link:", lgroup2
        self.assertTrue('/lgroup1' not in self.h5file)
        self.assertTrue('/lgroup2' in self.h5file)
        self._checkEqualityGroup(self.exth5file.root.group1,
                                 self.h5file.root.lgroup2())

    def test07_walkNodes(self):
        """Checking `walkNodes` with `classname` option."""
        self._createFile()
        # Create a new soft link
        self.h5file.createSoftLink('/group1', 'lgroup3', './group3')
        links = [node._v_pathname for node in
                 self.h5file.walkNodes('/', classname="Link")]
        if common.verbose:
            print "detected links (classname='Link'):", links
        self.assertEqual(links, ['/larr2', '/lgroup1',
                                 '/group1/larr1', '/group1/lgroup3'])
        links = [node._v_pathname for node in
                 self.h5file.walkNodes('/', classname="ExternalLink")]
        if common.verbose:
            print "detected links (classname='ExternalLink'):", links
        self.assertEqual(links, ['/larr2', '/lgroup1', '/group1/larr1'])

    def test08__v_links(self):
        """Checking `Group._v_links`."""
        self._createFile()
        links = [node for node in self.h5file.root._v_links]
        if common.verbose:
            print "detected links (under root):", links
        self.assertEqual(len(links), 2)
        links = [node for node in self.h5file.root.group1._v_links]
        if common.verbose:
            print "detected links (under /group1):", links
        self.assertEqual(links, ['larr1'])

    def test09_umount(self):
        """Checking `umount()` method."""
        self._createFile()
        link = self.h5file.root.lgroup1
        # The external file is not mounted until the link is dereferenced
        self.assertTrue(link.extfile is None)
        # Dereference a external node (and hence, 'mount' a file)
        enode = link()
        self.assertTrue(enode is not None)
        self.assertTrue(link.extfile is not None)
        # Umount the link
        link.umount()
        self.assertTrue(link.extfile is None)

    def test10_copy_link_to_file(self):
        """Checking copying a link to another file."""
        self._createFile()
        fname = tempfile.mktemp(".h5")
        h5f = t.openFile(fname, "a")
        h5f.createArray('/', 'arr1', [1, 2])
        h5f.createGroup('/', 'group1')
        lgroup1 = self.h5file.root.lgroup1
        lgroup1_ = lgroup1.copy(h5f.root, 'lgroup1')
        self.assertTrue('/lgroup1' in self.h5file)
        self.assertTrue('/lgroup1' in h5f)
        self.assertTrue(lgroup1_ in h5f)
        if common.verbose:
            print "Copied link:", lgroup1_, 'in:', lgroup1_._v_file.filename
        h5f.close()
        os.remove(fname)
#----------------------------------------------------------------------
def suite():
    """Return a test suite consisting of all the test cases in the module."""
    all_tests = unittest.TestSuite()
    niter = 1
    #common.heavy = 1  # uncomment this only for testing purposes
    case_classes = (HardLinkTestCase, SoftLinkTestCase, ExternalLinkTestCase)
    for _ in range(niter):
        for case_class in case_classes:
            all_tests.addTest(unittest.makeSuite(case_class))
    return all_tests
# Allow running the module's suite directly.
if __name__ == '__main__':
    unittest.main( defaultTest='suite' )
## Local Variables:
## mode: python
## py-indent-offset: 4
## tab-width: 4
## fill-column: 72
## End:
| |
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Sends notifications after automatic exports.
Automatically comments on a Gerrit CL when its corresponding PR fails the Taskcluster check. In
other words, surfaces cross-browser WPT regressions from Github to Gerrit.
Design doc: https://docs.google.com/document/d/1MtdbUcWBDZyvmV0FOdsTWw_Jv16YtE6KW5BnnCVYX4c
"""
import logging
from blinkpy.w3c.common import WPT_REVISION_FOOTER, WPT_GH_URL
from blinkpy.w3c.gerrit import GerritError
from blinkpy.w3c.wpt_github import GitHubError
_log = logging.getLogger(__name__)
# Names of the Taskcluster checks whose failures are surfaced to Gerrit.
RELEVANT_TASKCLUSTER_CHECKS = [
    'wpt-chrome-dev-stability', 'wpt-firefox-nightly-stability', 'lint',
    'infrastructure/ tests'
]
class ExportNotifier(object):
    """Comments on Gerrit CLs whose exported PRs fail Taskcluster checks."""

    def __init__(self, host, wpt_github, gerrit, dry_run=True):
        self.host = host
        self.wpt_github = wpt_github
        self.gerrit = gerrit
        # When True, log what would be commented instead of posting.
        self.dry_run = dry_run

    def main(self):
        """Surfaces relevant Taskcluster check failures to Gerrit through comments.

        Returns:
            True if processing was aborted by an error; False on success.
        """
        gerrit_dict = {}
        try:
            # (typo fix: message previously read 'failiing')
            _log.info('Searching for recent failing chromium exports.')
            prs = self.wpt_github.recent_failing_chromium_exports()
        except GitHubError as e:
            _log.info(
                'Surfacing Taskcluster failures cannot be completed due to the following error:'
            )
            _log.error(str(e))
            return True

        if len(prs) > 100:
            _log.error('Too many open failing PRs: %s; abort.', len(prs))
            return True

        _log.info('Found %d failing PRs.', len(prs))
        for pr in prs:
            check_runs = self.get_check_runs(pr.number)
            if not check_runs:
                continue

            checks_results = self.get_relevant_failed_taskcluster_checks(
                check_runs, pr.number)
            if not checks_results:
                continue

            # The Change-Id footer maps the PR back to its Gerrit CL.
            gerrit_id = self.wpt_github.extract_metadata(
                'Change-Id: ', pr.body)
            if not gerrit_id:
                _log.error('Can not retrieve Change-Id for %s.', pr.number)
                continue

            gerrit_sha = self.wpt_github.extract_metadata(
                WPT_REVISION_FOOTER, pr.body)
            gerrit_dict[gerrit_id] = PRStatusInfo(checks_results, pr.number,
                                                  gerrit_sha)

        self.process_failing_prs(gerrit_dict)
        return False

    def get_check_runs(self, number):
        """Retrieves check runs through a PR number.

        Returns:
            A JSON array representing the check runs for the HEAD of this PR,
            or None on GitHub API errors.
        """
        try:
            branch = self.wpt_github.get_pr_branch(number)
            check_runs = self.wpt_github.get_branch_check_runs(branch)
        except GitHubError as e:
            _log.error(str(e))
            return None

        return check_runs

    def process_failing_prs(self, gerrit_dict):
        """Processes and comments on CLs with failed Taskcluster checks.

        Args:
            gerrit_dict: maps Gerrit Change-Ids to PRStatusInfo objects.
        """
        _log.info('Processing %d CLs with failed Taskcluster checks.',
                  len(gerrit_dict))
        for change_id, pr_status_info in gerrit_dict.items():
            _log.info('Change-Id: %s', change_id)
            try:
                cl = self.gerrit.query_cl_comments_and_revisions(change_id)
                has_commented = self.has_latest_taskcluster_status_commented(
                    cl.messages, pr_status_info)
                if has_commented:
                    _log.info('Comment is up-to-date. Nothing to do here.')
                    continue

                # Mention the patchset when the CL revision for this SHA
                # is still known to Gerrit.
                revision = cl.revisions.get(pr_status_info.gerrit_sha)
                if revision:
                    cl_comment = pr_status_info.to_gerrit_comment(
                        revision['_number'])
                else:
                    cl_comment = pr_status_info.to_gerrit_comment()

                if self.dry_run:
                    _log.info('[dry_run] Would have commented on CL %s\n',
                              change_id)
                    _log.debug('Comments are:\n%s\n', cl_comment)
                else:
                    _log.info('Commenting on CL %s\n', change_id)
                    cl.post_comment(cl_comment)
            except GerritError as e:
                _log.error('Could not process Gerrit CL %s: %s', change_id,
                           str(e))
                continue

    def has_latest_taskcluster_status_commented(self, messages,
                                                pr_status_info):
        """Determines if the Taskcluster status has already been commented on the messages of a CL.

        Args:
            messages: messages of a CL in JSON Array format, in chronological order.
            pr_status_info: PRStatusInfo object.
        """
        # Scan from newest to oldest; the first comment carrying a SHA tag
        # is the latest status we posted.
        for message in reversed(messages):
            cl_gerrit_sha = PRStatusInfo.get_gerrit_sha_from_comment(
                message['message'])
            if cl_gerrit_sha:
                _log.debug('Found latest comment: %s', message['message'])
                return cl_gerrit_sha == pr_status_info.gerrit_sha

        return False

    def get_relevant_failed_taskcluster_checks(self, check_runs, pr_number):
        """Filters relevant failed Taskcluster checks from check_runs.

        Args:
            check_runs: A JSON array; e.g. "check_runs" in
                https://developer.github.com/v3/checks/runs/#response-3
            pr_number: The PR number.

        Returns:
            A dictionary where keys are names of the Taskcluster checks and values
            are URLs to the Taskcluster checks' results.
        """
        checks_results = {}
        for check in check_runs:
            if (check['conclusion'] == 'failure') and (
                    check['name'] in RELEVANT_TASKCLUSTER_CHECKS):
                result_url = '{}pull/{}/checks?check_run_id={}'.format(
                    WPT_GH_URL, pr_number, check['id'])
                checks_results[check['name']] = result_url

        return checks_results
class PRStatusInfo(object):
    """Failed-check status of an exported PR, rendered as a Gerrit comment."""

    CL_SHA_TAG = 'Gerrit CL SHA: '
    PATCHSET_TAG = 'Patchset Number: '

    def __init__(self, checks_results, pr_number, gerrit_sha=None):
        self._checks_results = checks_results
        self._pr_number = pr_number
        # Fall back to the literal 'Latest' when no SHA was recorded.
        self._gerrit_sha = gerrit_sha if gerrit_sha else 'Latest'

    @property
    def gerrit_sha(self):
        return self._gerrit_sha

    @staticmethod
    def get_gerrit_sha_from_comment(comment):
        """Return the SHA recorded in a comment, or None if none is tagged."""
        tag = PRStatusInfo.CL_SHA_TAG
        for candidate in comment.splitlines():
            if candidate.startswith(tag):
                return candidate[len(tag):]
        return None

    def _checks_results_as_comment(self):
        # One "<check> (<url>)" line per failed check.
        fragments = ['\n%s (%s)' % (name, link)
                     for name, link in self._checks_results.items()]
        return ''.join(fragments)

    def to_gerrit_comment(self, patchset=None):
        """Render the full Gerrit comment, optionally tagged with a patchset."""
        comment = (
            'The exported PR, {}, has failed the following check(s) '
            'on GitHub:\n{}\n\nThese failures will block the export. '
            'They may represent new or existing problems; please take '
            'a look at the output and see if it can be fixed. '
            'Unresolved failures will be looked at by the Ecosystem-Infra '
            'sheriff after this CL has been landed in Chromium; if you '
            'need earlier help please contact ecosystem-infra@chromium.org.\n\n'
            'Any suggestions to improve this service are welcome; '
            'crbug.com/1027618.').format(
                '%spull/%d' % (WPT_GH_URL, self._pr_number),
                self._checks_results_as_comment())
        comment += ('\n\n{}{}').format(PRStatusInfo.CL_SHA_TAG,
                                       self._gerrit_sha)
        if patchset is not None:
            comment += ('\n{}{}').format(PRStatusInfo.PATCHSET_TAG, patchset)
        return comment
| |
"""
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import shutil
from scripts.export.genomes import genome_fetch as gf
from config.rfam_local import TEST_DIR
# --------------------------------------------------------------------------------------------------
def test_find_proteomes_without_accessions():
    """Print every known UPID/GCA pair (manual inspection aid, no asserts)."""
    id_pairs = gf.load_upid_gca_pairs()
    for upid in id_pairs.keys():
        print("%s %s" % (upid, id_pairs[upid]))
    # NOTE(review): the triple-quoted block below is dead code preserved as a
    # bare string literal; it is never executed.
    """
    accs = fetch_genome_accessions(upid, id_pairs[upid])
    if len(accs) == 0:
        print upid
    else:
        print "%s %s\n"%(upid,str(accs))
    """
# --------------------------------------------------------------------------------------------------
def test_fetch_ref_proteomes():
    """fetch_ref_proteomes returns a non-empty container of proteome ids."""
    ref_prots = gf.fetch_ref_proteomes()
    # Was `len(ref_prots) != 0 or ref_prots is not None`, which raised
    # TypeError on None and passed vacuously on an empty result.
    assert ref_prots is not None and len(ref_prots) != 0
# --------------------------------------------------------------------------------------------------
def test_export_gca_accessions():
    """export_gca_accessions parses the UPID/GCA file into a non-empty dict."""
    # Dropped the redundant `= None` pre-assignment.
    upid_gca_pairs = gf.export_gca_accessions("input/UPID_GCA.tsv")
    assert upid_gca_pairs is not None
    assert len(upid_gca_pairs) != 0
# --------------------------------------------------------------------------------------------------
def test_extract_genome_acc():
    """extract_genome_acc returns an accession (or -1) for a proteome RDF."""
    # homo sapiens
    gen_acc = gf.extract_genome_acc("http://www.uniprot.org/proteomes/UP000005640.rdf")
    # Was `gen_acc == -1 or gen_acc is not None`; since -1 is itself not None,
    # the first clause was redundant — the check reduces to "not None".
    assert gen_acc is not None
# --------------------------------------------------------------------------------------------------
def test_proteome_rdf_scanner():
    """proteome_rdf_scanner finds accessions in a proteome RDF document."""
    # homo sapiens
    accs = gf.proteome_rdf_scanner("http://www.uniprot.org/proteomes/UP000005640.rdf")
    # Was `len(accs) != 0 or accs is not None`, which raised TypeError on None
    # and passed vacuously on an empty result.
    assert accs is not None and len(accs) != 0
# --------------------------------------------------------------------------------------------------
def test_fetch_genome_acc():
    """fetch_genome_acc returns accessions for a valid proteome id."""
    gen_accs = gf.fetch_genome_acc("UP000005640")
    # Was `gen_accs is not None or len(gen_accs) != 0`: on None the second
    # clause raised TypeError, and a non-None empty result passed vacuously.
    assert gen_accs is not None and len(gen_accs) != 0
# --------------------------------------------------------------------------------------------------
def test_fetch_ena_file():
    """fetch_ena_file downloads a small ENA record into a scratch directory."""
    import tempfile
    # A unique temp dir avoids collisions with concurrent runs and stale
    # leftovers from previous failures (the fixed /tmp/gen_test path did not).
    work_dir = tempfile.mkdtemp(prefix="gen_test")
    try:
        # Citrus psorosis virus - a small genome keeps the download fast
        check = gf.fetch_ena_file("AY654894", "fasta", work_dir)
        assert check is True
    finally:
        # Clean up even when the download or the assertion fails.
        shutil.rmtree(work_dir)
# --------------------------------------------------------------------------------------------------
def test_extract_assembly_accs():
    """extract_assembly_accs returns accessions for a GCA assembly id."""
    gen_accs = gf.extract_assembly_accs("GCA_000001405.23")
    # Was `gen_accs is not None or len(gen_accs) != 0`: on None the second
    # clause raised TypeError, and a non-None empty result passed vacuously.
    assert gen_accs is not None and len(gen_accs) != 0
# --------------------------------------------------------------------------------------------------
"""
def test_fetch_genome():
gen = "GCA_000320365.1"
dest_dir = "/tmp/gen_test"
gf.fetch_genome(gen, dest_dir)
gen_files = os.listdir(os.path.join("/tmp/gen_test", gen.partition('.')[0]))
assert len(gen_files) != 0
shutil.rmtree(os.path.join("/tmp/gen_test", gen.partition('.')[0]))
"""
# --------------------------------------------------------------------------------------------------
def test_rdf_accession_search():
    """rdf_accession_search finds accessions matching a pattern in an RDF."""
    # homo sapiens
    rdf_accs = gf.rdf_accession_search("UP000005640", "/embl/")
    # Was `rdf_accs is not None or len(rdf_accs) != 0`: on None the second
    # clause raised TypeError, and a non-None empty result passed vacuously.
    assert rdf_accs is not None and len(rdf_accs) != 0
# --------------------------------------------------------------------------------------------------
def test_load_upid_gca_file():
    """load_upid_gca_file parses the UPID/GCA file into a non-empty dict."""
    id_pairs = gf.load_upid_gca_file("input/UPID_GCA.tsv")
    # Was `id_pairs is not None or len(id_pairs.keys()) != 0`: on None the
    # second clause raised AttributeError, and an empty dict passed vacuously.
    assert id_pairs is not None and len(id_pairs) != 0
# --------------------------------------------------------------------------------------------------
def test_load_upid_gca_pairs():
    """load_upid_gca_pairs returns a non-empty dict of UPID/GCA pairs."""
    id_pairs = gf.load_upid_gca_pairs()
    # Was `id_pairs is not None or len(id_pairs.keys()) != 0`: on None the
    # second clause raised AttributeError, and an empty dict passed vacuously.
    assert id_pairs is not None and len(id_pairs) != 0
# --------------------------------------------------------------------------------------------------
def test_fetch_genome_accessions():
    """fetch_genome_accessions returns accessions for a UPID/GCA pair."""
    # need to get all the cases here - Unittests test_cases needed
    gen_accs = gf.fetch_genome_accessions("UP000005640", "GCA_000001405.23")
    # Was `gen_accs is not None or len(gen_accs) != 0`: on None the second
    # clause raised TypeError, and a non-None empty result passed vacuously.
    assert gen_accs is not None and len(gen_accs) != 0
# --------------------------------------------------------------------------------------------------
def test_assembly_report_parser():
    """assembly_report_parser extracts accessions from an ENA report URL."""
    # homo sapiens
    report_url = "ftp://ftp.ebi.ac.uk/pub/databases/ena/assembly/GCA_000/GCA_000001/GCA_000001405.23_sequence_report.txt"
    accessions = gf.assembly_report_parser(report_url)
    # Was `len(accessions) != 0 or accessions is not None`, which raised
    # TypeError on None and passed vacuously on an empty result.
    assert accessions is not None and len(accessions) != 0
# --------------------------------------------------------------------------------------------------
def test_get_wgs_set_accession():
    """get_wgs_set_accession() should resolve a WGS accession for set AYNF, version 1."""
    result = gf.get_wgs_set_accession("AYNF", "1")
    assert result is not None
# --------------------------------------------------------------------------------------------------
def test_fetch_wgs_range_accs():
    """fetch_wgs_range_accs() must expand a WGS range into a non-empty list.

    Bug fix: replaced the vacuous ``x is not None or len(x) != 0`` assertion
    with ``and`` so both conditions are actually enforced.
    """
    # Escherichia coli LAU-EC6
    wgs_accs = gf.fetch_wgs_range_accs("AYNF01000001-AYNF01000106")
    assert wgs_accs is not None and len(wgs_accs) != 0
# --------------------------------------------------------------------------------------------------
def test_download_genomes():
    """download_genomes() should populate a per-genome directory with files."""
    accession = "GCA_000320365.1"  # Moraxella macacae
    target = "/tmp/gen_test"
    if not os.path.exists(target):
        os.mkdir(target)
    status = gf.download_genomes(accession, target)
    downloaded = os.listdir(os.path.join(target, accession.partition('.')[0]))
    assert status is not None and len(downloaded) != 0
    # clean up the whole scratch directory
    shutil.rmtree(target)
# --------------------------------------------------------------------------------------------------
def test_pipeline_genome_download_logic():
    """Walk a set of proteomes through the genome-download decision logic.

    Bug fix: ``other_accessions`` and ``wgs_set`` were initialised once before
    the loop. If the first proteome took the WGS-only ``elif`` branch,
    ``len(other_accessions)`` raised TypeError on None; worse, values leaked
    from one proteome into the next iteration. Both are now reset per upid.
    """
    dest_dir = os.path.join(TEST_DIR, "genome_pipeline_test")
    if not os.path.exists(dest_dir):
        os.mkdir(dest_dir)
    upids = ["UP000011602", "UP000051297", "UP000033913", "UP000053620",
             "UP000054516", "UP000014934", "UP000154645"]
    for upid in upids:
        # reset per-proteome state (see docstring)
        other_accessions = []
        wgs_set = None
        print("\nupid: ", upid)
        if not os.path.exists(os.path.join(dest_dir, upid)):
            os.mkdir(os.path.join(dest_dir, upid))
        # fetch proteome accessions, this will also copy GCA file if available
        genome_accessions = gf.get_genome_unique_accessions(upid, os.path.join(dest_dir, upid))
        print("Genome unique accessions: ", genome_accessions)
        if genome_accessions["GCA"] != -1:
            # 1. check for assembly report file
            other_accessions = genome_accessions["OTHER"]
            print("Other accessions: ", len(other_accessions))
            # fetch wgs set from ENA
            if len(other_accessions) == 0 and genome_accessions["WGS"] == -1:
                wgs_set = gf.extract_wgs_acc_from_gca_xml(genome_accessions["GCA"])
                print("No other accessions, extracting WGS set from xml...")
            if wgs_set is not None or genome_accessions["GCA_NA"] == 1:
                if genome_accessions["GCA_NA"] == 1:
                    wgs_set = genome_accessions["WGS"]
                    print("GCA report file unavailable, fetching WGS set")
                print("Copying file from ftp - GCA section")
        elif genome_accessions["WGS"] != -1 and genome_accessions["GCA"] == -1:
            # First copy WGS set in upid dir
            print("Copying file from ftp - WGS section")
        # this should be done in all cases
        # download genome accessions in proteome directory
        if len(other_accessions) > 0:
            print("Other accessions to download ", len(other_accessions))
# --------------------------------------------------------------------------------------------------
def test_download_gca_report_file_from_url():
    """Exercise download_gca_report_file_from_url() for a known GCA accession."""
    # NOTE(review): the returned status is never asserted on — this test only
    # verifies the call does not raise; consider adding a real assertion.
    accession = "GCA_000700745.1"
    status = gf.download_gca_report_file_from_url(accession, TEST_DIR)
# --------------------------------------------------------------------------------------------------
if __name__ == '__main__':
    # Ad-hoc test driver: individual tests are toggled by (un)commenting.
    # Only the pipeline download-logic test is currently enabled.
    # test_find_proteomes_without_accessions()
    # test_fetch_ref_proteomes()
    # test_extract_genome_acc()
    # test_proteome_rdf_scanner()
    # test_fetch_genome_acc()
    # test_extract_assembly_accs()
    # test_fetch_wgs_range_accs()
    # test_fetch_genome_accessions()
    # test_load_upid_gca_pairs()
    # test_load_upid_gca_file()
    # test_get_wgs_set_accession()
    # test_assembly_report_parser()
    # test_download_genomes()
    # test_fetch_genome()
    test_pipeline_genome_download_logic()
    # test_download_gca_report_file_from_url()
| |
#!/usr/bin/env python
#########################################################################################
#
# Parser for PropSeg binary.
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2015 Polytechnique Montreal <www.neuro.polymtl.ca>
# Authors: Benjamin De Leener
# Modified: 2015-03-03
#
# About the license: see the file LICENSE.TXT
#########################################################################################
# TODO: remove temp files in case rescaled is not "1"
import os
import pathlib
import sys
import logging
import numpy as np
from scipy import ndimage as ndi
from spinalcordtoolbox.image import Image, add_suffix, zeros_like, convert
from spinalcordtoolbox.utils.shell import SCTArgumentParser, Metavar, ActionCreateFolder, display_viewer_syntax
from spinalcordtoolbox.utils.sys import init_sct, run_proc, printv, set_loglevel
from spinalcordtoolbox.utils.fs import tmp_create, rmtree, extract_fname, mv, copy
from spinalcordtoolbox.centerline import optic
from spinalcordtoolbox.reports.qc import generate_qc
from spinalcordtoolbox.scripts import sct_image
logger = logging.getLogger(__name__)
def check_and_correct_segmentation(fname_segmentation, fname_centerline, folder_output='', threshold_distance=5.0,
                                   remove_temp_files=1, verbose=0):
    """
    This function takes the outputs of isct_propseg (centerline and segmentation) and check if the centerline of the
    segmentation is coherent with the centerline provided by the isct_propseg, especially on the edges (related
    to issue #1074).

    Slices whose segmentation contains more than one binary object, or whose
    center of mass lies further than ``threshold_distance`` (in mm) from the
    propseg centerline, are flagged; flagging is then propagated outwards from
    the mid-centerline slice so that a single continuous segment remains.
    Flagged slices are zeroed out and the corrected segmentation overwrites
    ``fname_segmentation`` in place.

    Args:
        fname_segmentation: filename of binary segmentation (overwritten in place)
        fname_centerline: filename of binary centerline
        folder_output: unused in the current implementation; kept for caller compatibility
        threshold_distance: threshold, in mm, beyond which centerlines are not coherent
        remove_temp_files: if truthy, delete the temporary working folder at the end
        verbose: verbosity level passed to printv
    Returns: None
    """
    printv('\nCheck consistency of segmentation...', verbose)
    # creating a temporary folder in which all temporary files will be placed and deleted afterwards
    path_tmp = tmp_create(basename="propseg")
    im_seg = convert(Image(fname_segmentation))
    im_seg.save(os.path.join(path_tmp, "tmp.segmentation.nii.gz"), mutable=True, verbose=0)
    im_centerline = convert(Image(fname_centerline))
    im_centerline.save(os.path.join(path_tmp, "tmp.centerline.nii.gz"), mutable=True, verbose=0)
    # go to tmp folder
    curdir = os.getcwd()
    os.chdir(path_tmp)
    # convert input to RPI (and store original info to use when converting back at the end)
    fname_seg_absolute = os.path.abspath(fname_segmentation)
    image_input_orientation = im_seg.orientation
    sct_image.main("-i tmp.segmentation.nii.gz -setorient RPI -o tmp.segmentation_RPI.nii.gz -v 0".split())
    sct_image.main("-i tmp.centerline.nii.gz -setorient RPI -o tmp.centerline_RPI.nii.gz -v 0".split())
    # go through segmentation image, and compare with centerline from propseg
    im_seg = Image('tmp.segmentation_RPI.nii.gz')
    im_centerline = Image('tmp.centerline_RPI.nii.gz')
    # Get size of data
    printv('\nGet data dimensions...', verbose)
    nx, ny, nz, nt, px, py, pz, pt = im_seg.dim
    # extraction of centerline provided by isct_propseg and computation of center of mass for each slice
    # the centerline is defined as the center of the tubular mesh outputed by propseg.
    # centerline: slice index (as str) -> [x, y] center of mass; key_centerline: slices with centerline data
    centerline, key_centerline = {}, []
    for i in range(nz):
        slice = im_centerline.data[:, :, i]
        if np.any(slice):
            # NOTE(review): ndi.measurements.center_of_mass is a deprecated alias
            # in recent scipy — presumably ndi.center_of_mass is the modern spelling; verify.
            x_centerline, y_centerline = ndi.measurements.center_of_mass(slice)
            centerline[str(i)] = [x_centerline, y_centerline]
            key_centerline.append(i)
    minz_centerline = np.min(key_centerline)
    maxz_centerline = np.max(key_centerline)
    # NOTE(review): mid_slice is the centerline *span* halved, but it is later used
    # as an absolute slice index; looks like it should be minz_centerline + span/2 — confirm.
    mid_slice = int((maxz_centerline - minz_centerline) / 2)
    # for each slice of the segmentation, check if only one object is present. If not, remove the slice from segmentation.
    # If only one object (the spinal cord) is present in the slice, check if its center of mass is close to the centerline of isct_propseg.
    slices_to_remove = [False] * nz  # flag that decides if the slice must be removed
    for i in range(minz_centerline, maxz_centerline + 1):
        # extraction of slice
        slice = im_seg.data[:, :, i]
        distance = -1
        label_objects, nb_labels = ndi.label(slice)  # count binary objects in the slice
        if nb_labels > 1:  # if there is more that one object in the slice, the slice is removed from the segmentation
            slices_to_remove[i] = True
        elif nb_labels == 1:  # check if the centerline is coherent with the one from isct_propseg
            x_centerline, y_centerline = ndi.measurements.center_of_mass(slice)
            slice_nearest_coord = min(key_centerline, key=lambda x: abs(x - i))
            coord_nearest_coord = centerline[str(slice_nearest_coord)]
            # 3D distance (in mm) between this slice's center of mass and the nearest centerline point
            distance = np.sqrt(((x_centerline - coord_nearest_coord[0]) * px) ** 2 +
                               ((y_centerline - coord_nearest_coord[1]) * py) ** 2 +
                               ((i - slice_nearest_coord) * pz) ** 2)
            if distance >= threshold_distance:  # threshold must be adjusted, default is 5 mm
                slices_to_remove[i] = True
    # Check list of removal and keep one continuous centerline (improve this comment)
    # Method:
    # starting from mid-centerline (in both directions), the first True encountered is applied to all following slices
    slice_to_change = False
    for i in range(mid_slice, nz):
        if slice_to_change:
            slices_to_remove[i] = True
        elif slices_to_remove[i]:
            slice_to_change = True
    slice_to_change = False
    # NOTE(review): range(mid_slice, 0, -1) never visits slice 0, so slice 0 can
    # never be propagated-to in the downward sweep — confirm this is intended.
    for i in range(mid_slice, 0, -1):
        if slice_to_change:
            slices_to_remove[i] = True
        elif slices_to_remove[i]:
            slice_to_change = True
    for i in range(0, nz):
        # remove the slice
        if slices_to_remove[i]:
            im_seg.data[:, :, i] *= 0
    # saving the image
    im_seg.save('tmp.segmentation_RPI_c.nii.gz')
    # replacing old segmentation with the corrected one
    sct_image.main('-i tmp.segmentation_RPI_c.nii.gz -setorient {} -o {} -v 0'.
                   format(image_input_orientation, fname_seg_absolute).split())
    os.chdir(curdir)
    # display information about how much of the segmentation has been corrected
    # remove temporary files
    if remove_temp_files:
        # printv("\nRemove temporary files...", verbose)
        rmtree(path_tmp)
def get_parser():
    """Build and return the command-line argument parser for sct_propseg.

    Declares the mandatory arguments (-i input image, -c contrast) and all
    optional propseg tuning/output/QC options. The help strings below are the
    user-facing documentation for each option.
    """
    # Initialize the parser
    parser = SCTArgumentParser(
        description=(
            "This program segments automatically the spinal cord on T1- and T2-weighted images, for any field of view. "
            "You must provide the type of contrast, the image as well as the output folder path. The segmentation "
            "follows the spinal cord centerline, which is provided by an automatic tool: Optic. The initialization of "
            "the segmentation is made on the median slice of the centerline, and can be ajusted using the -init "
            "parameter. The initial radius of the tubular mesh that will be propagated should be adapted to size of "
            "the spinal cord on the initial propagation slice. \n"
            "\n"
            "Primary output is the binary mask of the spinal cord segmentation. This method must provide VTK "
            "triangular mesh of the segmentation (option -mesh). Spinal cord centerline is available as a binary image "
            "(-centerline-binary) or a text file with coordinates in world referential (-centerline-coord).\n"
            "\n"
            "Cross-sectional areas along the spinal cord can be available (-cross). Several tips on segmentation "
            "correction can be found on the 'Correcting sct_propseg' page in the Tutorials section of the "
            "documentation.\n"
            "\n"
            "If the segmentation fails at some location (e.g. due to poor contrast between spinal cord and CSF), edit "
            "your anatomical image (e.g. with fslview) and manually enhance the contrast by adding bright values "
            "around the spinal cord for T2-weighted images (dark values for T1-weighted). Then, launch the "
            "segmentation again.\n"
            "\n"
            "References:\n"
            "  - [De Leener B, Kadoury S, Cohen-Adad J. Robust, accurate and fast automatic segmentation of the spinal "
            "cord. Neuroimage 98, 2014. pp 528-536. DOI: 10.1016/j.neuroimage.2014.04.051](https://pubmed.ncbi.nlm.nih.gov/24780696/)\n"
            "  - [De Leener B, Cohen-Adad J, Kadoury S. Automatic segmentation of the spinal cord and spinal canal "
            "coupled with vertebral labeling. IEEE Trans Med Imaging. 2015 Aug;34(8):1705-18.](https://pubmed.ncbi.nlm.nih.gov/26011879/)"
        )
    )

    mandatory = parser.add_argument_group("\nMANDATORY ARGUMENTS")
    mandatory.add_argument(
        '-i',
        metavar=Metavar.file,
        required=True,
        help="Input image. Example: ti.nii.gz"
    )
    mandatory.add_argument(
        '-c',
        choices=['t1', 't2', 't2s', 'dwi'],
        required=True,
        help="Type of image contrast. If your contrast is not in the available options (t1, t2, t2s, dwi), use "
             "t1 (cord bright / CSF dark) or t2 (cord dark / CSF bright)"
    )

    optional = parser.add_argument_group("\nOPTIONAL ARGUMENTS")
    optional.add_argument(
        "-h",
        "--help",
        action="help",
        help="Show this help message and exit."
    )
    optional.add_argument(
        '-o',
        metavar=Metavar.file,
        help='Output filename. Example: spinal_seg.nii.gz '
    )
    optional.add_argument(
        '-ofolder',
        metavar=Metavar.folder,
        action=ActionCreateFolder,
        help="Output folder."
    )
    optional.add_argument(
        '-down',
        metavar=Metavar.int,
        type=int,
        help="Down limit of the propagation. Default is 0."
    )
    optional.add_argument(
        '-up',
        metavar=Metavar.int,
        type=int,
        help="Up limit of the propagation. Default is the highest slice of the image."
    )
    optional.add_argument(
        '-r',
        metavar=Metavar.int,
        type=int,
        choices=[0, 1],
        default=1,
        help="Whether to remove temporary files. 0 = no, 1 = yes"
    )
    optional.add_argument(
        '-v',
        metavar=Metavar.int,
        type=int,
        choices=[0, 1, 2],
        default=1,
        # Values [0, 1, 2] map to logging levels [WARNING, INFO, DEBUG], but are also used as "if verbose == #" in API
        help="Verbosity. 0: Display only errors/warnings, 1: Errors/warnings + info messages, 2: Debug mode"
    )
    optional.add_argument(
        '-mesh',
        action="store_true",
        help="Output: mesh of the spinal cord segmentation"
    )
    optional.add_argument(
        '-centerline-binary',
        action="store_true",
        help="Output: centerline as a binary image."
    )
    optional.add_argument(
        '-CSF',
        action="store_true",
        help="Output: CSF segmentation."
    )
    optional.add_argument(
        '-centerline-coord',
        action="store_true",
        help="Output: centerline in world coordinates."
    )
    optional.add_argument(
        '-cross',
        action="store_true",
        help="Output: cross-sectional areas."
    )
    optional.add_argument(
        '-init-tube',
        action="store_true",
        help="Output: initial tubular meshes."
    )
    optional.add_argument(
        '-low-resolution-mesh',
        action="store_true",
        help="Output: low-resolution mesh."
    )
    optional.add_argument(
        '-init-centerline',
        metavar=Metavar.file,
        help="R|Filename of centerline to use for the propagation. Use format .txt or .nii; see file structure in "
             "documentation.\n"
             "Replace filename by 'viewer' to use interactive viewer for providing centerline. Example: "
             "-init-centerline viewer"
    )
    optional.add_argument(
        '-init',
        metavar=Metavar.float,
        type=float,
        help="Axial slice where the propagation starts, default is middle axial slice."
    )
    optional.add_argument(
        '-init-mask',
        metavar=Metavar.file,
        help="R|Mask containing three center of the spinal cord, used to initiate the propagation.\n"
             "Replace filename by 'viewer' to use interactive viewer for providing mask. Example: -init-mask viewer"
    )
    optional.add_argument(
        '-mask-correction',
        metavar=Metavar.file,
        help="mask containing binary pixels at edges of the spinal cord on which the segmentation algorithm will be "
             "forced to register the surface. Can be used in case of poor/missing contrast between spinal cord and "
             "CSF or in the presence of artefacts/pathologies."
    )
    optional.add_argument(
        '-rescale',
        metavar=Metavar.float,
        type=float,
        default=1.0,
        help="Rescale the image (only the header, not the data) in order to enable segmentation on spinal cords with "
             "dimensions different than that of humans (e.g., mice, rats, elephants, etc.). For example, if the "
             "spinal cord is 2x smaller than that of human, then use -rescale 2"
    )
    optional.add_argument(
        '-radius',
        metavar=Metavar.float,
        type=float,
        help="Approximate radius (in mm) of the spinal cord. Default is 4."
    )
    optional.add_argument(
        '-nbiter',
        metavar=Metavar.int,
        type=int,
        help="Stop condition (affects only the Z propogation): number of iteration for the propagation for both "
             "direction. Default is 200."
    )
    optional.add_argument(
        '-max-area',
        metavar=Metavar.float,
        type=float,
        help="[mm^2], stop condition (affects only the Z propogation): maximum cross-sectional area. Default is 120."
    )
    optional.add_argument(
        '-max-deformation',
        metavar=Metavar.float,
        type=float,
        help="[mm], stop condition (affects only the Z propogation): maximum deformation per iteration. Default is "
             "2.5"
    )
    optional.add_argument(
        '-min-contrast',
        metavar=Metavar.float,
        type=float,
        help="[intensity value], stop condition (affects only the Z propogation): minimum local SC/CSF contrast, "
             "default is 50"
    )
    optional.add_argument(
        '-d',
        metavar=Metavar.float,
        type=float,
        help="trade-off between distance of most promising point (d is high) and feature strength (d is low), "
             "default depend on the contrast. Range of values from 0 to 50. 15-25 values show good results. Default "
             "is 10."
    )
    optional.add_argument(
        '-distance-search',
        metavar=Metavar.float,
        type=float,
        help="maximum distance of optimal points computation along the surface normals. Range of values from 0 to 30. "
             "Default is 15"
    )
    optional.add_argument(
        '-alpha',
        metavar=Metavar.float,
        type=float,
        help="Trade-off between internal (alpha is high) and external (alpha is low) forces. Range of values from 0 "
             "to 50. Default is 25."
    )
    optional.add_argument(
        '-qc',
        metavar=Metavar.folder,
        action=ActionCreateFolder,
        help="The path where the quality control generated content will be saved."
    )
    optional.add_argument(
        '-qc-dataset',
        metavar=Metavar.str,
        help="If provided, this string will be mentioned in the QC report as the dataset the process was run on."
    )
    optional.add_argument(
        '-qc-subject',
        metavar=Metavar.str,
        help="If provided, this string will be mentioned in the QC report as the subject the process was run on."
    )
    optional.add_argument(
        '-correct-seg',
        metavar=Metavar.int,
        type=int,
        choices=[0, 1],
        default=1,
        help="Enable (1) or disable (0) the algorithm that checks and correct the output segmentation. More "
             "specifically, the algorithm checks if the segmentation is consistent with the centerline provided by "
             "isct_propseg."
    )
    optional.add_argument(
        '-igt',
        metavar=Metavar.file,
        help="File name of ground-truth segmentation."
    )

    return parser
def func_rescale_header(fname_data, rescale_factor, verbose=0):
    """
    Rescale the voxel dimension by modifying the NIFTI header qform. Write the output file in a temp folder.

    :param fname_data: path to the input NIFTI file
    :param rescale_factor: multiplicative factor applied to the 3x3 rotation/zoom block of the qform
    :param verbose: unused; kept for backward compatibility with existing callers
    :return: fname_data_rescaled: path to the rescaled copy, written in a new temp folder
    """
    import nibabel as nib
    img = nib.load(fname_data)
    # get qform
    qform = img.header.get_qform()
    # multiply by scaling factor (only the 3x3 rotation/zoom part; translation untouched)
    qform[0:3, 0:3] *= rescale_factor
    # generate a new nifti file
    header_rescaled = img.header.copy()
    header_rescaled.set_qform(qform)
    # the data are the same -- only the header changes
    # BUG FIX: img.get_data() was deprecated and removed in nibabel >= 5;
    # np.asanyarray(img.dataobj) is the documented replacement.
    img_rescaled = nib.nifti1.Nifti1Image(np.asanyarray(img.dataobj), None, header=header_rescaled)
    path_tmp = tmp_create(basename="propseg")
    fname_data_rescaled = os.path.join(path_tmp, os.path.basename(add_suffix(fname_data, "_rescaled")))
    nib.save(img_rescaled, fname_data_rescaled)
    return fname_data_rescaled
def propseg(img_input, options_dict):
    """
    Build the isct_propseg command line from the parsed arguments, run the
    binary, then rename/check its outputs.

    :param img_input: source image (spinalcordtoolbox Image), to be segmented
    :param options_dict: parsed arguments (argparse.Namespace from get_parser())
    :return: segmented Image

    Bug fixes vs. previous version:
      - `arguments["-d"]` raised TypeError (argparse.Namespace is not
        subscriptable); now uses `arguments.d`.
      - the store_true output flags were tested with `is not None`, which is
        always true (their default is False), so -mesh, -CSF, etc. were passed
        to the binary unconditionally; now tested for truthiness.
      - temp-folder basename was "label_vertebrae" (copy-paste); now "propseg".
    """
    arguments = options_dict
    fname_input_data = img_input.absolutepath
    fname_data = os.path.abspath(fname_input_data)
    contrast_type = arguments.c
    # the propseg binary only knows t1/t2; map t2s and dwi to the closest one
    contrast_type_conversion = {'t1': 't1', 't2': 't2', 't2s': 't2', 'dwi': 't1'}
    contrast_type_propseg = contrast_type_conversion[contrast_type]

    # Starting building the command
    cmd = ['isct_propseg', '-t', contrast_type_propseg]

    # Output file/folder handling
    if arguments.o is not None:
        fname_out = arguments.o
    else:
        fname_out = os.path.basename(add_suffix(fname_data, "_seg"))
    if arguments.ofolder is not None:
        folder_output = arguments.ofolder
    else:
        folder_output = str(pathlib.Path(fname_out).parent)
    if not os.path.isdir(folder_output) and os.path.exists(folder_output):
        logger.error("output directory %s is not a valid directory" % folder_output)
    if not os.path.exists(folder_output):
        os.makedirs(folder_output)
    cmd += ['-o', folder_output]

    if arguments.down is not None:
        cmd += ["-down", str(arguments.down)]
    if arguments.up is not None:
        cmd += ["-up", str(arguments.up)]

    remove_temp_files = arguments.r
    verbose = int(arguments.v)
    # Update for propseg binary
    if verbose > 0:
        cmd += ["-verbose"]

    # Output options (store_true flags: test truthiness, not `is not None`)
    if arguments.mesh:
        cmd += ["-mesh"]
    if arguments.centerline_binary:
        cmd += ["-centerline-binary"]
    if arguments.CSF:
        cmd += ["-CSF"]
    if arguments.centerline_coord:
        cmd += ["-centerline-coord"]
    if arguments.cross:
        cmd += ["-cross"]
    if arguments.init_tube:
        cmd += ["-init-tube"]
    if arguments.low_resolution_mesh:
        cmd += ["-low-resolution-mesh"]

    # Helping options
    use_viewer = None
    use_optic = True  # enabled by default
    init_option = None
    rescale_header = arguments.rescale
    if arguments.init is not None:
        init_option = float(arguments.init)
        if init_option < 0:
            printv('Command-line usage error: ' + str(init_option) + " is not a valid value for '-init'", 1, 'error')
            sys.exit(1)
    if arguments.init_centerline is not None:
        if str(arguments.init_centerline) == "viewer":
            use_viewer = "centerline"
        elif str(arguments.init_centerline) == "hough":
            use_optic = False
        else:
            if rescale_header != 1.0:
                fname_labels_viewer = func_rescale_header(str(arguments.init_centerline), rescale_header, verbose=verbose)
            else:
                fname_labels_viewer = str(arguments.init_centerline)
            cmd += ["-init-centerline", fname_labels_viewer]
            use_optic = False
    if arguments.init_mask is not None:
        if str(arguments.init_mask) == "viewer":
            use_viewer = "mask"
        else:
            if rescale_header != 1.0:
                fname_labels_viewer = func_rescale_header(str(arguments.init_mask), rescale_header)
            else:
                fname_labels_viewer = str(arguments.init_mask)
            cmd += ["-init-mask", fname_labels_viewer]
            use_optic = False
    if arguments.mask_correction is not None:
        cmd += ["-mask-correction", str(arguments.mask_correction)]
    if arguments.radius is not None:
        cmd += ["-radius", str(arguments.radius)]
    if arguments.nbiter is not None:
        cmd += ["-nbiter", str(arguments.nbiter)]
    if arguments.max_area is not None:
        cmd += ["-max-area", str(arguments.max_area)]
    if arguments.max_deformation is not None:
        cmd += ["-max-deformation", str(arguments.max_deformation)]
    if arguments.min_contrast is not None:
        cmd += ["-min-contrast", str(arguments.min_contrast)]
    if arguments.d is not None:
        # BUG FIX: was str(arguments["-d"]) — a Namespace cannot be indexed
        cmd += ["-d", str(arguments.d)]
    if arguments.distance_search is not None:
        cmd += ["-dsearch", str(arguments.distance_search)]
    if arguments.alpha is not None:
        cmd += ["-alpha", str(arguments.alpha)]

    # check if input image is in 3D. Otherwise itk image reader will cut the 4D image in 3D volumes and only take the first one.
    image_input = Image(fname_data)
    image_input_rpi = image_input.copy().change_orientation('RPI')
    nx, ny, nz, nt, px, py, pz, pt = image_input_rpi.dim
    if nt > 1:
        printv('ERROR: your input image needs to be 3D in order to be segmented.', 1, 'error')

    path_data, file_data, ext_data = extract_fname(fname_data)
    path_tmp = tmp_create(basename="propseg")  # was "label_vertebrae" (copy-paste)

    # rescale header (see issue #1406)
    if rescale_header != 1.0:
        fname_data_propseg = func_rescale_header(fname_data, rescale_header)
    else:
        fname_data_propseg = fname_data

    # add to command
    cmd += ['-i', fname_data_propseg]

    # if centerline or mask is asked using viewer
    if use_viewer:
        from spinalcordtoolbox.gui.base import AnatomicalParams
        from spinalcordtoolbox.gui.centerline import launch_centerline_dialog

        params = AnatomicalParams()
        if use_viewer == 'mask':
            params.num_points = 3
            params.interval_in_mm = 15  # superior-inferior interval between two consecutive labels
            params.starting_slice = 'midfovminusinterval'
        if use_viewer == 'centerline':
            # setting maximum number of points to a reasonable value
            params.num_points = 20
            params.interval_in_mm = 30
            params.starting_slice = 'top'
        im_data = Image(fname_data_propseg)

        im_mask_viewer = zeros_like(im_data)
        controller = launch_centerline_dialog(im_data, im_mask_viewer, params)
        fname_labels_viewer = add_suffix(fname_data_propseg, '_labels_viewer')

        if not controller.saved:
            printv('The viewer has been closed before entering all manual points. Please try again.', 1, 'error')
            sys.exit(1)
        # save labels
        controller.as_niftii(fname_labels_viewer)

        # add mask filename to parameters string
        if use_viewer == "centerline":
            cmd += ["-init-centerline", fname_labels_viewer]
        elif use_viewer == "mask":
            cmd += ["-init-mask", fname_labels_viewer]

    # If using OptiC
    elif use_optic:
        image_centerline = optic.detect_centerline(image_input, contrast_type, verbose)
        fname_centerline_optic = os.path.join(path_tmp, 'centerline_optic.nii.gz')
        image_centerline.save(fname_centerline_optic)
        cmd += ["-init-centerline", fname_centerline_optic]

    if init_option is not None:
        # values > 1 are interpreted as an absolute slice index; normalize to [0, 1]
        if init_option > 1:
            init_option /= (nz - 1)
        cmd += ['-init', str(init_option)]

    # enabling centerline extraction by default (needed by check_and_correct_segmentation() )
    cmd += ['-centerline-binary']

    # run propseg
    status, output = run_proc(cmd, verbose, raise_exception=False, is_sct_binary=True)

    # check status is not 0
    if not status == 0:
        printv('Automatic cord detection failed. Please initialize using -init-centerline or -init-mask (see help)',
               1, 'error')
        sys.exit(1)

    # rename output files
    fname_seg_old = os.path.join(folder_output, add_suffix(os.path.basename(fname_data_propseg), "_seg"))
    fname_seg = os.path.join(folder_output, fname_out)
    mv(fname_seg_old, fname_seg)
    fname_centerline_old = os.path.join(folder_output, add_suffix(os.path.basename(fname_data_propseg), "_centerline"))
    fname_centerline = os.path.join(folder_output, os.path.basename(add_suffix(fname_data, "_centerline")))
    mv(fname_centerline_old, fname_centerline)

    # if viewer was used, copy the labelled points to the output folder
    if use_viewer:
        fname_labels_viewer_new = os.path.join(folder_output, os.path.basename(add_suffix(fname_data, "_labels_viewer")))
        copy(fname_labels_viewer, fname_labels_viewer_new)
        # update variable (used later)
        fname_labels_viewer = fname_labels_viewer_new

    # check consistency of segmentation
    if arguments.correct_seg:
        check_and_correct_segmentation(fname_seg, fname_centerline, folder_output=folder_output, threshold_distance=3.0,
                                       remove_temp_files=remove_temp_files, verbose=verbose)

    # copy header from input to segmentation to make sure qform is the same
    printv("Copy header input --> output(s) to make sure qform is the same.", verbose)
    list_fname = [fname_seg, fname_centerline]
    if use_viewer:
        list_fname.append(fname_labels_viewer)
    for fname in list_fname:
        im = Image(fname)
        im.header = image_input.header
        im.save(dtype='int8')  # they are all binary masks hence fine to save as int8

    return Image(fname_seg)
def main(argv=None):
    """Command-line entry point: parse arguments, segment, optionally build a QC report."""
    args = get_parser().parse_args(argv)
    set_loglevel(verbose=args.v)

    fname_in = os.path.abspath(args.i)
    seg_image = propseg(Image(fname_in), args)
    fname_seg = seg_image.absolutepath

    # Generate the QC report only when an output folder for it was requested.
    if args.qc is not None:
        generate_qc(fname_in1=fname_in, fname_seg=fname_seg, args=args,
                    path_qc=os.path.abspath(args.qc),
                    dataset=args.qc_dataset, subject=args.qc_subject,
                    process='sct_propseg')

    display_viewer_syntax([fname_in, fname_seg], colormaps=['gray', 'red'], opacities=['', '1'])
if __name__ == "__main__":
init_sct()
main(sys.argv[1:])
| |
#!/usr/bin/env python
#
# Field class implementation
import struct
import array
import sys
from pif_ir.meta_ir.common import *
from pif_ir.air.utils.air_exception import *
def field_width_get(field_name, attrs, field_values, remaining_bits=None):
    """
    @brief Get the width of a field based on current values
    @param field_name The name of the field (for debug messages only)
    @param attrs The attributes from the top level desc: an int (width in
    bits), a dict carrying a "width" key, or a string expression evaluated
    against field_values
    @param field_values A dict used in eval if width is an expression
    @param remaining_bits Number of bits remaining in header, if known
    @todo Consider changing semantics to return an error (None)
    and do not do the assert here
    If the width calculation is negative, 0 is returned.
    """
    if isinstance(attrs, int):
        return attrs
    if isinstance(attrs, dict):
        meta_ir_assert("width" in attrs.keys(), "Bad field attrs; no width")
        return field_width_get(field_name, attrs["width"], field_values)
    # Attempt to evaluate a string expression
    meta_ir_assert(isinstance(attrs, str),
                   "Bad field attrs; not int, dict, string")
    try:
        # SECURITY NOTE: eval of an IR-supplied expression. Builtins are
        # disabled, but this must never be fed untrusted input.
        width = eval(attrs, {"__builtins__": None}, field_values)
    except Exception:  # narrowed from bare except; @todo NameError?
        # BUG FIX: the message was built with '+' against a '%s' placeholder
        # ("Bad width expression for %s" + field_name), yielding
        # "Bad width expression for %s<name>"; use %-interpolation.
        raise AirReferenceError("Bad width expression for %s" % field_name)
    # If calculation is negative, return 0
    if width < 0:
        return 0
    return width
class field_instance(object):
    """
    @brief A field is a value and a map of attributes
    @param name The name of the field as it appears in its parent header
    @param attrs Either a dict or an int; if an int, it is the width in bits
    @param width Width in bits of the field
    @param value An optional initial value of the field (host representation)

    Fields of width <= 64 bits are stored as integer values
    Fields of greater width are stored as "B" arrays. width must be
    divisible by 8.

    All bit/byte arithmetic uses floor division (//) so the code behaves
    identically on Python 2 and 3; true division ('/') would produce floats
    under Python 3 and break indexing/slicing.
    """

    # Masks for bit widths <= 8: BYTE_MASK[w] has the low w bits set
    BYTE_MASK = [0, 1, 3, 7, 0xf, 0x1f, 0x3f, 0x7f, 0xff]

    def __init__(self, name, attrs, width, value=None):
        self.name = name    # field name within its parent header
        self.attrs = attrs  # raw attributes from the IR description
        self.width = width  # width in bits
        self.value = value  # current value (int, or bytearray when width > 64)

    def extract(self, buf, header_offset, bit_offset):
        """
        @brief Extract the field value from the given header instance
        @param buf A byte array holding the header data
        @param header_offset Start of the header instance in the buf
        @param bit_offset The bit offset into the header where the field starts

        NOTE that bit_offset is the offset from the start of the header, so
        it may be greater than 8.
        @todo Assumes the field does not extend beyond the packet boundary
        """
        meta_ir_assert(self.width >= 0,
                       "Unknown width when extracting field %s" % self.name)
        byte_offset = header_offset + (bit_offset // 8)
        bit_offset = bit_offset % 8
        width = self.width

        # Easy cases: byte-aligned with a standard (or byte-multiple) width
        if bit_offset == 0:
            base = byte_offset
            if width == 8:
                self.value = struct.unpack("!B", buf[base:base + 1])[0]
                return self.value
            elif width == 16:
                self.value = struct.unpack("!H", buf[base:base + 2])[0]
                return self.value
            elif width == 32:
                self.value = struct.unpack("!L", buf[base:base + 4])[0]
                return self.value
            elif width == 64:
                self.value = struct.unpack("!Q", buf[base:base + 8])[0]
                return self.value
            elif width > 64:
                self.value = bytearray(buf[base: base + width // 8])
                return self.value

        meta_ir_assert(width < 64, "Bad field width/offset %d, %d for %s" %
                       (width, bit_offset, self.name))

        # Extract bytes into an array
        # Iterate thru the array accumulating value
        # Note that bit offsets are from high order of byte
        bytes_needed = (width + bit_offset + 7) // 8
        # BUG FIX: byte_offset already includes header_offset (computed above);
        # the previous code added header_offset a second time here, mis-reading
        # unaligned fields whenever header_offset was non-zero.
        low = byte_offset
        high = low + bytes_needed
        raw = bytearray(buf[low:high])  # renamed from 'bytes' (shadowed builtin)
        value = 0
        while width > 0:
            if width + bit_offset <= 8:
                # The remainder of the field fits inside the current byte
                high_bit = 7 - bit_offset
                low_bit = high_bit - width + 1
                val_from_byte = (raw[0] >> low_bit) & self.BYTE_MASK[width]
                shift = width if width < 8 else 8
                value = (value << shift) + val_from_byte
                break
            else:
                if bit_offset == 0:
                    # Whole byte belongs to the field
                    value = (value << 8) + raw.pop(0)
                    width -= 8
                else:
                    # Consume the low (8 - bit_offset) bits of the first byte
                    high_bit = 7 - bit_offset
                    val_from_byte = raw.pop(0) & self.BYTE_MASK[high_bit + 1]
                    width -= (high_bit + 1)
                    bit_offset = 0
                    shift = width if width < 8 else 8
                    value = (value << shift) + val_from_byte
        self.value = value
        return value

    def update_header_bytes(self, byte_list, bit_offset):
        """
        @brief Update a header (a list of bytes) with the current field value
        @param byte_list A bytearray representing the entire header
        @param bit_offset The bit offset of the field from the start
        of the header
        """
        meta_ir_assert(self.width >= 0,
                       "Unknown field width for %s when updating header"
                       % self.name)
        byte_offset = (bit_offset // 8)
        bit_offset = bit_offset % 8
        width = self.width

        # Easy cases:
        if width == 0:
            return
        # Value is an array of bytes: copy them in
        if isinstance(self.value, bytearray):
            for idx in range(len(self.value)):
                byte_list[byte_offset + idx] = self.value[idx]
            return
        # Byte boundary and just bytes
        # @todo Assumes big endian in the packet
        value = self.value
        if bit_offset == 0 and width % 8 == 0:
            for idx in reversed(range(width // 8)):
                byte_list[byte_offset + idx] = value & 0xff
                value >>= 8
            return

        # Hard cases: Shift value appropriately and convert to bytes
        # @todo This will have a problem if value << shift overflows
        bytes_needed = (width + bit_offset + 7) // 8
        shift = 8 - ((bit_offset + width) % 8)
        if shift == 8:
            shift = 0
        value <<= shift
        for idx in range(bytes_needed):
            value_byte = (value >> (8 * (bytes_needed - 1 - idx))) & 0xFF
            if width + bit_offset <= 8:  # Fits in this byte and done
                shift = 8 - (bit_offset + width)
                mask = self.BYTE_MASK[width] << shift
                byte_list[byte_offset + idx] &= ~mask
                byte_list[byte_offset + idx] |= value_byte
                # Should be last entry
                width = 0
            else:  # Goes to end of byte
                if bit_offset == 0:  # Covers whole byte
                    # width > 8 by above
                    byte_list[byte_offset + idx] = value_byte
                    width -= 8
                else:  # Covers lower bits of byte
                    # width + bit_offset > 8, so goes to end of byte
                    mask = self.BYTE_MASK[8 - bit_offset]
                    byte_list[byte_offset + idx] &= ~mask
                    byte_list[byte_offset + idx] |= value_byte
                    width -= (8 - bit_offset)
                    bit_offset = 0
################################################################
#
# Test code
#
################################################################
if __name__ == "__main__":
    # Self-test harness for the field-width helpers and the field class.
    # Usage: python <module> <logfile>
    logging.basicConfig(level=logging.DEBUG, filename=sys.argv[1])
    logging.info("RUNNING MODULE: %s" % __file__)
    # Testing code for field class: the width attribute may be given as a
    # plain int, a dict with a "width" key, or an expression string that is
    # evaluated against a symbol table.
    attrs = 17
    meta_ir_assert(field_width_get("fld", attrs, {}) == 17, "Failed field test 1")
    attrs = {"width" : 17}
    meta_ir_assert(field_width_get("fld", attrs, {}) == 17, "Failed field test 2")
    attrs = "10 + 7"
    meta_ir_assert(field_width_get("fld", attrs, {}) == 17, "Failed field test 3")
    attrs = "x + 7"
    meta_ir_assert(field_width_get("fld", attrs, {"x" : 10}) == 17,
                   "Failed field test 4")
    attrs = {"width" : "x + 7"}
    meta_ir_assert(field_width_get("fld", attrs, {"x" : 10}) == 17,
                   "Failed field test 5")
    try:
        field_width_get("fld", "bad string", {})
        meta_ir_assert(False, "Failed field test 6")
    except AirReferenceError:
        pass
    # Two VLAN tags w/ IDs 356 and 200; the first has priority 5
    data = struct.pack("BBBBBBBB", 0x81, 0, 0xa1, 0x64, 0x81, 0, 0, 0xc8)
    field = field_instance("fld", {}, 8)
    field.extract(data, 0, 0)
    meta_ir_assert(field.value == 0x81, "Failed field test 7")
    field = field_instance("fld", {}, 16)
    field.extract(data, 0, 0)
    # Bug fix: this assertion previously reused the "test 7" label, which
    # made failures ambiguous.
    meta_ir_assert(field.value == 0x8100, "Failed field test 7b")
    field = field_instance("vid", {}, 12)
    field.extract(data, 0, 20)
    meta_ir_assert(field.value == 356, "Failed field test 8")
    field = field_instance("pcp", {}, 3)
    field.extract(data, 0, 16)
    meta_ir_assert(field.value == 5, "Failed field test 9")
    # @todo Write test cases for fields that are longer byte streams
    # Wrap in list() so extend() also works under Python 3, where range()
    # returns a lazy object instead of a list.
    values = list(range(16))
    values.extend([0xaaaaaaaa, 0x55555555, 0xffffffff])
    # Round-trip every width/offset/value combination through
    # update_header_bytes + extract.  All-ones baseline catches failures
    # to clear bits; all-zeros baseline catches failures to set bits.
    # Test with all 1 bits as baseline
    for width in range(33):
        for offset in range(32):
            for value in values:
                field = field_instance("f%d_%d" % (width, offset), {}, width)
                value &= ((1 << width) - 1)
                field.value = value
                byte_list = bytearray([0xff] * 8)
                field.update_header_bytes(byte_list, offset)
                field.extract(byte_list, 0, offset)
                meta_ir_assert(field.value == value,
                               "Failed, all 1s, width %d, offset %d" %
                               (width, offset))
    # Test with all 0 bits as baseline
    for width in range(33):
        for offset in range(32):
            for value in values:
                field = field_instance("f%d_%d" % (width, offset), {}, width)
                value &= ((1 << width) - 1)
                field.value = value
                byte_list = bytearray(8)
                field.update_header_bytes(byte_list, offset)
                field.extract(byte_list, 0, offset)
                meta_ir_assert(field.value == value,
                               "Failed convert case width %d, offset %d" %
                               (width, offset))
| |
"""
-----------------------------------------------------------------------------
Copyright (c) 2009-2017, Shotgun Software Inc
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
- Neither the name of the Shotgun Software Inc nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-----------------------------------------------------------------------------
-----------------------------------------------------------------------------
M O C K G U N
-----------------------------------------------------------------------------
Experimental software ahead!
----------------------------
Disclaimer! Mockgun is in its early stages of development. It is not fully
compatible with the Shotgun API yet and we offer no guarantees at this point
that future versions of Mockgun will be backwards compatible. Consider this
alpha level software and use at your own risk.
What is Mockgun?
----------------
Mockgun is a Shotgun API mocker. It's a class that has got *most* of the same
methods and parameters that the Shotgun API has got. Mockgun is essentially a
Shotgun *emulator* that (for basic operations) looks and feels like Shotgun.
The primary purpose of Mockgun is to drive unit test rigs where it becomes
too slow, cumbersome or non-practical to connect to a real Shotgun. Using a
Mockgun for unit tests means that a test can be rerun over and over again
from exactly the same database state. This can be hard to do if you connect
to a live Shotgun instance.
How do I use Mockgun?
---------------------
First of all, you need a Shotgun schema to run against. This will define
all the fields and entities that mockgun will use. Simply connect to
your Shotgun site and use the generate_schema() method to download
the schema data:
# connect to your site
from shotgun_api3 import Shotgun
sg = Shotgun("https://mysite.shotgunstudio.com", script_name="xyz", api_key="abc")
# write out schema data to files
from shotgun_api3.lib import mockgun
mockgun.generate_schema(sg, "/tmp/schema", "/tmp/entity_schema")
Now that you have a schema, you can tell your mockgun instance about it.
We do this as a class-level operation, so that the constructor can be
exactly like the real Shotgun one:
from shotgun_api3.lib import mockgun
# tell mockgun about the schema
mockgun.Shotgun.set_schema_paths("/tmp/schema", "/tmp/entity_schema")
# we are ready to mock!
# this call will not connect to mysite, but instead create a
# mockgun instance which is connected to an *empty* shotgun site
# which has got the same schema as mysite.
sg = mockgun.Shotgun("https://mysite.shotgunstudio.com", script_name="xyz", api_key="abc")
# now you can start putting stuff in
print(sg.create("HumanUser", {"firstname": "John", "login": "john"}))
# prints {'login': 'john', 'type': 'HumanUser', 'id': 1, 'firstname': 'John'}
# and find what you have created
print(sg.find("HumanUser", [["login", "is", "john"]]))
# prints [{'type': 'HumanUser', 'id': 1}]
That's it! Mockgun is used to run the Shotgun Pipeline Toolkit unit test rig.
Mockgun has a 'database' in the form of a dictionary stored in Mockgun._db
By editing this directly, you can modify the database without going through
the API.
What are the limitations?
---------------------
There are many. Don't expect mockgun to be fully featured at this point.
Below is a non-exhaustive list of things that we still need to implement:
- Many find queries won't work
- Methods around session handling and authentication is not implemented
- Attachments and upload are rudimentary at best
- Schema modification isn't supported and most likely never will be
- There is no validation or sanitation
"""
import datetime
from ... import ShotgunError
from ...shotgun import _Config
from .errors import MockgunError
from .schema import SchemaFactory
from .. import six
# ----------------------------------------------------------------------------
# Version
__version__ = "0.0.1"
# ----------------------------------------------------------------------------
# API
class Shotgun(object):
    """
    Mockgun is a mocked Shotgun API, designed for test purposes.
    It generates an object which looks and feels like a normal Shotgun API instance.
    Instead of connecting to a real server, it keeps all its data in memory in a way
    which makes it easy to introspect and test.

    The methods presented in this class reflect the Shotgun API and are therefore
    sparsely documented.

    Please note that this class is built for test purposes only and only creates an
    object which *roughly* resembles the Shotgun API - however, for most common
    use cases, this is enough to be able to perform relevant and straight forward
    testing of code.
    """

    # Class-level schema file locations, shared by every instance.
    __schema_path = None
    __schema_entity_path = None

    @classmethod
    def set_schema_paths(cls, schema_path, schema_entity_path):
        """
        Set the path where schema files can be found. This is done at the class
        level so all Shotgun instances will share the same schema.
        The responsibility to generate and load these files is left to the user
        changing the default value.

        :param schema_path: Directory path where schema files are.
        :param schema_entity_path: Directory path where entity schema files are.
        """
        cls.__schema_path = schema_path
        cls.__schema_entity_path = schema_entity_path

    @classmethod
    def get_schema_paths(cls):
        """
        Returns a tuple with paths to the files which are part of the schema.
        These paths can then be used in generate_schema if needed.

        :returns: A tuple with schema_file_path and schema_entity_file_path
        """
        return (cls.__schema_path, cls.__schema_entity_path)

    def __init__(self,
                 base_url,
                 script_name=None,
                 api_key=None,
                 convert_datetimes_to_utc=True,
                 http_proxy=None,
                 ensure_ascii=True,
                 connect=True,
                 ca_certs=None,
                 login=None,
                 password=None,
                 sudo_as_login=None,
                 session_token=None,
                 auth_token=None):
        # emulate the config object in the Shotgun API.
        # these settings won't make sense for mockgun, but
        # having them present means code can get and set them
        # the way they would expect to in the real API.
        self.config = _Config(self)
        self.config.set_server_params(base_url)

        # load in the shotgun schema to associate with this Shotgun
        (schema_path, schema_entity_path) = self.get_schema_paths()

        if schema_path is None or schema_entity_path is None:
            raise MockgunError("Cannot create Mockgun instance because no schema files have been defined. "
                               "Before creating a Mockgun instance, please call Mockgun.set_schema_paths() "
                               "in order to specify which ShotGrid schema Mockgun should operate against.")

        self._schema, self._schema_entity = SchemaFactory.get_schemas(schema_path, schema_entity_path)

        # initialize the "database": one dict of rows (keyed by id) per entity type
        self._db = dict((entity, {}) for entity in self._schema)

        # set some basic public members that exist in the Shotgun API
        self.base_url = base_url

        # bootstrap the event log
        # let's make sure there is at least one event log id in our mock db
        data = {}
        data["event_type"] = "Hello_Mockgun_World"
        data["description"] = "Mockgun was born. Yay."
        self.create("EventLogEntry", data)

        # counter of find() calls; handy for asserting query counts in tests
        self.finds = 0

    ###################################################################################################
    # public API methods

    def get_session_token(self):
        """Return a placeholder token (Mockgun performs no authentication)."""
        return "bogus_session_token"

    def schema_read(self):
        """Return the full field schema for all entity types."""
        return self._schema

    def schema_field_create(self, entity_type, data_type, display_name, properties=None):
        """Schema modification is not supported by Mockgun."""
        raise NotImplementedError

    def schema_field_update(self, entity_type, field_name, properties):
        """Schema modification is not supported by Mockgun."""
        raise NotImplementedError

    def schema_field_delete(self, entity_type, field_name):
        """Schema modification is not supported by Mockgun."""
        raise NotImplementedError

    def schema_entity_read(self):
        """Return the entity-level schema."""
        return self._schema_entity

    def schema_field_read(self, entity_type, field_name=None):
        """
        Return the schema for all fields of entity_type, or only for
        field_name if it is given.
        """
        if field_name is None:
            return self._schema[entity_type]
        else:
            return dict((k, v) for k, v in self._schema[entity_type].items() if k == field_name)

    def find(
        self, entity_type, filters, fields=None, order=None, filter_operator=None,
        limit=0, retired_only=False, page=0
    ):
        """
        Return all rows of entity_type matching the given filters.

        NOTE(review): the limit and page arguments are accepted for API
        compatibility but are currently ignored by Mockgun.
        """
        self.finds += 1

        self._validate_entity_type(entity_type)
        # do not validate custom fields - this makes it hard to mock up a field quickly
        # self._validate_entity_fields(entity_type, fields)

        # FIXME: This should be refactored so that we can use the complex filter
        # style in nested filter operations.
        if isinstance(filters, dict):
            # complex filter style!
            # {'conditions': [{'path': 'id', 'relation': 'is', 'values': [1]}], 'logical_operator': 'and'}

            resolved_filters = []
            for f in filters["conditions"]:

                if f["path"].startswith("$FROM$"):
                    # special $FROM$Task.step.entity syntax
                    # skip this for now
                    continue

                if len(f["values"]) != 1:
                    # {'path': 'id', 'relation': 'in', 'values': [1,2,3]} --> ["id", "in", [1,2,3]]
                    resolved_filters.append([f["path"], f["relation"], f["values"]])
                else:
                    # {'path': 'id', 'relation': 'is', 'values': [3]} --> ["id", "is", 3]
                    resolved_filters.append([f["path"], f["relation"], f["values"][0]])

        else:
            # traditional style sg filters
            resolved_filters = filters

        results = [
            # Apply the filters for every single entities for the given entity type.
            row for row in self._db[entity_type].values()
            if self._row_matches_filters(
                entity_type, row, resolved_filters, filter_operator, retired_only
            )
        ]

        # handle the ordering of the recordset
        if order:
            # order: [{"field_name": "code", "direction": "asc"}, ... ]
            for order_entry in order:
                if "field_name" not in order_entry:
                    raise ValueError("Order clauses must be list of dicts with keys 'field_name' and 'direction'!")

                order_field = order_entry["field_name"]

                if order_entry["direction"] == "asc":
                    desc_order = False
                elif order_entry["direction"] == "desc":
                    desc_order = True
                else:
                    raise ValueError("Unknown ordering direction")

                results = sorted(results, key=lambda k: k[order_field], reverse=desc_order)

        # type and id are always returned, mirroring the real API
        if fields is None:
            fields = set(["type", "id"])
        else:
            fields = set(fields) | set(["type", "id"])

        # get the values requested
        val = [dict((field, self._get_field_from_row(entity_type, row, field)) for field in fields) for row in results]

        return val

    def find_one(
        self, entity_type, filters, fields=None, order=None, filter_operator=None,
        retired_only=False
    ):
        """Like find(), but return only the first match (or None)."""
        results = self.find(
            entity_type, filters, fields=fields,
            order=order, filter_operator=filter_operator, retired_only=retired_only
        )
        return results[0] if results else None

    def batch(self, requests):
        """
        Execute a list of create/update/delete requests and return their
        results in order.

        :raises ShotgunError: on an unknown request_type.
        """
        results = []
        for request in requests:
            if request["request_type"] == "create":
                results.append(self.create(request["entity_type"], request["data"]))
            elif request["request_type"] == "update":
                # note: Shotgun.update returns a list of a single item
                results.append(self.update(request["entity_type"], request["entity_id"], request["data"])[0])
            elif request["request_type"] == "delete":
                results.append(self.delete(request["entity_type"], request["entity_id"]))
            else:
                raise ShotgunError("Invalid request type %s in request %s" % (request["request_type"], request))
        return results

    def create(self, entity_type, data, return_fields=None):
        """
        Create a new row for entity_type populated from data and return
        a dict with the stored values plus "type" and "id".
        """
        # special handling of storage fields - if a field value
        # is a dict with a key local_path, then add fields
        # local_path_linux, local_path_windows, local_path_mac
        # as a reflection of this
        for d in data:
            if isinstance(data[d], dict) and "local_path" in data[d]:
                # partly imitate some of the business logic happening on the
                # server side of shotgun when a file/link entity value is created
                if "local_storage" not in data[d]:
                    data[d]["local_storage"] = {"id": 0, "name": "auto_generated_by_mockgun", "type": "LocalStorage"}
                if "local_path_linux" not in data[d]:
                    data[d]["local_path_linux"] = data[d]["local_path"]
                if "local_path_windows" not in data[d]:
                    data[d]["local_path_windows"] = data[d]["local_path"]
                if "local_path_mac" not in data[d]:
                    data[d]["local_path_mac"] = data[d]["local_path"]

        self._validate_entity_type(entity_type)
        self._validate_entity_data(entity_type, data)
        self._validate_entity_fields(entity_type, return_fields)

        try:
            # get next id in this table
            next_id = max(self._db[entity_type]) + 1
        except ValueError:
            # empty table: max() raised, start ids at 1
            next_id = 1

        row = self._get_new_row(entity_type)

        self._update_row(entity_type, row, data)

        row["id"] = next_id

        self._db[entity_type][next_id] = row

        if return_fields is None:
            result = dict((field, self._get_field_from_row(entity_type, row, field)) for field in data)
        else:
            result = dict((field, self._get_field_from_row(entity_type, row, field)) for field in return_fields)

        result["type"] = row["type"]
        result["id"] = row["id"]

        return result

    def update(self, entity_type, entity_id, data):
        """
        Update an existing row in place.  Mirrors the real API by
        returning a list containing the single updated record.
        """
        self._validate_entity_type(entity_type)
        self._validate_entity_data(entity_type, data)
        self._validate_entity_exists(entity_type, entity_id)

        row = self._db[entity_type][entity_id]
        self._update_row(entity_type, row, data)

        return [dict((field, item) for field, item in row.items() if field in data or field in ("type", "id"))]

    def delete(self, entity_type, entity_id):
        """
        Retire a row.  Returns True if the row was live, False if it was
        already retired.
        """
        self._validate_entity_type(entity_type)
        self._validate_entity_exists(entity_type, entity_id)

        row = self._db[entity_type][entity_id]
        if not row["__retired"]:
            row["__retired"] = True
            return True
        else:
            return False

    def revive(self, entity_type, entity_id):
        """
        Un-retire a row.  Returns True if the row was retired, False if
        it was already live.
        """
        self._validate_entity_type(entity_type)
        self._validate_entity_exists(entity_type, entity_id)

        row = self._db[entity_type][entity_id]
        if row["__retired"]:
            row["__retired"] = False
            return True
        else:
            return False

    def upload(self, entity_type, entity_id, path, field_name=None, display_name=None, tag_list=None):
        """File upload is not supported by Mockgun."""
        raise NotImplementedError

    def upload_thumbnail(self, entity_type, entity_id, path, **kwargs):
        """Thumbnail upload is silently accepted and discarded."""
        pass

    ###################################################################################################
    # internal methods and members

    def _validate_entity_type(self, entity_type):
        """Raise ShotgunError if entity_type is not part of the schema."""
        if entity_type not in self._schema:
            raise ShotgunError("%s is not a valid entity" % entity_type)

    def _validate_entity_data(self, entity_type, data):
        """
        Validate create/update payloads: fields must exist in the schema
        and values must match the field's declared data type.
        """
        if "id" in data or "type" in data:
            raise ShotgunError("Can't set id or type on create or update")

        self._validate_entity_fields(entity_type, data.keys())

        for field, item in data.items():

            if item is None:
                # none is always ok
                continue

            field_info = self._schema[entity_type][field]

            if field_info["data_type"]["value"] == "multi_entity":
                if not isinstance(item, list):
                    raise ShotgunError(
                        "%s.%s is of type multi_entity, but data %s is not a list" %
                        (entity_type, field, item)
                    )
                elif item and any(not isinstance(sub_item, dict) for sub_item in item):
                    raise ShotgunError(
                        "%s.%s is of type multi_entity, but data %s contains a non-dictionary" %
                        (entity_type, field, item)
                    )
                elif item and any("id" not in sub_item or "type" not in sub_item for sub_item in item):
                    raise ShotgunError(
                        "%s.%s is of type multi-entity, but an item in data %s does not contain 'type' and 'id'" %
                        (entity_type, field, item)
                    )
                elif item and any(
                    sub_item["type"] not in field_info["properties"]["valid_types"]["value"] for sub_item in item
                ):
                    raise ShotgunError(
                        "%s.%s is of multi-type entity, but an item in data %s has an invalid type (expected one of %s)"
                        % (entity_type, field, item, field_info["properties"]["valid_types"]["value"])
                    )

            elif field_info["data_type"]["value"] == "entity":
                if not isinstance(item, dict):
                    raise ShotgunError(
                        "%s.%s is of type entity, but data %s is not a dictionary" %
                        (entity_type, field, item)
                    )
                elif "id" not in item or "type" not in item:
                    raise ShotgunError(
                        "%s.%s is of type entity, but data %s does not contain 'type' and 'id'"
                        % (entity_type, field, item)
                    )
                # elif item["type"] not in field_info["properties"]["valid_types"]["value"]:
                #     raise ShotgunError(
                #         "%s.%s is of type entity, but data %s has an invalid type (expected one of %s)" %
                #         (entity_type, field, item, field_info["properties"]["valid_types"]["value"])
                #     )

            else:
                try:
                    sg_type = field_info["data_type"]["value"]
                    python_type = {"number": int,
                                   "float": float,
                                   "checkbox": bool,
                                   "percent": int,
                                   "text": six.string_types,
                                   "serializable": dict,
                                   "date": datetime.date,
                                   "date_time": datetime.datetime,
                                   "list": six.string_types,
                                   "status_list": six.string_types,
                                   "url": dict}[sg_type]
                except KeyError:
                    raise ShotgunError(
                        "Field %s.%s: Handling for ShotGrid type %s is not implemented" %
                        (entity_type, field, sg_type)
                    )

                if not isinstance(item, python_type):
                    # Bug fix: the format arguments were previously in the
                    # wrong order (type(item) in the schema-type slot and
                    # sg_type in the data slot), producing a garbled message.
                    raise ShotgunError(
                        "%s.%s is of type %s, but data %s is not of type %s" %
                        (entity_type, field, sg_type, item, python_type)
                    )

                    # TODO: add check for correct timezone

    def _validate_entity_fields(self, entity_type, fields):
        """
        Check that every requested field exists for entity_type,
        recursing through dotted deep-link fields (a.EntityType.b).
        """
        self._validate_entity_type(entity_type)
        if fields is not None:
            valid_fields = set(self._schema[entity_type].keys())
            for field in fields:
                try:
                    field2, entity_type2, field3 = field.split(".", 2)
                    self._validate_entity_fields(entity_type2, [field3])
                except ValueError:
                    if field not in valid_fields and field not in ("type", "id"):
                        raise ShotgunError("%s is not a valid field for entity %s" % (field, entity_type))

    def _get_default_value(self, entity_type, field):
        """Return the schema default for a field (always [] for multi_entity)."""
        field_info = self._schema[entity_type][field]
        if field_info["data_type"]["value"] == "multi_entity":
            default_value = []
        else:
            default_value = field_info["properties"]["default_value"]["value"]
        return default_value

    def _get_new_row(self, entity_type):
        """Return a fresh row with every schema field set to its default value."""
        row = {"type": entity_type, "__retired": False}
        for field in self._schema[entity_type]:
            # delegate to _get_default_value so the default logic lives
            # in exactly one place
            row[field] = self._get_default_value(entity_type, field)
        return row

    def _compare(self, field_type, lval, operator, rval):
        """
        Compares a field using the operator and value provide by the filter.

        :param str field_type: Type of the field we are operating on.
        :param lval: Value inside that field. Can be of any type: datetime, date, int, str, bool, etc.
        :param str operator: Name of the operator to use.
        :param rval: The value following the operator in a filter.

        :returns: The result of the operator that was applied.
        :rtype: bool
        """
        # If we have a list of scalar values
        if isinstance(lval, list) and field_type != "multi_entity":
            # Compare each one. If one matches the predicate we're good!
            return any((self._compare(field_type, sub_val, operator, rval)) for sub_val in lval)

        if field_type == "checkbox":
            if operator == "is":
                return lval == rval
            elif operator == "is_not":
                return lval != rval
        elif field_type in ("float", "number", "date", "date_time"):
            if operator == "is":
                return lval == rval
            elif operator == "is_not":
                return lval != rval
            elif operator == "less_than":
                return lval < rval
            elif operator == "greater_than":
                return lval > rval
            elif operator == "between":
                return lval >= rval[0] and lval <= rval[1]
            elif operator == "not_between":
                return lval < rval[0] or lval > rval[1]
            elif operator == "in":
                return lval in rval
        elif field_type in ("list", "status_list"):
            if operator == "is":
                return lval == rval
            elif operator == "is_not":
                return lval != rval
            elif operator == "in":
                return lval in rval
            elif operator == "not_in":
                return lval not in rval
        elif field_type == "entity_type":
            if operator == "is":
                return lval == rval
        elif field_type == "text":
            if operator == "is":
                return lval == rval
            elif operator == "is_not":
                return lval != rval
            elif operator == "in":
                return lval in rval
            elif operator == "contains":
                return rval in lval
            elif operator == "not_contains":
                return lval not in rval
            elif operator == "starts_with":
                return lval.startswith(rval)
            elif operator == "ends_with":
                return lval.endswith(rval)
            elif operator == "not_in":
                return lval not in rval
        elif field_type == "entity":
            if operator == "is":
                # If one of the two is None, ensure both are.
                if lval is None or rval is None:
                    return lval == rval
                # Both values are set, compare them.
                return lval["type"] == rval["type"] and lval["id"] == rval["id"]
            elif operator == "is_not":
                # If one of the two is None, ensure they differ.
                # (A redundant second "rval is None" branch was removed here;
                # it was unreachable after this check.)
                if lval is None or rval is None:
                    return lval != rval
                return lval["type"] != rval["type"] or lval["id"] != rval["id"]
            elif operator == "in":
                # Bug fix: "in" means lval matches ANY candidate entity;
                # this previously used all(), which required lval to equal
                # every entity in rval.
                return any((lval["type"] == sub_rval["type"] and lval["id"] == sub_rval["id"]) for sub_rval in rval)
            elif operator == "type_is":
                return lval["type"] == rval
            elif operator == "type_is_not":
                return lval["type"] != rval
            elif operator == "name_contains":
                return rval in lval["name"]
            elif operator == "name_not_contains":
                return rval not in lval["name"]
            elif operator == "name_starts_with":
                return lval["name"].startswith(rval)
            elif operator == "name_ends_with":
                return lval["name"].endswith(rval)
        elif field_type == "multi_entity":
            if operator == "is":
                if rval is None:
                    return len(lval) == 0
                return rval["id"] in (sub_lval["id"] for sub_lval in lval)
            elif operator == "is_not":
                if rval is None:
                    return len(lval) != 0
                return rval["id"] not in (sub_lval["id"] for sub_lval in lval)

        raise ShotgunError("The %s operator is not supported on the %s type" % (operator, field_type))

    def _get_field_from_row(self, entity_type, row, field):
        """
        Resolve a (possibly dotted deep-link) field for a row, following
        entity links through the in-memory database.  Unknown fields
        return None, matching real Shotgun behavior.
        """
        # split dotted form fields
        try:
            # is it something like sg_sequence.Sequence.code ?
            field2, entity_type2, field3 = field.split(".", 2)

            if field2 in row:

                field_value = row[field2]

                # If we have a list of links, retrieve the subfields one by one.
                if isinstance(field_value, list):
                    values = []
                    for linked_row in field_value:
                        # Make sure we're actually iterating on links.
                        if not isinstance(linked_row, dict):
                            raise ShotgunError("Invalid deep query field %s.%s" % (entity_type, field))

                        # Skips entities that are not of the requested type.
                        if linked_row["type"] != entity_type2:
                            continue

                        entity = self._db[linked_row["type"]][linked_row["id"]]

                        sub_field_value = self._get_field_from_row(entity_type2, entity, field3)
                        values.append(sub_field_value)
                    return values

                # The field is not set, so return None.
                elif field_value is None:
                    return None

                # not multi entity, must be entity.
                elif not isinstance(field_value, dict):
                    raise ShotgunError("Invalid deep query field %s.%s" % (entity_type, field))

                # make sure that types in the query match type in the linked field
                if entity_type2 != field_value["type"]:
                    raise ShotgunError("Deep query field %s.%s does not match type "
                                       "with data %s" % (entity_type, field, field_value))

                # ok so looks like the value is an entity link
                # e.g. db contains: {"sg_sequence": {"type":"Sequence", "id": 123 } }
                linked_row = self._db[field_value["type"]][field_value["id"]]

                return self._get_field_from_row(entity_type2, linked_row, field3)
            else:
                # sg returns none for unknown stuff
                return None

        except ValueError:
            # this is not a deep-linked field - just something like "code"
            if field in row:
                return row[field]
            else:
                # sg returns none for unknown stuff
                return None

    def _get_field_type(self, entity_type, field):
        """Return the schema data type of a (possibly deep-linked) field."""
        # split dotted form fields
        try:
            field2, entity_type2, field3 = field.split(".", 2)
            return self._get_field_type(entity_type2, field3)
        except ValueError:
            return self._schema[entity_type][field]["data_type"]["value"]

    def _row_matches_filter(self, entity_type, row, sg_filter, retired_only):
        """Evaluate a single [field, operator, rval] filter against a row."""
        try:
            field, operator, rval = sg_filter
        except ValueError:
            raise ShotgunError("Filters must be in the form [lval, operator, rval]")

        # Special case, field is None when we have a filter operator.
        if field is None:
            if operator in ["any", "all"]:
                return self._row_matches_filters(entity_type, row, rval, operator, retired_only)
            else:
                raise ShotgunError("Unknown filter_operator type: %s" % operator)
        else:
            lval = self._get_field_from_row(entity_type, row, field)

            field_type = self._get_field_type(entity_type, field)

            # if we're operating on an entity, we'll need to grab the name from the lval's row
            if field_type == "entity":
                # If the entity field is set, we'll retrieve the name of the entity.
                if lval is not None:
                    link_type = lval["type"]
                    link_id = lval["id"]
                    lval_row = self._db[link_type][link_id]
                    if "name" in lval_row:
                        lval["name"] = lval_row["name"]
                    elif "code" in lval_row:
                        lval["name"] = lval_row["code"]

            return self._compare(field_type, lval, operator, rval)

    def _rearrange_filters(self, filters):
        """
        Modifies the filter syntax to turn it into a list of three items regardless
        of the actual filter. Most of the filters are list of three elements, so this doesn't change much.

        The filter_operator syntax uses a dictionary with two keys, "filters" and
        "filter_operator". Filters using this syntax will be turned into
        [None, filter["filter_operator"], filter["filters"]]

        Filters of the form [field, operator, values....] will be turned into
        [field, operator, [values...]].

        :param list filters: List of filters to rearrange.

        :returns: A list of three items.
        """
        rearranged_filters = []

        # now translate ["field", "in", 2,3,4] --> ["field", "in", [2, 3, 4]]
        for f in filters:
            if isinstance(f, list):
                if len(f) > 3:
                    # ["field", "in", 2,3,4] --> ["field", "in", [2, 3, 4]]
                    new_filter = [f[0], f[1], f[2:]]
                elif f[1] == "in" and not isinstance(f[2], list):
                    # ["field", "in", 2] --> ["field", "in", [2]]
                    new_filter = [f[0], f[1], [f[2]]]
                else:
                    new_filter = f
            elif isinstance(f, dict):
                if "filter_operator" not in f or "filters" not in f:
                    raise ShotgunError(
                        "Bad filter operator, requires keys 'filter_operator' and 'filters', "
                        "found %s" % ", ".join(f.keys())
                    )
                new_filter = [None, f["filter_operator"], f["filters"]]
            else:
                raise ShotgunError(
                    "Filters can only be lists or dictionaries, not %s." % type(f).__name__
                )

            rearranged_filters.append(new_filter)

        return rearranged_filters

    def _row_matches_filters(self, entity_type, row, filters, filter_operator, retired_only):
        """Evaluate a list of filters (combined with all/any) against a row."""
        filters = self._rearrange_filters(filters)

        if retired_only and not row["__retired"] or not retired_only and row["__retired"]:
            # ignore retired rows unless the retired_only flag is set
            # ignore live rows if the retired_only flag is set
            return False
        elif filter_operator in ("all", None):
            # renamed loop variable to avoid shadowing the filter() builtin
            return all(self._row_matches_filter(entity_type, row, f, retired_only) for f in filters)
        elif filter_operator == "any":
            return any(self._row_matches_filter(entity_type, row, f, retired_only) for f in filters)
        else:
            raise ShotgunError("%s is not a valid filter operator" % filter_operator)

    def _update_row(self, entity_type, row, data):
        """Write data into a row, normalizing entity links to {type, id} dicts."""
        for field in data:
            field_type = self._get_field_type(entity_type, field)
            if field_type == "entity" and data[field]:
                row[field] = {"type": data[field]["type"], "id": data[field]["id"]}
            elif field_type == "multi_entity":
                row[field] = [{"type": item["type"], "id": item["id"]} for item in data[field]]
            else:
                row[field] = data[field]

    def _validate_entity_exists(self, entity_type, entity_id):
        """Raise ShotgunError if no row with entity_id exists for entity_type."""
        if entity_id not in self._db[entity_type]:
            raise ShotgunError("No entity of type %s exists with id %s" % (entity_type, entity_id))
| |
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generated by synthtool. DO NOT EDIT!
from __future__ import absolute_import
import os
import pathlib
import shutil
import nox
# Pin black so formatting checks are reproducible across environments.
BLACK_VERSION = "black==19.10b0"
# Paths checked/reformatted by the lint and blacken sessions below.
BLACK_PATHS = ["docs", "sqlalchemy_bigquery", "tests", "noxfile.py", "setup.py"]
# Interpreter used for single-version sessions (lint, cover, docs, ...).
DEFAULT_PYTHON_VERSION = "3.8"
# We're using two Python versions to test with sqlalchemy 1.3 and 1.4.
SYSTEM_TEST_PYTHON_VERSIONS = ["3.8", "3.9"]
UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9"]
# Absolute path of the directory containing this noxfile.
CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()
# 'docfx' is excluded since it only needs to run in 'docs-presubmit'
nox.options.sessions = [
    "lint",
    "unit",
    "cover",
    "system",
    "compliance",
    "lint_setup_py",
    "blacken",
    "docs",
]
# Error if a python version is missing
nox.options.stop_on_first_error = True
nox.options.error_on_missing_interpreters = True
@nox.session(python=DEFAULT_PYTHON_VERSION)
def lint(session):
    """Run linters.

    Fails when black finds formatting drift or flake8 reports
    sufficiently serious code quality issues.
    """
    session.install("flake8", BLACK_VERSION)
    # black in --check mode only reports; blacken() actually reformats.
    session.run("black", "--check", *BLACK_PATHS)
    session.run("flake8", "sqlalchemy_bigquery", "tests")
@nox.session(python=DEFAULT_PYTHON_VERSION)
def blacken(session):
    """Reformat the code base to the pinned black style."""
    session.install(BLACK_VERSION)
    session.run("black", *BLACK_PATHS)
@nox.session(python=DEFAULT_PYTHON_VERSION)
def lint_setup_py(session):
    """Verify that setup.py is valid (including RST check).

    Catches long-description rendering problems before release via
    docutils' strict reStructuredText check.
    """
    session.install("docutils", "pygments")
    session.run("python", "setup.py", "check", "--restructuredtext", "--strict")
def default(session):
    """Install test dependencies and run the unit tests in-place.

    Shared implementation backing the per-interpreter ``unit`` sessions.
    """
    # Constraints pin transitive dependencies per interpreter version.
    constraints_path = str(
        CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
    )
    test_deps = [
        "mock",
        "asyncmock",
        "pytest",
        "pytest-cov",
        "pytest-asyncio",
    ]
    session.install(*test_deps, "-c", constraints_path)
    # alembic is exercised on 3.8, geography on 3.9, plain tests elsewhere.
    extras_by_python = {"3.8": "[tests,alembic]", "3.9": "[tests,geography]"}
    extras = extras_by_python.get(session.python, "[tests]")
    session.install("-e", f".{extras}", "-c", constraints_path)
    # Run py.test against the unit tests.
    session.run(
        "py.test",
        "--quiet",
        f"--junitxml=unit_{session.python}_sponge_log.xml",
        "--cov=sqlalchemy_bigquery",
        "--cov=tests/unit",
        "--cov-append",
        "--cov-config=.coveragerc",
        "--cov-report=",
        "--cov-fail-under=0",
        os.path.join("tests", "unit"),
        *session.posargs,
    )
@nox.session(python=UNIT_TEST_PYTHON_VERSIONS)
def unit(session):
    """Execute the unit test suite on each supported interpreter."""
    # Delegates to the shared runner so unit/system stay consistent.
    default(session)
@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)
def system(session):
    """Run the system test suite."""
    # Respect the opt-out switch first; it defaults to running the tests.
    if os.environ.get("RUN_SYSTEM_TESTS", "true") == "false":
        session.skip("RUN_SYSTEM_TESTS is set to false, skipping")
    # Install pyopenssl for mTLS testing.
    if os.environ.get("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true":
        session.install("pyopenssl")
    single_file = os.path.join("tests", "system.py")
    tests_folder = os.path.join("tests", "system")
    have_file = os.path.exists(single_file)
    have_folder = os.path.exists(tests_folder)
    # Sanity check: only run tests if found.
    if not (have_file or have_folder):
        session.skip("System tests were not found")
    constraints_path = str(
        CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
    )
    # Use pre-release gRPC for system tests.
    session.install("--pre", "grpcio")
    # Install all test dependencies, then install this package into the
    # virtualenv's dist-packages.
    session.install("mock", "pytest", "google-cloud-testutils", "-c", constraints_path)
    # alembic is exercised on 3.8, geography on 3.9, plain tests elsewhere.
    extras_by_python = {"3.8": "[tests,alembic]", "3.9": "[tests,geography]"}
    extras = extras_by_python.get(session.python, "[tests]")
    session.install("-e", f".{extras}", "-c", constraints_path)
    # Run py.test against whichever system-test layout exists.
    junit_flag = f"--junitxml=system_{session.python}_sponge_log.xml"
    if have_file:
        session.run("py.test", "--quiet", junit_flag, single_file, *session.posargs)
    if have_folder:
        session.run("py.test", "--quiet", junit_flag, tests_folder, *session.posargs)
@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)
def compliance(session):
    """Run the SQLAlchemy dialect-compliance system tests"""
    # Several opt-out / sanity guards before doing any real work.
    if os.environ.get("RUN_COMPLIANCE_TESTS", "true") == "false":
        session.skip("RUN_COMPLIANCE_TESTS is set to false, skipping")
    if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""):
        session.skip("Credentials must be set via environment variable")
    if os.environ.get("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true":
        session.install("pyopenssl")
    compliance_dir = os.path.join("tests", "sqlalchemy_dialect_compliance")
    if not os.path.exists(compliance_dir):
        session.skip("Compliance tests were not found")
    constraints_path = str(
        CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
    )
    # Pre-release gRPC, then the test dependencies under constraints.
    session.install("--pre", "grpcio")
    session.install(
        "mock",
        "pytest",
        "pytest-rerunfailures",
        "google-cloud-testutils",
        "-c",
        constraints_path,
    )
    # alembic is exercised on 3.8, geography on 3.9, plain tests elsewhere.
    extras_by_python = {"3.8": "[tests,alembic]", "3.9": "[tests,geography]"}
    extras = extras_by_python.get(session.python, "[tests]")
    session.install("-e", f".{extras}", "-c", constraints_path)
    # Retry known flaky BigQuery-side failures a few times before reporting.
    session.run(
        "py.test",
        "-vv",
        f"--junitxml=compliance_{session.python}_sponge_log.xml",
        "--reruns=3",
        "--reruns-delay=60",
        "--only-rerun=403 Exceeded rate limits",
        "--only-rerun=409 Already Exists",
        "--only-rerun=404 Not found",
        "--only-rerun=400 Cannot execute DML over a non-existent table",
        compliance_dir,
        *session.posargs,
    )
@nox.session(python=DEFAULT_PYTHON_VERSION)
def cover(session):
    """Run the final coverage report.

    This outputs the coverage report aggregating coverage from the unit
    test runs (not system test runs), and then erases coverage data.
    """
    session.install("coverage", "pytest-cov")
    # Fails the build if aggregated unit coverage drops below 100%.
    session.run("coverage", "report", "--show-missing", "--fail-under=100")
    session.run("coverage", "erase")
@nox.session(python=DEFAULT_PYTHON_VERSION)
def docs(session):
    """Build the docs for this library."""
    session.install("-e", ".")
    session.install(
        "sphinx==4.0.1", "alabaster", "geoalchemy2", "shapely", "recommonmark"
    )
    build_dir = os.path.join("docs", "_build")
    # Start from a clean build tree so stale artifacts can't leak through.
    shutil.rmtree(build_dir, ignore_errors=True)
    session.run(
        "sphinx-build",
        "-W",  # warnings as errors
        "-T",  # show full traceback on exception
        "-N",  # no colors
        "-b",
        "html",
        "-d",
        os.path.join(build_dir, "doctrees", ""),
        os.path.join("docs", ""),
        os.path.join(build_dir, "html", ""),
    )
@nox.session(python=DEFAULT_PYTHON_VERSION)
def docfx(session):
"""Build the docfx yaml files for this library."""
session.install("-e", ".")
session.install(
"sphinx==4.0.1",
"alabaster",
"geoalchemy2",
"shapely",
"recommonmark",
"gcp-sphinx-docfx-yaml",
)
shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
session.run(
"sphinx-build",
"-T", # show full traceback on exception
"-N", # no colors
"-D",
(
"extensions=sphinx.ext.autodoc,"
"sphinx.ext.autosummary,"
"docfx_yaml.extension,"
"sphinx.ext.intersphinx,"
"sphinx.ext.coverage,"
"sphinx.ext.napoleon,"
"sphinx.ext.todo,"
"sphinx.ext.viewcode,"
"recommonmark"
),
"-b",
"html",
"-d",
os.path.join("docs", "_build", "doctrees", ""),
os.path.join("docs", ""),
os.path.join("docs", "_build", "html", ""),
)
| |
"""
Unit tests for django-invitation.
These tests assume that you've completed all the prerequisites for
getting django-invitation running in the default setup, to wit:
1. You have ``invitation`` in your ``INSTALLED_APPS`` setting.
2. You have created all of the templates mentioned in this
application's documentation.
3. You have added the setting ``ACCOUNT_INVITATION_DAYS`` to your
settings file.
4. You have URL patterns pointing to the invitation views.
"""
import datetime
import sha
from django.conf import settings
from django.contrib.auth.models import User
from django.core import mail
from django.core import management
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.contrib.sites.models import Site
from invitation import forms
from invitation.models import InvitationKey, InvitationUser
allauth_installed = False
try:
if 'allauth' in settings.INSTALLED_APPS:
allauth_installed = True
print "** allauth installed **"
from allauth.socialaccount.models import SocialApp
except:
pass
registration_installed = False
try:
import registration
if 'registration' in settings.INSTALLED_APPS:
registration_installed = True
print "** registration installed **"
except:
pass
class InvitationTestCase(TestCase):
    """
    Base class for the test cases.

    This sets up one user and two keys -- one expired, one not -- which are
    used to exercise various parts of the application.
    """
    def setUp(self):
        # One inviting user plus a valid key and an expired key issued by her.
        self.sample_user = User.objects.create_user(username='alice',
                                                    password='secret',
                                                    email='alice@example.com')
        self.sample_key = InvitationKey.objects.create_invitation(user=self.sample_user)
        self.expired_key = InvitationKey.objects.create_invitation(user=self.sample_user)
        # Age the second key just past the configured validity window.
        self.expired_key.date_invited -= datetime.timedelta(days=settings.ACCOUNT_INVITATION_DAYS + 1)
        self.expired_key.save()
        # POST payload for django-registration based signup.
        self.sample_registration_data = {
            'invitation_key': self.sample_key.key,
            'username': 'new_user',
            'email': 'newbie@example.com',
            'password1': 'secret',
            'password2': 'secret',
            'tos': '1'}
        # POST payload for django-allauth based signup.
        self.sample_allauth_data = {
            'username': 'new_user',
            'email': 'newbie@example.com',
            'password1': 'secret',
            'password2': 'secret',
            'terms_and_conds': '1'}
    def assertRedirect(self, response, viewname):
        """Assert that response has been redirected to ``viewname``."""
        self.assertEqual(response.status_code, 302)
        # NOTE(review): relies on the classic test-client host 'testserver'
        # (Location compared as an absolute URL) -- confirm on Django upgrade.
        expected_location = 'http://testserver' + reverse(viewname)
        self.assertEqual(response['Location'], expected_location)
    def tearDown(self):
        pass
class InvitationTestCaseRegistration(InvitationTestCase):
    """Fixture variant that forces the django-registration backend."""
    def setUp(self):
        super(InvitationTestCaseRegistration, self).setUp()
        # Disable allauth integration for these tests; restored in tearDown.
        self.saved_invitation_use_allauth = settings.INVITATION_USE_ALLAUTH
        settings.INVITATION_USE_ALLAUTH = False
        self.saved_socialaccount_providers = settings.SOCIALACCOUNT_PROVIDERS
        settings.SOCIALACCOUNT_PROVIDERS = {}
    def tearDown(self):
        super(InvitationTestCaseRegistration, self).tearDown()
        settings.INVITATION_USE_ALLAUTH = self.saved_invitation_use_allauth
        settings.SOCIALACCOUNT_PROVIDERS = self.saved_socialaccount_providers
class InvitationTestCaseAllauth(InvitationTestCase):
    """Fixture variant that forces the django-allauth backend."""
    def setUp(self):
        super(InvitationTestCaseAllauth, self).setUp()
        # Enable allauth integration for these tests; restored in tearDown.
        self.saved_invitation_use_allauth = settings.INVITATION_USE_ALLAUTH
        settings.INVITATION_USE_ALLAUTH = True
        self.saved_socialaccount_providers = settings.SOCIALACCOUNT_PROVIDERS
        settings.SOCIALACCOUNT_PROVIDERS = {}
        # allauth templates need at least one SocialApp for the current site.
        self.facebook_app = SocialApp(site=Site.objects.get_current(), provider='facebook', name='test', key='abc', secret='def')
        self.facebook_app.save()
    def tearDown(self):
        super(InvitationTestCaseAllauth, self).tearDown()
        settings.INVITATION_USE_ALLAUTH = self.saved_invitation_use_allauth
        settings.SOCIALACCOUNT_PROVIDERS = self.saved_socialaccount_providers
class InvitationModelTests(InvitationTestCase):
    """
    Tests for the model-oriented functionality of django-invitation.
    """
    def test_invitation_key_created(self):
        """
        Test that a ``InvitationKey`` is created for a new key.
        """
        # setUp created exactly two keys (one valid, one expired).
        self.assertEqual(InvitationKey.objects.count(), 2)
    def test_invitation_email(self):
        """
        Test that ``InvitationKey.send_to`` sends an invitation email.
        """
        self.sample_key.send_to('bob@example.com')
        self.assertEqual(len(mail.outbox), 1)
    def test_key_expiration_condition(self):
        """
        Test that ``InvitationKey.key_expired()`` returns ``True`` for expired
        keys, and ``False`` otherwise.
        """
        # Unexpired user returns False.
        self.failIf(self.sample_key.key_expired())
        # Expired user returns True.
        self.failUnless(self.expired_key.key_expired())
    def test_expired_user_deletion(self):
        """
        Test ``InvitationKey.objects.delete_expired_keys()``.

        Only keys whose expiration date has passed are deleted by
        delete_expired_keys.
        """
        InvitationKey.objects.delete_expired_keys()
        # Only the unexpired key from setUp should survive.
        self.assertEqual(InvitationKey.objects.count(), 1)
    def test_management_command(self):
        """
        Test that ``manage.py cleanupinvitation`` functions correctly.
        """
        management.call_command('cleanupinvitation')
        self.assertEqual(InvitationKey.objects.count(), 1)
    def test_invitations_remaining(self):
        """Test InvitationUser calculates remaining invitations properly."""
        remaining_invites = InvitationKey.objects.remaining_invitations_for_user
        # New user starts with settings.INVITATIONS_PER_USER
        user = User.objects.create_user(username='newbie',
                                        password='secret',
                                        email='newbie@example.com')
        self.assertEqual(remaining_invites(user), settings.INVITATIONS_PER_USER)
        # After using some, amount remaining is decreased
        used = InvitationKey.objects.filter(from_user=self.sample_user).count()
        expected_remaining = settings.INVITATIONS_PER_USER - used
        remaining = remaining_invites(self.sample_user)
        self.assertEqual(remaining, expected_remaining)
        # Using Invitationuser via Admin, remaining can be increased
        invitation_user = InvitationUser.objects.get(inviter=self.sample_user)
        new_remaining = 2*settings.INVITATIONS_PER_USER + 1
        invitation_user.invitations_remaining = new_remaining
        invitation_user.save()
        remaining = remaining_invites(self.sample_user)
        self.assertEqual(remaining, new_remaining)
        # If no InvitationUser (for pre-existing/legacy User), one is created
        old_sample_user = User.objects.create_user(username='lewis',
                                                   password='secret',
                                                   email='lewis@example.com')
        old_sample_user.invitationuser_set.all().delete()
        self.assertEqual(old_sample_user.invitationuser_set.count(), 0)
        remaining = remaining_invites(old_sample_user)
        self.assertEqual(remaining, settings.INVITATIONS_PER_USER)
class InvitationFormTests(InvitationTestCase):
    """
    Tests for the forms and custom validation logic included in
    django-invitation.
    """
    def setUp(self):
        super(InvitationFormTests, self).setUp()
        # Blacklist a domain so the blacklist validation branch is reachable.
        self.saved_invitation_blacklist = settings.INVITATION_BLACKLIST
        settings.INVITATION_BLACKLIST = ( '@mydomain.com', )
    def tearDown(self):
        settings.INVITATION_BLACKLIST = self.saved_invitation_blacklist
        super(InvitationFormTests, self).tearDown()
    def test_invalid_invitation_form(self):
        """
        Test that ``InvitationKeyForm`` enforces email constraints.
        """
        invalid_data_dicts = [
            # Invalid email.
            {
                'data': { 'email': 'example.com' },
                'error': ('email', [u"Enter a valid e-mail address."])
            },
            # Blacklisted domain.
            {
                'data': {'email': 'an_address@mydomain.com'},
                'error': ('email', [u"Thanks, but there's no need to invite us!"])
            }
        ]
        for invalid_dict in invalid_data_dicts:
            form = forms.InvitationKeyForm(data=invalid_dict['data'], remaining_invitations=1)
            self.failIf(form.is_valid())
            self.assertEqual(form.errors[invalid_dict['error'][0]], invalid_dict['error'][1])
    def test_invitation_form(self):
        """Test that a well-formed address with invitations left validates."""
        # Removed leftover debug output (``print form.errors``) that spammed
        # the test run on every pass.
        form = forms.InvitationKeyForm(data={ 'email': 'foo@example.com' ,}, remaining_invitations=1 )
        self.failUnless(form.is_valid())
class InvitationViewTestsRegistration(InvitationTestCaseRegistration):
    """
    Tests for the views included in django-invitation when django-registration is used as the backend.
    """
    def test_invitation_view(self):
        """
        Test that the invitation view rejects invalid submissions,
        and creates a new key and redirects after a valid submission.
        """
        # You need to be logged in to send an invite.
        response = self.client.login(username='alice', password='secret')
        remaining_invitations = InvitationKey.objects.remaining_invitations_for_user(self.sample_user)
        # Invalid email data fails.
        response = self.client.post(reverse('invitation_invite'),
                                    data={ 'email': 'example.com' })
        self.assertEqual(response.status_code, 200)
        self.failUnless(response.context['form'])
        self.failUnless(response.context['form'].errors)
        # Valid email data succeeds.
        response = self.client.post(reverse('invitation_invite'),
                                    data={ 'email': 'foo@example.com' })
        self.assertRedirect(response, 'invitation_complete')
        self.assertEqual(InvitationKey.objects.count(), 3)
        self.assertEqual(InvitationKey.objects.remaining_invitations_for_user(self.sample_user), remaining_invitations-1)
        # Once remaining invitations exhausted, you fail again.
        while InvitationKey.objects.remaining_invitations_for_user(self.sample_user) > 0:
            self.client.post(reverse('invitation_invite'),
                             data={'email': 'foo@example.com'})
        self.assertEqual(InvitationKey.objects.remaining_invitations_for_user(self.sample_user), 0)
        response = self.client.post(reverse('invitation_invite'),
                                    data={'email': 'foo@example.com'})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['remaining_invitations'], 0)
        self.failUnless(response.context['form'])
    def test_invited_view(self):
        """
        Test that the invited view invite the user from a valid
        key and fails if the key is invalid or has expired.
        """
        # Valid key puts use the invited template.
        response = self.client.get(reverse('invitation_invited',
                                           kwargs={ 'invitation_key': self.sample_key.key }))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'invitation/invited.html')
        # Expired key use the wrong key template.
        response = self.client.get(reverse('invitation_invited',
                                           kwargs={ 'invitation_key': self.expired_key.key }))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'invitation/wrong_invitation_key.html')
        # Invalid key use the wrong key template.
        response = self.client.get(reverse('invitation_invited',
                                           kwargs={ 'invitation_key': 'foo' }))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'invitation/wrong_invitation_key.html')
        # Nonexistent key use the wrong key template.
        # NOTE(review): the deprecated ``sha`` module is only used here to
        # build a well-formed digest that is absent from the database.
        response = self.client.get(reverse('invitation_invited',
                                           kwargs={ 'invitation_key': sha.new('foo').hexdigest() }))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'invitation/wrong_invitation_key.html')
    def test_register_view(self):
        """
        Test that after registration a key cannot be reused.
        """
        # This won't work if registration isn't installed
        if not registration_installed:
            print "** Skipping test requiring django-registration **"
            return
        # The first use of the key to register a new user works.
        registration_data = self.sample_registration_data.copy()
        response = self.client.post(reverse('registration_register'),
                                    data=registration_data)
        self.assertRedirect(response, 'registration_complete')
        user = User.objects.get(username='new_user')
        key = InvitationKey.objects.get_key(self.sample_key.key)
        self.assertEqual(user, key.registrant)
        # Trying to reuse the same key then fails.
        registration_data['username'] = 'even_newer_user'
        response = self.client.post(reverse('registration_register'),
                                    data=registration_data)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response,
                                'invitation/wrong_invitation_key.html')
        # The failed reuse must not have created the second account.
        try:
            even_newer_user = User.objects.get(username='even_newer_user')
            self.fail("Invitation already used - No user should be created.")
        except User.DoesNotExist:
            pass
class InvitationViewTestsAllauth(InvitationTestCaseAllauth):
    """
    Tests for the views included in django-invitation when django-allauth is used as the backend.
    """
    def test_invitation_view(self):
        """
        Test that the invitation view rejects invalid submissions,
        and creates a new key and redirects after a valid submission.
        """
        # You need to be logged in to send an invite.
        response = self.client.login(username='alice', password='secret')
        remaining_invitations = InvitationKey.objects.remaining_invitations_for_user(self.sample_user)
        # Invalid email data fails.
        response = self.client.post(reverse('invitation_invite'),
                                    data={ 'email': 'example.com' })
        self.assertEqual(response.status_code, 200)
        self.failUnless(response.context['form'])
        self.failUnless(response.context['form'].errors)
        # Valid email data succeeds.
        response = self.client.post(reverse('invitation_invite'),
                                    data={ 'email': 'foo@example.com' })
        self.assertRedirect(response, 'invitation_complete')
        self.assertEqual(InvitationKey.objects.count(), 3)
        self.assertEqual(InvitationKey.objects.remaining_invitations_for_user(self.sample_user), remaining_invitations-1)
        # Once remaining invitations exhausted, you fail again.
        while InvitationKey.objects.remaining_invitations_for_user(self.sample_user) > 0:
            self.client.post(reverse('invitation_invite'),
                             data={'email': 'foo@example.com'})
        self.assertEqual(InvitationKey.objects.remaining_invitations_for_user(self.sample_user), 0)
        response = self.client.post(reverse('invitation_invite'),
                                    data={'email': 'foo@example.com'})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['remaining_invitations'], 0)
        self.failUnless(response.context['form'])
    def test_invited_view(self):
        """
        Test that the invited view invite the user from a valid
        key and fails if the key is invalid or has expired.
        """
        # Valid key puts use the invited template.
        response = self.client.get(reverse('invitation_invited',
                                           kwargs={ 'invitation_key': self.sample_key.key }))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'invitation/invited.html')
        # Expired key use the wrong key template.
        response = self.client.get(reverse('invitation_invited',
                                           kwargs={ 'invitation_key': self.expired_key.key }))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'invitation/wrong_invitation_key.html')
        # Invalid key use the wrong key template.
        response = self.client.get(reverse('invitation_invited',
                                           kwargs={ 'invitation_key': 'foo' }))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'invitation/wrong_invitation_key.html')
        # Nonexistent key use the wrong key template.
        # NOTE(review): the deprecated ``sha`` module is only used here to
        # build a well-formed digest that is absent from the database.
        response = self.client.get(reverse('invitation_invited',
                                           kwargs={ 'invitation_key': sha.new('foo').hexdigest() }))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'invitation/wrong_invitation_key.html')
    def test_register_view(self):
        """
        Test that after registration a key cannot be reused.
        """
        # This won't work if registration isn't installed
        if not allauth_installed:
            print "** Skipping test requiring django-allauth **"
            return
        # The first use of the key to register a new user works.
        registration_data = self.sample_allauth_data.copy()
        # User has to go through 'invited' first
        response = self.client.get(reverse('invitation_invited', kwargs={'invitation_key' : self.sample_key.key }))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response,
                                'invitation/invited.html')
        # If the key gets approved it should be stored in the session
        self.assertIn('invitation_key', self.client.session)
        response = self.client.post(reverse('account_signup'), data=registration_data)
        self.assertEqual(response.status_code, 302)
        # Check that the key has been removed from the session data
        self.assertNotIn('invitation_key', self.client.session)
        # self.assertRedirect(response, 'registration_complete')
        user = User.objects.get(username='new_user')
        key = InvitationKey.objects.get_key(self.sample_key.key)
        self.assertIn(key, user.invitations_used.all())
        # Trying to reuse the same key then fails.
        registration_data['username'] = 'even_newer_user'
        response = self.client.post(reverse('invitation_invited', kwargs={'invitation_key' : self.sample_key.key }))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response,
                                'invitation/wrong_invitation_key.html')
class InviteModeOffTestsRegistration(InvitationTestCaseRegistration):
    """
    Tests for the case where INVITE_MODE is False and django-registration is used as the backend.

    (The test cases other than this one generally assume that INVITE_MODE is
    True.)
    """
    def setUp(self):
        super(InviteModeOffTestsRegistration, self).setUp()
        # Turn invite-only mode off for this test class; restored in tearDown.
        self.saved_invite_mode = settings.INVITE_MODE
        settings.INVITE_MODE = False
        self.saved_socialaccount_providers = settings.SOCIALACCOUNT_PROVIDERS
        settings.SOCIALACCOUNT_PROVIDERS = {}
    def tearDown(self):
        settings.INVITE_MODE = self.saved_invite_mode
        settings.SOCIALACCOUNT_PROVIDERS = self.saved_socialaccount_providers
        super(InviteModeOffTestsRegistration, self).tearDown()
    def test_invited_view(self):
        """
        Test that the invited view redirects to registration_register.
        """
        # This won't work if registration isn't installed
        if not registration_installed:
            print "** Skipping test requiring django-registration **"
            return
        response = self.client.get(reverse('invitation_invited',
                                           kwargs={ 'invitation_key': self.sample_key.key }))
        self.assertRedirect(response, 'registration_register')
    def test_register_view(self):
        """
        Test register view.

        With INVITE_MODE = FALSE, django-invitation just passes this view on to
        django-registration's register.
        """
        # This won't work if registration isn't installed
        if not registration_installed:
            return
        # get
        response = self.client.get(reverse('registration_register'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'registration/registration_form.html')
        # post
        response = self.client.post(reverse('registration_register'),
                                    data=self.sample_registration_data)
        self.assertRedirect(response, 'registration_complete')
class InviteModeOffTestsAllauth(InvitationTestCaseAllauth):
    """
    Tests for the case where INVITE_MODE is False and django-allauth is used as the backend.

    (The test cases other than this one generally assume that INVITE_MODE is
    True.)
    """
    def setUp(self):
        super(InviteModeOffTestsAllauth, self).setUp()
        # Turn invite-only mode off for this test class; restored in tearDown.
        self.saved_invite_mode = settings.INVITE_MODE
        settings.INVITE_MODE = False
        self.saved_socialaccount_providers = settings.SOCIALACCOUNT_PROVIDERS
        settings.SOCIALACCOUNT_PROVIDERS = {}
    def tearDown(self):
        settings.INVITE_MODE = self.saved_invite_mode
        settings.SOCIALACCOUNT_PROVIDERS = self.saved_socialaccount_providers
        super(InviteModeOffTestsAllauth, self).tearDown()
    def test_invited_view(self):
        """
        Test that the invited view redirects to registration_register.
        """
        # This won't work if registration isn't installed
        if not allauth_installed:
            print "** Skipping test requiring django-allauth **"
            return
        response = self.client.get(reverse('invitation_invited',
                                           kwargs={ 'invitation_key': self.sample_key.key }))
        self.assertRedirect(response, 'registration_register')
    def test_register_view(self):
        """
        Test register view.

        With INVITE_MODE = FALSE, django-invitation just passes this view on to
        django-registration's register.
        """
        # This won't work if registration isn't installed
        if not allauth_installed:
            print "** Skipping test requiring django-allauth **"
            return
        # TODO fix this. But for now I'm not going to bother since we could simply remove
        # invitation altogether if we don't want to use the invitation code, or bypass the URL somehow.
        # NOTE(review): a 302 response renders no template, so the
        # assertTemplateUsed below contradicts the status check -- see TODO.
        response = self.client.get(reverse('registration_register'))
        self.assertEqual(response.status_code, 302)
        self.assertTemplateUsed(response, 'account/signup.html')
| |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_utils import uuidutils
import testtools
from neutron.agent.common import config
from neutron.agent.common import ovs_lib
from neutron.agent.linux import interface
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.common import constants
from neutron.tests import base
class BaseChild(interface.LinuxInterfaceDriver):
    """Minimal concrete driver for tests: abstract hooks become no-ops."""
    def plug_new(*args):
        pass
    def unplug(*args):
        pass
class FakeNetwork(object):
    """Stand-in for a network object; only ``id`` is read by the tests."""
    id = '12345678-1234-5678-90ab-ba0987654321'
class FakeSubnet(object):
    """Stand-in for a subnet object; only ``cidr`` is read by the tests."""
    cidr = '192.168.1.1/24'
class FakeAllocation(object):
    """Stand-in for a fixed-IP allocation on FakeSubnet."""
    subnet = FakeSubnet()
    ip_address = '192.168.1.2'
    ip_version = 4
class FakePort(object):
    """Stand-in for a port; ``id`` feeds the tap device name in the tests."""
    id = 'abcdef01-1234-5678-90ab-ba0987654321'
    # NOTE(review): this stores the FakeAllocation *class*, not an instance;
    # attribute access works either way, but confirm it is intentional.
    fixed_ips = [FakeAllocation]
    device_id = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
    network = FakeNetwork()
    network_id = network.id
class TestBase(base.BaseTestCase):
    """Common fixture: config object plus patched ip_lib entry points."""
    def setUp(self):
        super(TestBase, self).setUp()
        self.conf = config.setup_conf()
        self.conf.register_opts(interface.OPTS)
        # Patch the ip_lib helpers so no real 'ip' commands are executed;
        # BaseTestCase cleanup stops the patchers automatically.
        self.ip_dev_p = mock.patch.object(ip_lib, 'IPDevice')
        self.ip_dev = self.ip_dev_p.start()
        self.ip_p = mock.patch.object(ip_lib, 'IPWrapper')
        self.ip = self.ip_p.start()
        self.device_exists_p = mock.patch.object(ip_lib, 'device_exists')
        self.device_exists = self.device_exists_p.start()
class TestABCDriver(TestBase):
    def setUp(self):
        super(TestABCDriver, self).setUp()
        # Give the mocked device link a stable MAC address for assertions.
        mock_link_addr = mock.PropertyMock(return_value='aa:bb:cc:dd:ee:ff')
        type(self.ip_dev().link).address = mock_link_addr
    def test_get_device_name(self):
        """Device name is the 'tap' prefix plus the truncated port id."""
        bc = BaseChild(self.conf)
        device_name = bc.get_device_name(FakePort())
        self.assertEqual('tapabcdef01-12', device_name)
    def test_init_router_port(self):
        """New address added, stale address removed, onlink route added."""
        addresses = [dict(scope='global',
                          dynamic=False, cidr='172.16.77.240/24')]
        self.ip_dev().addr.list = mock.Mock(return_value=addresses)
        self.ip_dev().route.list_onlink_routes.return_value = []
        bc = BaseChild(self.conf)
        ns = '12345678-1234-5678-90ab-ba0987654321'
        bc.init_router_port('tap0', ['192.168.1.2/24'], namespace=ns,
                            extra_subnets=[{'cidr': '172.20.0.0/24'}])
        # Expect: re-address the device, then reconcile onlink routes.
        self.ip_dev.assert_has_calls(
            [mock.call('tap0', namespace=ns),
             mock.call().addr.list(filters=['permanent']),
             mock.call().addr.add('192.168.1.2/24'),
             mock.call().addr.delete('172.16.77.240/24'),
             mock.call('tap0', namespace=ns),
             mock.call().route.list_onlink_routes(constants.IP_VERSION_4),
             mock.call().route.list_onlink_routes(constants.IP_VERSION_6),
             mock.call().route.add_onlink_route('172.20.0.0/24')])
    def test_init_router_port_delete_onlink_routes(self):
        """Onlink routes not backed by extra_subnets get removed."""
        addresses = [dict(scope='global',
                          dynamic=False, cidr='172.16.77.240/24')]
        self.ip_dev().addr.list = mock.Mock(return_value=addresses)
        self.ip_dev().route.list_onlink_routes.return_value = [
            {'cidr': '172.20.0.0/24'}]
        bc = BaseChild(self.conf)
        ns = '12345678-1234-5678-90ab-ba0987654321'
        # No extra_subnets passed, so the existing onlink route is stale.
        bc.init_router_port('tap0', ['192.168.1.2/24'], namespace=ns)
        self.ip_dev.assert_has_calls(
            [mock.call().route.list_onlink_routes(constants.IP_VERSION_4),
             mock.call().route.list_onlink_routes(constants.IP_VERSION_6),
             mock.call().route.delete_onlink_route('172.20.0.0/24')])
    def test_l3_init_with_preserve(self):
        """Addresses listed in preserve_ips must not be deleted."""
        addresses = [dict(scope='global',
                          dynamic=False, cidr='192.168.1.3/32')]
        self.ip_dev().addr.list = mock.Mock(return_value=addresses)
        bc = BaseChild(self.conf)
        ns = '12345678-1234-5678-90ab-ba0987654321'
        bc.init_l3('tap0', ['192.168.1.2/24'], namespace=ns,
                   preserve_ips=['192.168.1.3/32'])
        self.ip_dev.assert_has_calls(
            [mock.call('tap0', namespace=ns),
             mock.call().addr.list(filters=['permanent']),
             mock.call().addr.add('192.168.1.2/24')])
        # The preserved address triggers neither delete nor conntrack purge.
        self.assertFalse(self.ip_dev().addr.delete.called)
        self.assertFalse(self.ip_dev().delete_addr_and_conntrack_state.called)
def _test_l3_init_clean_connections(self, clean_connections):
    """Helper: conntrack state for a removed address is purged only when
    clean_connections is requested."""
    addresses = [
        dict(scope='global', dynamic=False, cidr='10.0.0.1/24'),
        dict(scope='global', dynamic=False, cidr='10.0.0.3/32')]
    self.ip_dev().addr.list = mock.Mock(return_value=addresses)
    bc = BaseChild(self.conf)
    ns = '12345678-1234-5678-90ab-ba0987654321'
    bc.init_l3('tap0', ['10.0.0.1/24'], namespace=ns,
               clean_connections=clean_connections)
    delete = self.ip_dev().delete_addr_and_conntrack_state
    if clean_connections:
        # Only 10.0.0.3/32 is stale; 10.0.0.1/24 is being (re)configured.
        delete.assert_called_once_with('10.0.0.3/32')
    else:
        self.assertFalse(delete.called)
def test_l3_init_with_clean_connections(self):
    """clean_connections=True purges conntrack state for removed IPs."""
    self._test_l3_init_clean_connections(True)
def test_l3_init_without_clean_connections(self):
    """clean_connections=False leaves conntrack state untouched."""
    self._test_l3_init_clean_connections(False)
def _test_init_router_port_with_ipv6(self, include_gw_ip):
    """Helper: an IPv6 router port has its address replaced, gets a
    gateway route when include_gw_ip is set, and onlink routes for the
    extra subnets."""
    addresses = [dict(scope='global',
                      dynamic=False,
                      cidr='2001:db8:a::123/64')]
    self.ip_dev().addr.list = mock.Mock(return_value=addresses)
    self.ip_dev().route.list_onlink_routes.return_value = []
    bc = BaseChild(self.conf)
    ns = '12345678-1234-5678-90ab-ba0987654321'
    new_cidr = '2001:db8:a::124/64'
    kwargs = {'namespace': ns,
              'extra_subnets': [{'cidr': '2001:db8:b::/64'}]}
    if include_gw_ip:
        kwargs['gateway_ips'] = ['2001:db8:a::1']
    bc.init_router_port('tap0', [new_cidr], **kwargs)
    expected_calls = (
        [mock.call('tap0', namespace=ns),
         mock.call().addr.list(filters=['permanent']),
         mock.call().addr.add('2001:db8:a::124/64'),
         mock.call().addr.delete('2001:db8:a::123/64')])
    if include_gw_ip:
        expected_calls += (
            [mock.call().route.add_gateway('2001:db8:a::1')])
    expected_calls += (
        [mock.call('tap0', namespace=ns),
         mock.call().route.list_onlink_routes(constants.IP_VERSION_4),
         mock.call().route.list_onlink_routes(constants.IP_VERSION_6),
         mock.call().route.add_onlink_route('2001:db8:b::/64')])
    self.ip_dev.assert_has_calls(expected_calls)
def test_init_router_port_ipv6_with_gw_ip(self):
    """IPv6 router port setup also installs the gateway route."""
    self._test_init_router_port_with_ipv6(include_gw_ip=True)
def test_init_router_port_ipv6_without_gw_ip(self):
    """IPv6 router port setup without any gateway route."""
    self._test_init_router_port_with_ipv6(include_gw_ip=False)
def test_init_router_port_ext_gw_with_dual_stack(self):
    """Dual-stack gateway port: both address families are added and the
    stale v4/v6 addresses are deleted."""
    old_addrs = [dict(ip_version=4, scope='global',
                      dynamic=False, cidr='172.16.77.240/24'),
                 dict(ip_version=6, scope='global',
                      dynamic=False, cidr='2001:db8:a::123/64')]
    self.ip_dev().addr.list = mock.Mock(return_value=old_addrs)
    self.ip_dev().route.list_onlink_routes.return_value = []
    bc = BaseChild(self.conf)
    ns = '12345678-1234-5678-90ab-ba0987654321'
    new_cidrs = ['192.168.1.2/24', '2001:db8:a::124/64']
    bc.init_router_port('tap0', new_cidrs, namespace=ns,
                        extra_subnets=[{'cidr': '172.20.0.0/24'}])
    # any_order=True: v4 and v6 handling order is not guaranteed.
    self.ip_dev.assert_has_calls(
        [mock.call('tap0', namespace=ns),
         mock.call().addr.list(filters=['permanent']),
         mock.call().addr.add('192.168.1.2/24'),
         mock.call().addr.add('2001:db8:a::124/64'),
         mock.call().addr.delete('172.16.77.240/24'),
         mock.call().addr.delete('2001:db8:a::123/64'),
         mock.call().route.list_onlink_routes(constants.IP_VERSION_4),
         mock.call().route.list_onlink_routes(constants.IP_VERSION_6),
         mock.call().route.add_onlink_route('172.20.0.0/24')],
        any_order=True)
def test_init_router_port_with_ipv6_delete_onlink_routes(self):
    """Stale IPv6 onlink routes are removed when no extra subnets match."""
    addresses = [dict(scope='global',
                      dynamic=False, cidr='2001:db8:a::123/64')]
    route = '2001:db8:a::/64'
    self.ip_dev().addr.list = mock.Mock(return_value=addresses)
    self.ip_dev().route.list_onlink_routes.return_value = [{'cidr': route}]
    bc = BaseChild(self.conf)
    ns = '12345678-1234-5678-90ab-ba0987654321'
    bc.init_router_port('tap0', ['2001:db8:a::124/64'], namespace=ns)
    self.ip_dev.assert_has_calls(
        [mock.call().route.list_onlink_routes(constants.IP_VERSION_4),
         mock.call().route.list_onlink_routes(constants.IP_VERSION_6),
         mock.call().route.delete_onlink_route(route)])
def test_l3_init_with_duplicated_ipv6(self):
    """An already-configured IPv6 address is not added a second time."""
    addresses = [dict(scope='global',
                      dynamic=False,
                      cidr='2001:db8:a::123/64')]
    self.ip_dev().addr.list = mock.Mock(return_value=addresses)
    bc = BaseChild(self.conf)
    ns = '12345678-1234-5678-90ab-ba0987654321'
    bc.init_l3('tap0', ['2001:db8:a::123/64'], namespace=ns)
    self.assertFalse(self.ip_dev().addr.add.called)
def test_l3_init_with_duplicated_ipv6_uncompact(self):
    """IPv6 duplicate detection compares addresses semantically, so the
    uncompressed form of an existing address is still a duplicate."""
    addresses = [dict(scope='global',
                      dynamic=False,
                      cidr='2001:db8:a::123/64')]
    self.ip_dev().addr.list = mock.Mock(return_value=addresses)
    bc = BaseChild(self.conf)
    ns = '12345678-1234-5678-90ab-ba0987654321'
    bc.init_l3('tap0',
               ['2001:db8:a:0000:0000:0000:0000:0123/64'],
               namespace=ns)
    self.assertFalse(self.ip_dev().addr.add.called)
def test_add_ipv6_addr(self):
    """add_ipv6_addr() adds the CIDR with global scope on the device."""
    device_name = 'tap0'
    cidr = '2001:db8::/64'
    ns = '12345678-1234-5678-90ab-ba0987654321'
    bc = BaseChild(self.conf)
    bc.add_ipv6_addr(device_name, cidr, ns)
    self.ip_dev.assert_has_calls(
        [mock.call(device_name, namespace=ns),
         mock.call().addr.add(cidr, 'global')])
def test_delete_ipv6_addr(self):
    """delete_ipv6_addr() removes the address and its conntrack state."""
    device_name = 'tap0'
    cidr = '2001:db8::/64'
    ns = '12345678-1234-5678-90ab-ba0987654321'
    bc = BaseChild(self.conf)
    bc.delete_ipv6_addr(device_name, cidr, ns)
    self.ip_dev.assert_has_calls(
        [mock.call(device_name, namespace=ns),
         mock.call().delete_addr_and_conntrack_state(cidr)])
def test_delete_ipv6_addr_with_prefix(self):
    """delete_ipv6_addr_with_prefix() only removes addresses that fall
    inside the given prefix."""
    device_name = 'tap0'
    prefix = '2001:db8::/48'
    in_cidr = '2001:db8::/64'
    out_cidr = '2001:db7::/64'
    ns = '12345678-1234-5678-90ab-ba0987654321'
    in_addresses = [dict(scope='global',
                         dynamic=False,
                         cidr=in_cidr)]
    out_addresses = [dict(scope='global',
                          dynamic=False,
                          cidr=out_cidr)]
    # Initially set the address list to be empty
    self.ip_dev().addr.list = mock.Mock(return_value=[])
    bc = BaseChild(self.conf)
    # Call delete_v6addr_with_prefix when the address list is empty
    bc.delete_ipv6_addr_with_prefix(device_name, prefix, ns)
    # Assert that delete isn't called
    self.assertFalse(self.ip_dev().delete_addr_and_conntrack_state.called)
    # Set the address list to contain only an address outside of the range
    # of the given prefix
    self.ip_dev().addr.list = mock.Mock(return_value=out_addresses)
    bc.delete_ipv6_addr_with_prefix(device_name, prefix, ns)
    # Assert that delete isn't called
    self.assertFalse(self.ip_dev().delete_addr_and_conntrack_state.called)
    # Set the address list to contain only an address inside of the range
    # of the given prefix
    self.ip_dev().addr.list = mock.Mock(return_value=in_addresses)
    bc.delete_ipv6_addr_with_prefix(device_name, prefix, ns)
    # Assert that delete is called
    self.ip_dev.assert_has_calls(
        [mock.call(device_name, namespace=ns),
         mock.call().addr.list(scope='global', filters=['permanent']),
         mock.call().delete_addr_and_conntrack_state(in_cidr)])
def test_get_ipv6_llas(self):
    """get_ipv6_llas() returns the device's link-local IPv6 addresses."""
    ns = '12345678-1234-5678-90ab-ba0987654321'
    addresses = [dict(scope='link',
                      dynamic=False,
                      cidr='fe80:cafe::/64')]
    self.ip_dev().addr.list = mock.Mock(return_value=addresses)
    device_name = self.ip_dev().name
    bc = BaseChild(self.conf)
    llas = bc.get_ipv6_llas(device_name, ns)
    self.assertEqual(addresses, llas)
    self.ip_dev.assert_has_calls(
        [mock.call(device_name, namespace=ns),
         mock.call().addr.list(scope='link', ip_version=6)])
class TestOVSInterfaceDriver(TestBase):
    """Tests for the OVS interface driver using internal OVS ports."""

    def test_get_device_name(self):
        """Device names are the 'tap' prefix plus the truncated port id."""
        br = interface.OVSInterfaceDriver(self.conf)
        device_name = br.get_device_name(FakePort())
        self.assertEqual('tapabcdef01-12', device_name)

    def test_plug_no_ns(self):
        self._test_plug()

    def test_plug_with_ns(self):
        self._test_plug(namespace='01234567-1234-1234-99')

    def test_plug_alt_bridge(self):
        self._test_plug(bridge='br-foo')

    def test_plug_configured_bridge(self):
        """With no explicit bridge, plug() falls back to the configured
        ovs_integration_bridge."""
        br = 'br-v'
        self.conf.set_override('ovs_use_veth', False)
        self.conf.set_override('ovs_integration_bridge', br)
        self.assertEqual(self.conf.ovs_integration_bridge, br)

        def device_exists(dev, namespace=None):
            return dev == br

        ovs = interface.OVSInterfaceDriver(self.conf)
        with mock.patch.object(ovs, '_ovs_add_port') as add_port:
            self.device_exists.side_effect = device_exists
            ovs.plug('01234567-1234-1234-99',
                     'port-1234',
                     'tap0',
                     'aa:bb:cc:dd:ee:ff',
                     bridge=None,
                     namespace=None)
        add_port.assert_called_once_with('br-v',
                                         'tap0',
                                         'port-1234',
                                         'aa:bb:cc:dd:ee:ff',
                                         internal=True)

    def _test_plug(self, additional_expectation=None, bridge=None,
                   namespace=None):
        """Plug a port and verify OVS replace_port and IP link calls.

        :param additional_expectation: extra mock.call items expected
            after the MAC is set (e.g. an MTU call).
        """
        # Bug fix: the original signature used a mutable default
        # argument (additional_expectation=[]); use None instead.
        if additional_expectation is None:
            additional_expectation = []
        if not bridge:
            bridge = 'br-int'

        def device_exists(dev, namespace=None):
            return dev == bridge

        with mock.patch.object(ovs_lib.OVSBridge, 'replace_port') as replace:
            ovs = interface.OVSInterfaceDriver(self.conf)
            self.device_exists.side_effect = device_exists
            ovs.plug('01234567-1234-1234-99',
                     'port-1234',
                     'tap0',
                     'aa:bb:cc:dd:ee:ff',
                     bridge=bridge,
                     namespace=namespace)
            replace.assert_called_once_with(
                'tap0',
                ('type', 'internal'),
                ('external_ids', {
                    'iface-id': 'port-1234',
                    'iface-status': 'active',
                    'attached-mac': 'aa:bb:cc:dd:ee:ff'}))

        expected = [mock.call(),
                    mock.call().device('tap0'),
                    mock.call().device().link.set_address('aa:bb:cc:dd:ee:ff')]
        expected.extend(additional_expectation)
        if namespace:
            expected.extend(
                [mock.call().ensure_namespace(namespace),
                 mock.call().ensure_namespace().add_device_to_namespace(
                     mock.ANY)])
        expected.extend([mock.call().device().link.set_up()])
        self.ip.assert_has_calls(expected)

    def test_mtu_int(self):
        self.assertIsNone(self.conf.network_device_mtu)
        self.conf.set_override('network_device_mtu', 9000)
        self.assertEqual(self.conf.network_device_mtu, 9000)

    def test_validate_min_ipv6_mtu(self):
        """An MTU below the IPv6 minimum aborts driver startup."""
        self.conf.set_override('network_device_mtu', 1200)
        with mock.patch('neutron.common.ipv6_utils.is_enabled') as ipv6_status:
            with testtools.ExpectedException(SystemExit):
                ipv6_status.return_value = True
                BaseChild(self.conf)

    def test_plug_mtu(self):
        self.conf.set_override('network_device_mtu', 9000)
        self._test_plug([mock.call().device().link.set_mtu(9000)])

    def test_unplug(self, bridge=None):
        if not bridge:
            bridge = 'br-int'
        with mock.patch('neutron.agent.common.ovs_lib.OVSBridge') as ovs_br:
            ovs = interface.OVSInterfaceDriver(self.conf)
            ovs.unplug('tap0')
            ovs_br.assert_has_calls([mock.call(bridge),
                                     mock.call().delete_port('tap0')])
class TestOVSInterfaceDriverWithVeth(TestOVSInterfaceDriver):
    """Re-runs the OVS driver tests with the ovs_use_veth option on."""

    def setUp(self):
        super(TestOVSInterfaceDriverWithVeth, self).setUp()
        self.conf.set_override('ovs_use_veth', True)

    def test_get_device_name(self):
        # In veth mode the namespace-side device uses the 'ns-' prefix.
        br = interface.OVSInterfaceDriver(self.conf)
        device_name = br.get_device_name(FakePort())
        self.assertEqual('ns-abcdef01-12', device_name)

    def test_plug_with_prefix(self):
        self._test_plug(devname='qr-0', prefix='qr-')

    def _test_plug(self, devname=None, bridge=None, namespace=None,
                   prefix=None, mtu=None):
        """Plug via a veth pair and check both veth ends are configured."""
        if not devname:
            devname = 'ns-0'
        if not bridge:
            bridge = 'br-int'

        def device_exists(dev, namespace=None):
            return dev == bridge

        ovs = interface.OVSInterfaceDriver(self.conf)
        self.device_exists.side_effect = device_exists
        root_dev = mock.Mock()
        ns_dev = mock.Mock()
        self.ip().add_veth = mock.Mock(return_value=(root_dev, ns_dev))
        expected = [mock.call(),
                    mock.call().add_veth('tap0', devname,
                                         namespace2=namespace)]
        with mock.patch.object(ovs_lib.OVSBridge, 'replace_port') as replace:
            ovs.plug('01234567-1234-1234-99',
                     'port-1234',
                     devname,
                     'aa:bb:cc:dd:ee:ff',
                     bridge=bridge,
                     namespace=namespace,
                     prefix=prefix)
            # In veth mode the port is NOT ('type', 'internal').
            replace.assert_called_once_with(
                'tap0',
                ('external_ids', {
                    'iface-id': 'port-1234',
                    'iface-status': 'active',
                    'attached-mac': 'aa:bb:cc:dd:ee:ff'}))

        ns_dev.assert_has_calls(
            [mock.call.link.set_address('aa:bb:cc:dd:ee:ff')])
        if mtu:
            ns_dev.assert_has_calls([mock.call.link.set_mtu(mtu)])
            root_dev.assert_has_calls([mock.call.link.set_mtu(mtu)])
        self.ip.assert_has_calls(expected)
        root_dev.assert_has_calls([mock.call.link.set_up()])
        ns_dev.assert_has_calls([mock.call.link.set_up()])

    def test_plug_mtu(self):
        self.conf.set_override('network_device_mtu', 9000)
        self._test_plug(mtu=9000)

    def test_unplug(self, bridge=None):
        """Unplug deletes the OVS port and the namespace-side veth end."""
        if not bridge:
            bridge = 'br-int'
        with mock.patch('neutron.agent.common.ovs_lib.OVSBridge') as ovs_br:
            ovs = interface.OVSInterfaceDriver(self.conf)
            ovs.unplug('ns-0', bridge=bridge)
            ovs_br.assert_has_calls([mock.call(bridge),
                                     mock.call().delete_port('tap0')])
        self.ip_dev.assert_has_calls([mock.call('ns-0', namespace=None),
                                      mock.call().link.delete()])
class TestBridgeInterfaceDriver(TestBase):
    """Tests for the Linux bridge interface driver."""

    def test_get_device_name(self):
        br = interface.BridgeInterfaceDriver(self.conf)
        device_name = br.get_device_name(FakePort())
        self.assertEqual('ns-abcdef01-12', device_name)

    def test_plug_no_ns(self):
        self._test_plug()

    def test_plug_with_ns(self):
        self._test_plug(namespace='01234567-1234-1234-99')

    def _test_plug(self, namespace=None, mtu=None):
        """Plug a veth pair and verify MAC/MTU/up calls on both ends."""
        def device_exists(device, namespace=None):
            return device.startswith('brq')

        root_veth = mock.Mock()
        ns_veth = mock.Mock()
        self.ip().add_veth = mock.Mock(return_value=(root_veth, ns_veth))
        self.device_exists.side_effect = device_exists
        br = interface.BridgeInterfaceDriver(self.conf)
        mac_address = 'aa:bb:cc:dd:ee:ff'
        br.plug('01234567-1234-1234-99',
                'port-1234',
                'ns-0',
                mac_address,
                namespace=namespace)
        ip_calls = [mock.call(),
                    mock.call().add_veth('tap0', 'ns-0', namespace2=namespace)]
        ns_veth.assert_has_calls([mock.call.link.set_address(mac_address)])
        if mtu:
            ns_veth.assert_has_calls([mock.call.link.set_mtu(mtu)])
            root_veth.assert_has_calls([mock.call.link.set_mtu(mtu)])
        self.ip.assert_has_calls(ip_calls)
        root_veth.assert_has_calls([mock.call.link.set_up()])
        ns_veth.assert_has_calls([mock.call.link.set_up()])

    def test_plug_dev_exists(self):
        """If the device already exists, plug() only logs and does nothing."""
        self.device_exists.return_value = True
        with mock.patch('neutron.agent.linux.interface.LOG.info') as log:
            br = interface.BridgeInterfaceDriver(self.conf)
            br.plug('01234567-1234-1234-99',
                    'port-1234',
                    'tap0',
                    'aa:bb:cc:dd:ee:ff')
            self.assertFalse(self.ip_dev.called)
            self.assertEqual(log.call_count, 1)

    def test_plug_mtu(self):
        self.device_exists.return_value = False
        self.conf.set_override('network_device_mtu', 9000)
        self._test_plug(mtu=9000)

    def test_unplug_no_device(self):
        self.device_exists.return_value = False
        self.ip_dev().link.delete.side_effect = RuntimeError
        with mock.patch('neutron.agent.linux.interface.LOG') as log:
            br = interface.BridgeInterfaceDriver(self.conf)
            br.unplug('tap0')
            # NOTE(review): this list literal is built and immediately
            # discarded -- it looks like a missing
            # self.ip_dev.assert_has_calls(...); confirm intent before
            # turning it into an assertion.
            [mock.call(), mock.call('tap0'), mock.call().link.delete()]
            self.assertEqual(log.error.call_count, 1)

    def test_unplug(self):
        self.device_exists.return_value = True
        with mock.patch('neutron.agent.linux.interface.LOG.debug') as log:
            br = interface.BridgeInterfaceDriver(self.conf)
            br.unplug('tap0')
            self.assertEqual(log.call_count, 1)
        self.ip_dev.assert_has_calls([mock.call('tap0', namespace=None),
                                      mock.call().link.delete()])
class TestIVSInterfaceDriver(TestBase):
    """Tests for the Indigo Virtual Switch (IVS) interface driver."""

    def setUp(self):
        super(TestIVSInterfaceDriver, self).setUp()

    def test_get_device_name(self):
        br = interface.IVSInterfaceDriver(self.conf)
        device_name = br.get_device_name(FakePort())
        self.assertEqual('ns-abcdef01-12', device_name)

    def test_plug_with_prefix(self):
        self._test_plug(devname='qr-0', prefix='qr-')

    def _test_plug(self, devname=None, namespace=None,
                   prefix=None, mtu=None):
        """Plug via ivs-ctl and verify the veth/device configuration."""
        if not devname:
            devname = 'ns-0'

        def device_exists(dev, namespace=None):
            return dev == 'indigo'

        ivs = interface.IVSInterfaceDriver(self.conf)
        self.device_exists.side_effect = device_exists
        root_dev = mock.Mock()
        # _ns_dev is the raw veth end; the driver re-looks the device up
        # through self.ip().device(), which returns ns_dev.
        _ns_dev = mock.Mock()
        ns_dev = mock.Mock()
        self.ip().add_veth = mock.Mock(return_value=(root_dev, _ns_dev))
        self.ip().device = mock.Mock(return_value=(ns_dev))
        expected = [mock.call(), mock.call().add_veth('tap0', devname),
                    mock.call().device(devname)]
        ivsctl_cmd = ['ivs-ctl', 'add-port', 'tap0']
        with mock.patch.object(utils, 'execute') as execute:
            ivs.plug('01234567-1234-1234-99',
                     'port-1234',
                     devname,
                     'aa:bb:cc:dd:ee:ff',
                     namespace=namespace,
                     prefix=prefix)
            execute.assert_called_once_with(ivsctl_cmd, run_as_root=True)

        ns_dev.assert_has_calls(
            [mock.call.link.set_address('aa:bb:cc:dd:ee:ff')])
        if mtu:
            ns_dev.assert_has_calls([mock.call.link.set_mtu(mtu)])
            root_dev.assert_has_calls([mock.call.link.set_mtu(mtu)])
        if namespace:
            expected.extend(
                [mock.call().ensure_namespace(namespace),
                 mock.call().ensure_namespace().add_device_to_namespace(
                     mock.ANY)])
        self.ip.assert_has_calls(expected)
        root_dev.assert_has_calls([mock.call.link.set_up()])
        ns_dev.assert_has_calls([mock.call.link.set_up()])

    def test_plug_mtu(self):
        self.conf.set_override('network_device_mtu', 9000)
        self._test_plug(mtu=9000)

    def test_plug_namespace(self):
        self._test_plug(namespace='mynamespace')

    def test_unplug(self):
        """Unplug removes the IVS port and deletes the veth device."""
        ivs = interface.IVSInterfaceDriver(self.conf)
        ivsctl_cmd = ['ivs-ctl', 'del-port', 'tap0']
        with mock.patch.object(utils, 'execute') as execute:
            ivs.unplug('ns-0')
            execute.assert_called_once_with(ivsctl_cmd, run_as_root=True)
        self.ip_dev.assert_has_calls([mock.call('ns-0', namespace=None),
                                      mock.call().link.delete()])
class TestMidonetInterfaceDriver(TestBase):
    """Tests for the MidoNet interface driver (mm-ctl port binding)."""

    def setUp(self):
        # NOTE(review): conf and driver are created before
        # super().setUp() installs the base mocks -- presumably safe
        # because the driver constructor touches neither; confirm.
        self.conf = config.setup_conf()
        self.conf.register_opts(interface.OPTS)
        self.driver = interface.MidonetInterfaceDriver(self.conf)
        self.network_id = uuidutils.generate_uuid()
        self.port_id = uuidutils.generate_uuid()
        self.device_name = "tap0"
        self.mac_address = "aa:bb:cc:dd:ee:ff"
        self.bridge = "br-test"
        self.namespace = "ns-test"
        super(TestMidonetInterfaceDriver, self).setUp()

    def test_plug(self):
        """Plug creates a veth pair and binds the port via mm-ctl."""
        cmd = ['mm-ctl', '--bind-port', self.port_id, 'tap0']
        self.device_exists.return_value = False

        root_dev = mock.Mock()
        ns_dev = mock.Mock()
        self.ip().add_veth = mock.Mock(return_value=(root_dev, ns_dev))
        with mock.patch.object(utils, 'execute') as execute:
            self.driver.plug(
                self.network_id, self.port_id,
                self.device_name, self.mac_address,
                self.bridge, self.namespace)
            execute.assert_called_once_with(cmd, run_as_root=True)

        expected = [mock.call(), mock.call(),
                    mock.call().add_veth(self.device_name,
                                         self.device_name,
                                         namespace2=self.namespace),
                    mock.call().ensure_namespace(self.namespace),
                    mock.call().ensure_namespace().add_device_to_namespace(
                        mock.ANY)]
        ns_dev.assert_has_calls(
            [mock.call.link.set_address(self.mac_address)])
        root_dev.assert_has_calls([mock.call.link.set_up()])
        ns_dev.assert_has_calls([mock.call.link.set_up()])
        # Second positional arg is any_order=True.
        self.ip.assert_has_calls(expected, True)

    def test_unplug(self):
        self.driver.unplug(self.device_name, self.bridge, self.namespace)
        self.ip_dev.assert_has_calls([
            mock.call(self.device_name, namespace=self.namespace),
            mock.call().link.delete()])
        self.ip.assert_has_calls([mock.call().garbage_collect_namespace()])
| |
from pandac.PandaModules import *
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from direct.fsm import StateData
from toontown.toon import ToonAvatarPanel
from toontown.friends import ToontownFriendSecret
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
from otp.otpbase import OTPGlobals
# Friends-list panel page identifiers, cycled with the left/right arrows.
FLPPets = 1
FLPOnline = 2
FLPAll = 3
FLPOnlinePlayers = 4
FLPPlayers = 5
FLPEnemies = 6
# Lazily-created singleton FriendsListPanel, managed by the module helpers.
globalFriendsList = None
def determineFriendName(friendTuple):
    """Return the display name for a friends-list entry, or None if it
    cannot be resolved.

    friendTuple is (avId, flags), (avId, flags, playerId) or
    (avId, flags, playerId, showType); showType == 1 selects the
    player (account) name over the toon name.
    """
    friendName = None
    if len(friendTuple) == 2:
        avId, flags = friendTuple
        playerId = None
        showType = 0
    elif len(friendTuple) == 3:
        avId, flags, playerId = friendTuple
        showType = 0
    elif len(friendTuple) == 4:
        avId, flags, playerId, showType = friendTuple
    else:
        # Bug fix: a malformed tuple previously fell through with avId
        # unbound and raised UnboundLocalError below.
        return None
    if showType == 1 and playerId:
        playerInfo = base.cr.playerFriendsManager.playerId2Info.get(playerId)
        # Bug fix: guard against a missing info record instead of
        # raising AttributeError on None.
        if playerInfo:
            friendName = playerInfo.playerName
    else:
        hasManager = hasattr(base.cr, 'playerFriendsManager')
        handle = base.cr.identifyFriend(avId)
        if not handle and hasManager:
            handle = base.cr.playerFriendsManager.getAvHandleFromId(avId)
        if handle:
            friendName = handle.getName()
    return friendName
def compareFriends(f1, f2):
    """cmp-style comparator ordering friend tuples by display name."""
    nameA = determineFriendName(f1)
    nameB = determineFriendName(f2)
    if nameA == nameB:
        return 0
    return 1 if nameA > nameB else -1
def showFriendsList():
    """Display the friends list, building the singleton panel lazily."""
    global globalFriendsList
    if globalFriendsList == None:
        globalFriendsList = FriendsListPanel()
    globalFriendsList.enter()
    return
def hideFriendsList():
    """Dismiss the friends list panel if one has been created."""
    panel = globalFriendsList
    if panel != None:
        panel.exit()
    return
def showFriendsListTutorial():
    """Show the friends list in tutorial mode.

    The secrets button is disabled for unpaid accounts, and the close
    button is made inert (its command is stashed so
    hideFriendsListTutorial() can restore it).
    """
    global globalFriendsList
    if globalFriendsList == None:
        globalFriendsList = FriendsListPanel()
    panel = globalFriendsList
    panel.enter()
    if not base.cr.isPaid():
        panel.secrets['state'] = DGG.DISABLED
    panel.closeCommand = panel.close['command']
    panel.close['command'] = None
    return
def hideFriendsListTutorial():
    """Undo tutorial mode (restore close button, re-enable secrets for
    paid accounts) and dismiss the panel."""
    panel = globalFriendsList
    if panel != None:
        if hasattr(panel, 'closeCommand'):
            panel.close['command'] = panel.closeCommand
        if not base.cr.isPaid():
            panel.secrets['state'] = DGG.NORMAL
        panel.exit()
    return
def isFriendsListShown():
    """Return truthy if the friends list panel is currently on screen."""
    panel = globalFriendsList
    return panel.isEntered if panel != None else 0
def unloadFriendsList():
    """Destroy the singleton panel and drop the module-level reference."""
    global globalFriendsList
    panel = globalFriendsList
    if panel != None:
        panel.unload()
        globalFriendsList = None
    return
class FriendsListPanel(DirectFrame, StateData.StateData):
def __init__(self):
    """Construct the (not yet loaded) panel and choose which pages are
    reachable for this product."""
    self.leftmostPanel = FLPPets
    self.rightmostPanel = FLPPlayers
    internationalProducts = ['DisneyOnline-UK',
                             'DisneyOnline-AP',
                             'JP',
                             'FR',
                             'BR']
    if base.cr.productName in internationalProducts:
        self.rightmostPanel = FLPAll
    DirectFrame.__init__(self, relief=None)
    # One remembered scroll position per panel type.
    self.listScrollIndex = [0] * 15
    self.initialiseoptions(FriendsListPanel)
    StateData.StateData.__init__(self, 'friends-list-done')
    # Maps friendTuple -> DirectButton currently in the scroll list.
    self.friends = {}
    self.textRolloverColor = Vec4(1, 1, 0, 1)
    self.textDownColor = Vec4(0.5, 0.9, 1, 1)
    self.textDisabledColor = Vec4(0.4, 0.8, 0.4, 1)
    self.panelType = FLPOnline
    return
def load(self):
    """Build the panel GUI (title, scroll list, nav buttons) on first use."""
    if self.isLoaded == 1:
        return None
    self.isLoaded = 1
    gui = loader.loadModel('phase_3.5/models/gui/friendslist_gui')
    auxGui = loader.loadModel('phase_3.5/models/gui/avatar_panel_gui')
    self.title = DirectLabel(parent=self, relief=None, text='', text_scale=TTLocalizer.FLPtitle, text_fg=(0, 0.1, 0.4, 1), pos=(0.007, 0.0, 0.2))
    background_image = gui.find('**/FriendsBox_Open')
    self['image'] = background_image
    self.setPos(1.1, 0, 0.54)
    self.scrollList = DirectScrolledList(parent=self, relief=None, incButton_image=(gui.find('**/FndsLst_ScrollUp'),
     gui.find('**/FndsLst_ScrollDN'),
     gui.find('**/FndsLst_ScrollUp_Rllvr'),
     gui.find('**/FndsLst_ScrollUp')), incButton_relief=None, incButton_pos=(0.0, 0.0, -0.316), incButton_image3_color=Vec4(0.6, 0.6, 0.6, 0.6), incButton_scale=(1.0, 1.0, -1.0), decButton_image=(gui.find('**/FndsLst_ScrollUp'),
     gui.find('**/FndsLst_ScrollDN'),
     gui.find('**/FndsLst_ScrollUp_Rllvr'),
     gui.find('**/FndsLst_ScrollUp')), decButton_relief=None, decButton_pos=(0.0, 0.0, 0.117), decButton_image3_color=Vec4(0.6, 0.6, 0.6, 0.6), itemFrame_pos=(-0.17, 0.0, 0.06), itemFrame_relief=None, numItemsVisible=8, items=[])
    # Clip list entries that would extend past the panel's right edge.
    clipper = PlaneNode('clipper')
    clipper.setPlane(Plane(Vec3(-1, 0, 0), Point3(0.2, 0, 0)))
    clipNP = self.scrollList.attachNewNode(clipper)
    self.scrollList.setClipPlane(clipNP)
    self.close = DirectButton(parent=self, relief=None, image=(auxGui.find('**/CloseBtn_UP'), auxGui.find('**/CloseBtn_DN'), auxGui.find('**/CloseBtn_Rllvr')), pos=(0.01, 0, -0.38), command=self.__close)
    # Left/right arrows page between the panel types; the left arrow is
    # mirrored via a negative X scale.
    self.left = DirectButton(parent=self, relief=None, image=(gui.find('**/Horiz_Arrow_UP'),
     gui.find('**/Horiz_Arrow_DN'),
     gui.find('**/Horiz_Arrow_Rllvr'),
     gui.find('**/Horiz_Arrow_UP')), image3_color=Vec4(0.6, 0.6, 0.6, 0.6), pos=(-0.15, 0.0, -0.38), scale=(-1.0, 1.0, 1.0), command=self.__left)
    self.right = DirectButton(parent=self, relief=None, image=(gui.find('**/Horiz_Arrow_UP'),
     gui.find('**/Horiz_Arrow_DN'),
     gui.find('**/Horiz_Arrow_Rllvr'),
     gui.find('**/Horiz_Arrow_UP')), image3_color=Vec4(0.6, 0.6, 0.6, 0.6), pos=(0.17, 0, -0.38), command=self.__right)
    self.newFriend = DirectButton(parent=self, relief=None, pos=(-0.14, 0.0, 0.14), image=(auxGui.find('**/Frnds_Btn_UP'), auxGui.find('**/Frnds_Btn_DN'), auxGui.find('**/Frnds_Btn_RLVR')), text=('', TTLocalizer.FriendsListPanelNewFriend, TTLocalizer.FriendsListPanelNewFriend), text_scale=TTLocalizer.FLPnewFriend, text_fg=(0, 0, 0, 1), text_bg=(1, 1, 1, 1), text_pos=(0.1, -0.085), textMayChange=0, command=self.__newFriend)
    self.secrets = DirectButton(parent=self, relief=None, pos=TTLocalizer.FLPsecretsPos, image=(auxGui.find('**/ChtBx_ChtBtn_UP'), auxGui.find('**/ChtBx_ChtBtn_DN'), auxGui.find('**/ChtBx_ChtBtn_RLVR')), text=('',
     TTLocalizer.FriendsListPanelSecrets,
     TTLocalizer.FriendsListPanelSecrets,
     ''), text_scale=TTLocalizer.FLPsecrets, text_fg=(0, 0, 0, 1), text_bg=(1, 1, 1, 1), text_pos=(-0.04, -0.085), textMayChange=0, command=self.__secrets)
    gui.removeNode()
    auxGui.removeNode()
    return
def unload(self):
    """Tear down the panel GUI; inverse of load()."""
    if not self.isLoaded:
        return None
    self.isLoaded = 0
    # Leave the screen (and unhook events) before destroying widgets.
    self.exit()
    del self.title
    del self.scrollList
    del self.close
    del self.left
    del self.right
    del self.friends
    DirectFrame.destroy(self)
    return None
def makeFriendButton(self, friendTuple, colorChoice = None, bold = 0):
    """Build and return the DirectButton for one friends-list entry,
    or None if the entry's name cannot be resolved yet.

    friendTuple is (avId, flags[, playerId[, showType]]); showType == 1
    shows the player (account) name with the toon name on rollover.
    """
    playerName = None
    toonName = None
    if len(friendTuple) == 2:
        avId, flags = friendTuple
        playerId = None
        showType = 0
    elif len(friendTuple) == 3:
        avId, flags, playerId = friendTuple
        showType = 0
    elif len(friendTuple) == 4:
        avId, flags, playerId, showType = friendTuple
    command = self.__choseFriend
    playerName = None
    if playerId:
        playerInfo = base.cr.playerFriendsManager.playerId2Info.get(playerId, None)
        if playerInfo:
            playerName = playerInfo.playerName
    toonName = None
    hasManager = hasattr(base.cr, 'playerFriendsManager')
    handle = base.cr.identifyFriend(avId)
    if not handle and hasManager:
        handle = base.cr.playerFriendsManager.getAvHandleFromId(avId)
    if handle:
        toonName = handle.getName()
    if showType == 1 and playerId:
        if not playerName:
            return
            # Bug fix: removed unreachable dead code that followed this
            # return (a leftover "print 'ABORTING!!!'" statement).
        friendName = playerName
        rolloverName = toonName
    else:
        if not toonName:
            # Name not cached yet; request the friends map and skip this
            # entry for now (the list refreshes on friendsMapComplete).
            base.cr.fillUpFriendsMap()
            return
        friendName = toonName
        if playerName:
            rolloverName = playerName
        else:
            rolloverName = 'Unknown'
    if playerId:
        command = self.__chosePlayerFriend
        thing = playerId
    else:
        thing = avId
    # Text color reflects chat capability unless an explicit color is given.
    fg = ToontownGlobals.ColorNoChat
    if flags & ToontownGlobals.FriendChat:
        fg = ToontownGlobals.ColorAvatar
    if playerId:
        fg = ToontownGlobals.ColorPlayer
    if colorChoice:
        fg = colorChoice
    fontChoice = ToontownGlobals.getToonFont()
    fontScale = 0.04
    bg = None
    if colorChoice and bold:
        # Bold entries get a darker shadow of the chosen color.
        fontScale = 0.04
        colorS = 0.7
        bg = (colorChoice[0] * colorS,
         colorChoice[1] * colorS,
         colorChoice[2] * colorS,
         colorChoice[3])
    db = DirectButton(relief=None, text=friendName, text_scale=fontScale, text_align=TextNode.ALeft, text_fg=fg, text_shadow=bg, text1_bg=self.textDownColor, text2_bg=self.textRolloverColor, text3_fg=self.textDisabledColor, text_font=fontChoice, textMayChange=0, command=command, extraArgs=[thing, showType])
    if playerId:
        # Show the alternate name on the rollover state only.
        accountName = DirectLabel(parent=db, pos=Vec3(-0.02, 0, 0), text=rolloverName, text_fg=(0, 0, 0, 1), text_bg=(1, 1, 1, 1), text_pos=(0, 0), text_scale=0.045, text_align=TextNode.ARight)
        accountName.reparentTo(db.stateNodePath[2])
    return db
def enter(self):
    """Show the panel, populate the list, and hook friend-list events."""
    if self.isEntered == 1:
        return None
    self.isEntered = 1
    if self.isLoaded == 0:
        self.load()
    # Hide the HUD friends button while the list itself is open.
    base.localAvatar.obscureFriendsListButton(1)
    # Close any open avatar detail panel; the two UIs conflict.
    if ToonAvatarPanel.ToonAvatarPanel.currentAvatarPanel:
        ToonAvatarPanel.ToonAvatarPanel.currentAvatarPanel.cleanup()
        ToonAvatarPanel.ToonAvatarPanel.currentAvatarPanel = None
    self.__updateScrollList()
    self.__updateTitle()
    self.__updateArrows()
    self.show()
    self.accept('friendOnline', self.__friendOnline)
    self.accept('friendPlayers', self.__friendPlayers)
    self.accept('friendOffline', self.__friendOffline)
    self.accept('friendsListChanged', self.__friendsListChanged)
    self.accept('ignoreListChanged', self.__ignoreListChanged)
    self.accept('friendsMapComplete', self.__friendsListChanged)
    self.accept(OTPGlobals.PlayerFriendAddEvent, self.__friendsListChanged)
    self.accept(OTPGlobals.PlayerFriendUpdateEvent, self.__friendsListChanged)
    return
def exit(self):
    """Hide the panel and detach all event hooks; inverse of enter()."""
    if self.isEntered == 0:
        return None
    self.isEntered = 0
    # Remember the scroll position so re-entering restores it.
    self.listScrollIndex[self.panelType] = self.scrollList.index
    self.hide()
    base.cr.cleanPetsFromFriendsMap()
    self.ignore('friendOnline')
    # Bug fix: 'friendPlayers' is accepted in enter() but was never
    # ignored here, leaving a stale handler registered after exit.
    self.ignore('friendPlayers')
    self.ignore('friendOffline')
    self.ignore('friendsListChanged')
    self.ignore('ignoreListChanged')
    self.ignore('friendsMapComplete')
    self.ignore(OTPGlobals.PlayerFriendAddEvent)
    self.ignore(OTPGlobals.PlayerFriendUpdateEvent)
    base.localAvatar.obscureFriendsListButton(-1)
    messenger.send(self.doneEvent)
    return None
def __close(self):
    """Close-button handler: wake the screensaver timer and dismiss."""
    messenger.send('wakeup')
    self.exit()
def __left(self):
    """Page one panel type to the left, saving the scroll position."""
    messenger.send('wakeup')
    self.listScrollIndex[self.panelType] = self.scrollList.index
    if self.panelType <= self.leftmostPanel:
        return
    self.panelType -= 1
    self.__updateScrollList()
    self.__updateTitle()
    self.__updateArrows()
def __right(self):
    """Page one panel type to the right, saving the scroll position."""
    messenger.send('wakeup')
    self.listScrollIndex[self.panelType] = self.scrollList.index
    if self.panelType >= self.rightmostPanel:
        return
    self.panelType += 1
    self.__updateScrollList()
    self.__updateTitle()
    self.__updateArrows()
def __secrets(self):
    """Secrets-button handler: open the avatar secret-friends dialog."""
    messenger.send('wakeup')
    ToontownFriendSecret.showFriendSecret(ToontownFriendSecret.AvatarSecret)
def __newFriend(self):
    """New-friend-button handler: start the make-a-friend flow with no
    preselected avatar."""
    messenger.send('wakeup')
    messenger.send('friendAvatar', [None, None, None])
    return
def __choseFriend(self, friendId, showType = 0):
    """Click handler for an avatar friend: resolve a handle for the id
    and simulate a nametag click on it."""
    messenger.send('wakeup')
    handle = base.cr.identifyFriend(friendId)
    if not handle and hasattr(base.cr, 'playerFriendsManager'):
        handle = base.cr.playerFriendsManager.getAvHandleFromId(friendId)
    if handle != None:
        self.notify.info("Clicked on name in friend's list. doId = %s" % handle.doId)
        messenger.send('clickedNametag', [handle])
    return
def __chosePlayerFriend(self, friendId, showType = 1):
    """Click handler for a player (account) friend.

    Bug fix: the original dereferenced playerFriendInfo.avatarId before
    checking the record for None, raising AttributeError whenever the
    info was missing; the None check now happens first.
    """
    messenger.send('wakeup')
    playerFriendInfo = base.cr.playerFriendsManager.playerId2Info.get(friendId)
    if playerFriendInfo is None:
        return
    hasManager = hasattr(base.cr, 'playerFriendsManager')
    handle = base.cr.identifyFriend(playerFriendInfo.avatarId)
    if not handle and hasManager:
        handle = base.cr.playerFriendsManager.getAvHandleFromId(playerFriendInfo.avatarId)
    self.notify.info("Clicked on name in player friend's list. Id = %s" % friendId)
    messenger.send('clickedNametagPlayer', [handle, friendId, showType])
    return
def __updateScrollList(self):
newFriends = []
petFriends = []
freeChatOneRef = []
speedChatOneRef = []
freeChatDouble = []
speedChatDouble = []
offlineFriends = []
if self.panelType == FLPPlayers:
playerFriendList = base.cr.playerFriendsManager.playerFriendsList
for playerFriendId in playerFriendList:
if base.cr.playerFriendsManager.playerId2Info.has_key(playerFriendId):
playerFriendInfo = base.cr.playerFriendsManager.playerId2Info.get(playerFriendId)
if playerFriendInfo.onlineYesNo:
if playerFriendInfo.understandableYesNo:
if playerFriendInfo.avatarId:
freeChatDouble.insert(0, (playerFriendInfo.avatarId,
0,
playerFriendId,
1))
else:
freeChatOneRef.insert(0, (0,
0,
playerFriendId,
1))
elif playerFriendInfo.avatarId:
speedChatDouble.insert(0, (playerFriendInfo.avatarId,
0,
playerFriendId,
1))
else:
speedChatOneRef.insert(0, (0,
0,
playerFriendId,
1))
elif playerFriendInfo.understandableYesNo:
freeChatOneRef.insert(0, (0,
0,
playerFriendId,
1))
else:
speedChatOneRef.insert(0, (0,
0,
playerFriendId,
1))
if self.panelType == FLPOnlinePlayers:
playerFriendList = base.cr.playerFriendsManager.playerFriendsList
for playerFriendId in playerFriendList:
if base.cr.playerFriendsManager.playerId2Info.has_key(playerFriendId):
playerFriendInfo = base.cr.playerFriendsManager.playerId2Info.get(playerFriendId)
if playerFriendInfo.onlineYesNo:
if playerFriendInfo.understandableYesNo:
if playerFriendInfo.avatarId:
freeChatDouble.insert(0, (playerFriendInfo.avatarId,
0,
playerFriendId,
1))
else:
freeChatOneRef.insert(0, (0,
0,
playerFriendId,
1))
elif playerFriendInfo.avatarId:
speedChatDouble.insert(0, (playerFriendInfo.avatarId,
0,
playerFriendId,
1))
else:
speedChatOneRef.insert(0, (0,
0,
playerFriendId,
1))
if self.panelType == FLPAll:
if base.friendMode == 0:
for friendPair in base.localAvatar.friendsList:
playerId = 0
if hasattr(base.cr, 'playerFriendsManager'):
playerId = base.cr.playerFriendsManager.findPlayerIdFromAvId(friendPair[0])
if playerId:
if friendPair[1] & ToontownGlobals.FriendChat:
freeChatDouble.insert(0, (friendPair[0],
friendPair[1],
playerId,
0))
else:
speedChatDouble.insert(0, (friendPair[0],
friendPair[1],
playerId,
0))
elif base.cr.isFriendOnline(friendPair[0]):
if friendPair[1] & ToontownGlobals.FriendChat:
freeChatOneRef.insert(0, (friendPair[0],
friendPair[1],
0,
0))
else:
speedChatOneRef.insert(0, (friendPair[0],
friendPair[1],
0,
0))
elif friendPair[1] & ToontownGlobals.FriendChat:
freeChatOneRef.insert(0, (friendPair[0],
friendPair[1],
0,
0))
else:
speedChatOneRef.insert(0, (friendPair[0],
friendPair[1],
0,
0))
else:
offlineFriends.append((friendPair[0],
friendPair[1],
playerId,
0))
if hasattr(base.cr, 'playerFriendsManager'):
for avatarId in base.cr.playerFriendsManager.getAllOnlinePlayerAvatars():
playerId = base.cr.playerFriendsManager.findPlayerIdFromAvId(avatarId)
playerFriendInfo = base.cr.playerFriendsManager.playerId2Info.get(playerId)
if not base.cr.playerFriendsManager.askAvatarKnownElseWhere(avatarId):
if playerFriendInfo.understandableYesNo:
freeChatDouble.insert(0, (avatarId,
0,
playerId,
0))
else:
speedChatDouble.insert(0, (avatarId,
0,
playerId,
0))
elif base.friendMode == 1:
for friendId in base.cr.avatarFriendsManager.avatarFriendsList:
playerId = base.cr.playerFriendsManager.findPlayerIdFromAvId(friendId)
newFriends.append((friendId,
0,
playerId,
0))
if self.panelType == FLPOnline:
if base.friendMode == 0:
for friendPair in base.localAvatar.friendsList:
if hasattr(base.cr, 'playerFriendsManager') and base.cr.isFriendOnline(friendPair[0]):
playerId = base.cr.playerFriendsManager.findPlayerIdFromAvId(friendPair[0])
if playerId:
if friendPair[1] & ToontownGlobals.FriendChat:
freeChatDouble.insert(0, (friendPair[0],
friendPair[1],
playerId,
0))
else:
speedChatDouble.insert(0, (friendPair[0],
friendPair[1],
playerId,
0))
elif friendPair[1] & ToontownGlobals.FriendChat:
freeChatOneRef.insert(0, (friendPair[0],
friendPair[1],
0,
0))
else:
speedChatOneRef.insert(0, (friendPair[0],
friendPair[1],
0,
0))
elif base.cr.isFriendOnline(friendPair[0]):
offlineFriends.append((friendPair[0],
friendPair[1],
0,
0))
if hasattr(base.cr, 'playerFriendsManager'):
for avatarId in base.cr.playerFriendsManager.getAllOnlinePlayerAvatars():
playerId = base.cr.playerFriendsManager.findPlayerIdFromAvId(avatarId)
playerFriendInfo = base.cr.playerFriendsManager.playerId2Info.get(playerId)
if not base.cr.playerFriendsManager.askAvatarKnownElseWhere(avatarId):
if playerFriendInfo.understandableYesNo:
freeChatDouble.insert(0, (avatarId,
0,
playerId,
0))
else:
speedChatDouble.insert(0, (avatarId,
0,
playerId,
0))
elif base.friendMode == 1:
for friendId in base.cr.avatarFriendsManager.avatarFriendsList:
friendInfo = base.cr.avatarFriendsManager.avatarId2Info[friendId]
playerId = base.cr.playerFriendsManager.findPlayerIdFromAvId(friendPair[0])
if friendInfo.onlineYesNo:
newFriends.insert(0, (friendId,
0,
playerId,
0))
if self.panelType == FLPPets:
for objId, obj in base.cr.doId2do.items():
from toontown.pets import DistributedPet
if isinstance(obj, DistributedPet.DistributedPet):
friendPair = (objId, 0)
petFriends.append(friendPair)
if self.panelType == FLPEnemies:
for ignored in base.localAvatar.ignoreList:
newFriends.append((ignored, 0))
if self.panelType == FLPAll or self.panelType == FLPOnline:
if base.wantPets and base.localAvatar.hasPet():
petFriends.insert(0, (base.localAvatar.getPetId(), 0))
for friendPair in self.friends.keys():
friendButton = self.friends[friendPair]
self.scrollList.removeItem(friendButton, refresh=0)
friendButton.destroy()
del self.friends[friendPair]
newFriends.sort(compareFriends)
petFriends.sort(compareFriends)
freeChatOneRef.sort(compareFriends)
speedChatOneRef.sort(compareFriends)
freeChatDouble.sort(compareFriends)
speedChatDouble.sort(compareFriends)
offlineFriends.sort(compareFriends)
for friendPair in newFriends:
if not self.friends.has_key(friendPair):
friendButton = self.makeFriendButton(friendPair)
if friendButton:
self.scrollList.addItem(friendButton, refresh=0)
self.friends[friendPair] = friendButton
for friendPair in petFriends:
if not self.friends.has_key(friendPair):
friendButton = self.makeFriendButton(friendPair, ToontownGlobals.ColorNoChat, 0)
if friendButton:
self.scrollList.addItem(friendButton, refresh=0)
self.friends[friendPair] = friendButton
for friendPair in freeChatDouble:
if not self.friends.has_key(friendPair):
friendButton = self.makeFriendButton(friendPair, ToontownGlobals.ColorFreeChat, 1)
if friendButton:
self.scrollList.addItem(friendButton, refresh=0)
self.friends[friendPair] = friendButton
for friendPair in freeChatOneRef:
if not self.friends.has_key(friendPair):
friendButton = self.makeFriendButton(friendPair, ToontownGlobals.ColorFreeChat, 0)
if friendButton:
self.scrollList.addItem(friendButton, refresh=0)
self.friends[friendPair] = friendButton
for friendPair in speedChatDouble:
if not self.friends.has_key(friendPair):
friendButton = self.makeFriendButton(friendPair, ToontownGlobals.ColorSpeedChat, 1)
if friendButton:
self.scrollList.addItem(friendButton, refresh=0)
self.friends[friendPair] = friendButton
for friendPair in speedChatOneRef:
if not self.friends.has_key(friendPair):
friendButton = self.makeFriendButton(friendPair, ToontownGlobals.ColorSpeedChat, 0)
if friendButton:
self.scrollList.addItem(friendButton, refresh=0)
self.friends[friendPair] = friendButton
for friendPair in offlineFriends:
if not self.friends.has_key(friendPair):
friendButton = self.makeFriendButton(friendPair, ToontownGlobals.ColorNoChat, 0)
if friendButton:
self.scrollList.addItem(friendButton, refresh=0)
self.friends[friendPair] = friendButton
self.scrollList.index = self.listScrollIndex[self.panelType]
self.scrollList.refresh()
def __updateTitle(self):
if self.panelType == FLPOnline:
self.title['text'] = TTLocalizer.FriendsListPanelOnlineFriends
elif self.panelType == FLPAll:
self.title['text'] = TTLocalizer.FriendsListPanelAllFriends
elif self.panelType == FLPPets:
self.title['text'] = TTLocalizer.FriendsListPanelPets
elif self.panelType == FLPPlayers:
self.title['text'] = TTLocalizer.FriendsListPanelPlayers
elif self.panelType == FLPOnlinePlayers:
self.title['text'] = TTLocalizer.FriendsListPanelOnlinePlayers
else:
self.title['text'] = TTLocalizer.FriendsListPanelIgnoredFriends
self.title.resetFrameSize()
def __updateArrows(self):
if self.panelType == self.leftmostPanel:
self.left['state'] = 'inactive'
else:
self.left['state'] = 'normal'
if self.panelType == self.rightmostPanel:
self.right['state'] = 'inactive'
else:
self.right['state'] = 'normal'
def __friendOnline(self, doId, commonChatFlags, whitelistChatFlags):
if self.panelType == FLPOnline:
self.__updateScrollList()
def __friendOffline(self, doId):
if self.panelType == FLPOnline:
self.__updateScrollList()
def __friendPlayers(self, doId):
if self.panelType == FLPPlayers:
self.__updateScrollList()
def __friendsListChanged(self, arg1 = None, arg2 = None):
if self.panelType != FLPEnemies:
self.__updateScrollList()
def __ignoreListChanged(self):
if self.panelType == FLPEnemies:
self.__updateScrollList()
| |
"""Interpolation algorithms using piecewise cubic polynomials."""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy._lib.six import string_types
from . import BPoly, PPoly
from .polyint import _isscalar
from scipy._lib._util import _asarray_validated
from scipy.linalg import solve_banded, solve
__all__ = ["CubicHermiteSpline", "PchipInterpolator", "pchip_interpolate",
"Akima1DInterpolator", "CubicSpline"]
def prepare_input(x, y, axis, dydx=None):
    """Validate and canonicalize input for the cubic interpolators.

    Converts ``x``, ``y`` (and ``dydx`` when given) to ndarrays, enforces
    real, 1-d, strictly increasing ``x`` with at least 2 finite points,
    checks that ``y`` (and ``dydx``) are finite with a matching length
    along ``axis``, normalizes ``axis`` into ``[0, y.ndim)`` and rolls the
    interpolation axis of ``y`` (and ``dydx``) to position 0.

    Returns the tuple ``(x, dx, y, axis, dydx)``, where ``dx`` holds the
    (strictly positive) consecutive differences of ``x``.
    """
    x = np.asarray(x)
    y = np.asarray(y)
    if np.issubdtype(x.dtype, np.complexfloating):
        raise ValueError("`x` must contain real values.")
    x = x.astype(float)
    # The working dtype is complex as soon as y or dydx is complex.
    dtype = complex if np.issubdtype(y.dtype, np.complexfloating) else float
    if dydx is not None:
        dydx = np.asarray(dydx)
        if y.shape != dydx.shape:
            raise ValueError("The shapes of `y` and `dydx` must be identical.")
        if np.issubdtype(dydx.dtype, np.complexfloating):
            dtype = complex
        dydx = dydx.astype(dtype, copy=False)
    y = y.astype(dtype, copy=False)
    axis = axis % y.ndim
    if x.ndim != 1:
        raise ValueError("`x` must be 1-dimensional.")
    if x.shape[0] < 2:
        raise ValueError("`x` must contain at least 2 elements.")
    if x.shape[0] != y.shape[axis]:
        raise ValueError("The length of `y` along `axis`={0} doesn't "
                         "match the length of `x`".format(axis))
    if not np.isfinite(x).all():
        raise ValueError("`x` must contain only finite values.")
    if not np.isfinite(y).all():
        raise ValueError("`y` must contain only finite values.")
    if dydx is not None and not np.isfinite(dydx).all():
        raise ValueError("`dydx` must contain only finite values.")
    dx = np.diff(x)
    if (dx <= 0).any():
        raise ValueError("`x` must be strictly increasing sequence.")
    # Roll the interpolation axis to the front; callers index segments
    # along axis 0.
    y = np.rollaxis(y, axis)
    if dydx is not None:
        dydx = np.rollaxis(dydx, axis)
    return x, dx, y, axis, dydx
class CubicHermiteSpline(PPoly):
    """Piecewise-cubic interpolator matching values and first derivatives.
    The result is represented as a `PPoly` instance.
    Parameters
    ----------
    x : array_like, shape (n,)
        1-d array containing values of the independent variable.
        Values must be real, finite and in strictly increasing order.
    y : array_like
        Array containing values of the dependent variable. It can have
        arbitrary number of dimensions, but the length along ``axis``
        (see below) must match the length of ``x``. Values must be finite.
    dydx : array_like
        Array containing derivatives of the dependent variable. It can have
        arbitrary number of dimensions, but the length along ``axis``
        (see below) must match the length of ``x``. Values must be finite.
    axis : int, optional
        Axis along which `y` is assumed to be varying. Meaning that for
        ``x[i]`` the corresponding values are ``np.take(y, i, axis=axis)``.
        Default is 0.
    extrapolate : {bool, 'periodic', None}, optional
        If bool, determines whether to extrapolate to out-of-bounds points
        based on first and last intervals, or to return NaNs. If 'periodic',
        periodic extrapolation is used. If None (default), it is set to True.
    Attributes
    ----------
    x : ndarray, shape (n,)
        Breakpoints. The same ``x`` which was passed to the constructor.
    c : ndarray, shape (4, n-1, ...)
        Coefficients of the polynomials on each segment. The trailing
        dimensions match the dimensions of `y`, excluding ``axis``.
        For example, if `y` is 1-d, then ``c[k, i]`` is a coefficient for
        ``(x-x[i])**(3-k)`` on the segment between ``x[i]`` and ``x[i+1]``.
    axis : int
        Interpolation axis. The same axis which was passed to the
        constructor.
    Methods
    -------
    __call__
    derivative
    antiderivative
    integrate
    roots
    See Also
    --------
    Akima1DInterpolator
    PchipInterpolator
    CubicSpline
    PPoly
    Notes
    -----
    If you want to create a higher-order spline matching higher-order
    derivatives, use `BPoly.from_derivatives`.
    References
    ----------
    .. [1] `Cubic Hermite spline
           <https://en.wikipedia.org/wiki/Cubic_Hermite_spline>`_
           on Wikipedia.
    """
    def __init__(self, x, y, dydx, axis=0, extrapolate=None):
        if extrapolate is None:
            extrapolate = True
        x, dx, y, axis, dydx = prepare_input(x, y, axis, dydx)
        # Reshape dx so it broadcasts against the trailing dimensions of y.
        dxr = dx.reshape([dx.shape[0]] + [1] * (y.ndim - 1))
        # Secant slope of each segment.
        slope = np.diff(y, axis=0) / dxr
        # On segment i, with s = x - x[i], matching values and first
        # derivatives at both ends gives the standard Hermite coefficients:
        #   c[0] = (dydx[i] + dydx[i+1] - 2*slope) / dx**2
        #   c[1] = (3*slope - 2*dydx[i] - dydx[i+1]) / dx
        #   c[2] = dydx[i]
        #   c[3] = y[i]
        # (t below is c[0]*dx, computed first so the remaining coefficients
        # can reuse it.)
        t = (dydx[:-1] + dydx[1:] - 2 * slope) / dxr
        c = np.empty((4, len(x) - 1) + y.shape[1:], dtype=t.dtype)
        c[0] = t / dxr
        c[1] = (slope - dydx[:-1]) / dxr - t
        c[2] = dydx[:-1]
        c[3] = y[:-1]
        super(CubicHermiteSpline, self).__init__(c, x, extrapolate=extrapolate)
        self.axis = axis
class PchipInterpolator(CubicHermiteSpline):
    r"""PCHIP 1-d monotonic cubic interpolation.
    ``x`` and ``y`` are arrays of values used to approximate some function f,
    with ``y = f(x)``. The interpolant uses monotonic cubic splines
    to find the value of new points. (PCHIP stands for Piecewise Cubic
    Hermite Interpolating Polynomial).
    Parameters
    ----------
    x : ndarray
        A 1-D array of monotonically increasing real values. ``x`` cannot
        include duplicate values (otherwise f is overspecified)
    y : ndarray
        A 1-D array of real values. ``y``'s length along the interpolation
        axis must be equal to the length of ``x``. If N-D array, use ``axis``
        parameter to select correct axis.
    axis : int, optional
        Axis in the y array corresponding to the x-coordinate values.
    extrapolate : bool, optional
        Whether to extrapolate to out-of-bounds points based on first
        and last intervals, or to return NaNs.
    Methods
    -------
    __call__
    derivative
    antiderivative
    roots
    See Also
    --------
    CubicHermiteSpline
    Akima1DInterpolator
    CubicSpline
    PPoly
    Notes
    -----
    The interpolator preserves monotonicity in the interpolation data and does
    not overshoot if the data is not smooth.
    The first derivatives are guaranteed to be continuous, but the second
    derivatives may jump at :math:`x_k`.
    Determines the derivatives at the points :math:`x_k`, :math:`f'_k`,
    by using PCHIP algorithm [1]_.
    Let :math:`h_k = x_{k+1} - x_k`, and :math:`d_k = (y_{k+1} - y_k) / h_k`
    are the slopes at internal points :math:`x_k`.
    If the signs of :math:`d_k` and :math:`d_{k-1}` are different or either of
    them equals zero, then :math:`f'_k = 0`. Otherwise, it is given by the
    weighted harmonic mean
    .. math::
        \frac{w_1 + w_2}{f'_k} = \frac{w_1}{d_{k-1}} + \frac{w_2}{d_k}
    where :math:`w_1 = 2 h_k + h_{k-1}` and :math:`w_2 = h_k + 2 h_{k-1}`.
    The end slopes are set using a one-sided scheme [2]_.
    References
    ----------
    .. [1] F. N. Fritsch and R. E. Carlson, Monotone Piecewise Cubic Interpolation,
           SIAM J. Numer. Anal., 17(2), 238 (1980).
           :doi:`10.1137/0717021`.
    .. [2] see, e.g., C. Moler, Numerical Computing with Matlab, 2004.
           :doi:`10.1137/1.9780898717952`
    """
    def __init__(self, x, y, axis=0, extrapolate=None):
        x, _, y, axis, _ = prepare_input(x, y, axis)
        # Reshape x so it broadcasts against the trailing dimensions of y
        # when segment slopes are formed in _find_derivatives.
        xp = x.reshape((x.shape[0],) + (1,)*(y.ndim-1))
        dk = self._find_derivatives(xp, y)
        # prepare_input already rolled the interpolation axis of y to 0,
        # hence axis=0 here; the original axis is recorded afterwards.
        super(PchipInterpolator, self).__init__(x, y, dk, axis=0,
                                                extrapolate=extrapolate)
        self.axis = axis
    @staticmethod
    def _edge_case(h0, h1, m0, m1):
        # one-sided three-point estimate for the derivative
        d = ((2*h0 + h1)*m0 - h0*m1) / (h0 + h1)
        # try to preserve shape:
        # zero out estimates whose sign disagrees with the adjacent slope,
        # and clip to 3*m0 where the estimate would overshoot (Moler,
        # Numerical Computing with MATLAB, Chap 3.4).
        mask = np.sign(d) != np.sign(m0)
        mask2 = (np.sign(m0) != np.sign(m1)) & (np.abs(d) > 3.*np.abs(m0))
        mmm = (~mask) & mask2
        d[mask] = 0.
        d[mmm] = 3.*m0[mmm]
        return d
    @staticmethod
    def _find_derivatives(x, y):
        # Determine the derivatives at the points y_k, d_k, by using
        # the PCHIP algorithm:
        # Let m_k be the slope of the kth segment (between k and k+1).
        # If m_k=0 or m_{k-1}=0 or sgn(m_k) != sgn(m_{k-1}) then d_k == 0,
        # else use the weighted harmonic mean:
        #   w_1 = 2h_k + h_{k-1}, w_2 = h_k + 2h_{k-1}
        #   1/d_k = (w_1/m_{k-1} + w_2/m_k) / (w_1 + w_2)
        # where h_k is the spacing between x_k and x_{k+1}.
        y_shape = y.shape
        if y.ndim == 1:
            # So that _edge_case doesn't end up assigning to scalars
            x = x[:, None]
            y = y[:, None]
        hk = x[1:] - x[:-1]
        mk = (y[1:] - y[:-1]) / hk
        if y.shape[0] == 2:
            # edge case: only have two points, use linear interpolation
            dk = np.zeros_like(y)
            dk[0] = mk
            dk[1] = mk
            return dk.reshape(y_shape)
        smk = np.sign(mk)
        condition = (smk[1:] != smk[:-1]) | (mk[1:] == 0) | (mk[:-1] == 0)
        w1 = 2*hk[1:] + hk[:-1]
        w2 = hk[1:] + 2*hk[:-1]
        # values where division by zero occurs will be excluded
        # by 'condition' afterwards
        with np.errstate(divide='ignore'):
            whmean = (w1/mk[:-1] + w2/mk[1:]) / (w1 + w2)
        dk = np.zeros_like(y)
        dk[1:-1][condition] = 0.0
        dk[1:-1][~condition] = 1.0 / whmean[~condition]
        # special case endpoints, as suggested in
        # Cleve Moler, Numerical Computing with MATLAB, Chap 3.4
        dk[0] = PchipInterpolator._edge_case(hk[0], hk[1], mk[0], mk[1])
        dk[-1] = PchipInterpolator._edge_case(hk[-1], hk[-2], mk[-1], mk[-2])
        return dk.reshape(y_shape)
def pchip_interpolate(xi, yi, x, der=0, axis=0):
    """
    Convenience wrapper around `PchipInterpolator`.

    Builds a monotonic cubic (PCHIP) interpolant through ``yi = f(xi)``
    and evaluates it — or its derivatives — at ``x``.
    See `scipy.interpolate.PchipInterpolator` for details.
    Parameters
    ----------
    xi : array_like
        A sorted list of x-coordinates, of length N.
    yi : array_like
        A 1-D array of real values. `yi`'s length along the interpolation
        axis must be equal to the length of `xi`. If N-D array, use axis
        parameter to select correct axis.
    x : scalar or array_like
        Of length M.
    der : int or list, optional
        Derivatives to extract. The 0-th derivative can be included to
        return the function value.
    axis : int, optional
        Axis in the yi array corresponding to the x-coordinate values.
    See Also
    --------
    PchipInterpolator
    Returns
    -------
    y : scalar or array_like
        The result, of length R or length M or M by R,
    """
    interp = PchipInterpolator(xi, yi, axis=axis)
    # der == 0 evaluates the interpolant itself; a scalar order evaluates
    # a single derivative; an iterable of orders yields one result each.
    if der == 0:
        return interp(x)
    if _isscalar(der):
        return interp.derivative(der)(x)
    return [interp.derivative(nu)(x) for nu in der]
class Akima1DInterpolator(CubicHermiteSpline):
    """
    Akima interpolator
    Fit piecewise cubic polynomials, given vectors x and y. The interpolation
    method by Akima uses a continuously differentiable sub-spline built from
    piecewise cubic polynomials. The resultant curve passes through the given
    data points and will appear smooth and natural.
    Parameters
    ----------
    x : ndarray, shape (m, )
        1-D array of monotonically increasing real values.
    y : ndarray, shape (m, ...)
        N-D array of real values. The length of ``y`` along the first axis
        must be equal to the length of ``x``.
    axis : int, optional
        Specifies the axis of ``y`` along which to interpolate. Interpolation
        defaults to the first axis of ``y``.
    Methods
    -------
    __call__
    derivative
    antiderivative
    roots
    See Also
    --------
    PchipInterpolator
    CubicSpline
    PPoly
    Notes
    -----
    .. versionadded:: 0.14
    Use only for precise data, as the fitted curve passes through the given
    points exactly. This routine is useful for plotting a pleasingly smooth
    curve through a few given points for purposes of plotting.
    References
    ----------
    [1] A new method of interpolation and smooth curve fitting based
        on local procedures. Hiroshi Akima, J. ACM, October 1970, 17(4),
        589-602.
    """
    def __init__(self, x, y, axis=0):
        # Original implementation in MATLAB by N. Shamsundar (BSD licensed), see
        # https://www.mathworks.com/matlabcentral/fileexchange/1814-akima-interpolation
        x, dx, y, axis, _ = prepare_input(x, y, axis)
        # determine slopes between breakpoints; m carries two extrapolated
        # slots at each end (hence x.size + 3 segment slopes in total)
        m = np.empty((x.size + 3, ) + y.shape[1:])
        dx = dx[(slice(None), ) + (None, ) * (y.ndim - 1)]
        m[2:-2] = np.diff(y, axis=0) / dx
        # add two additional points on the left ...
        m[1] = 2. * m[2] - m[3]
        m[0] = 2. * m[1] - m[2]
        # ... and on the right
        m[-2] = 2. * m[-3] - m[-4]
        m[-1] = 2. * m[-2] - m[-3]
        # if m1 == m2 != m3 == m4, the slope at the breakpoint is not defined.
        # This is the fill value:
        t = .5 * (m[3:] + m[:-3])
        # get the denominator of the slope t
        dm = np.abs(np.diff(m, axis=0))
        f1 = dm[2:]
        f2 = dm[:-2]
        f12 = f1 + f2
        # This is the mask of where the slope at breakpoint is defined
        # (the 1e-9 factor guards against division by a vanishing f12):
        ind = np.nonzero(f12 > 1e-9 * np.max(f12))
        x_ind, y_ind = ind[0], ind[1:]
        # Set the slope at breakpoint
        t[ind] = (f1[ind] * m[(x_ind + 1,) + y_ind] +
                  f2[ind] * m[(x_ind + 2,) + y_ind]) / f12[ind]
        super(Akima1DInterpolator, self).__init__(x, y, t, axis=0,
                                                  extrapolate=False)
        self.axis = axis
    def extend(self, c, x, right=True):
        raise NotImplementedError("Extending a 1D Akima interpolator is not "
                                  "yet implemented")
    # These are inherited from PPoly, but they do not produce an Akima
    # interpolator. Hence stub them out.
    @classmethod
    def from_spline(cls, tck, extrapolate=None):
        raise NotImplementedError("This method does not make sense for "
                                  "an Akima interpolator.")
    @classmethod
    def from_bernstein_basis(cls, bp, extrapolate=None):
        raise NotImplementedError("This method does not make sense for "
                                  "an Akima interpolator.")
class CubicSpline(CubicHermiteSpline):
    """Cubic spline data interpolator.
    Interpolate data with a piecewise cubic polynomial which is twice
    continuously differentiable [1]_. The result is represented as a `PPoly`
    instance with breakpoints matching the given data.
    Parameters
    ----------
    x : array_like, shape (n,)
        1-d array containing values of the independent variable.
        Values must be real, finite and in strictly increasing order.
    y : array_like
        Array containing values of the dependent variable. It can have
        arbitrary number of dimensions, but the length along ``axis``
        (see below) must match the length of ``x``. Values must be finite.
    axis : int, optional
        Axis along which `y` is assumed to be varying. Meaning that for
        ``x[i]`` the corresponding values are ``np.take(y, i, axis=axis)``.
        Default is 0.
    bc_type : string or 2-tuple, optional
        Boundary condition type. Two additional equations, given by the
        boundary conditions, are required to determine all coefficients of
        polynomials on each segment [2]_.
        If `bc_type` is a string, then the specified condition will be applied
        at both ends of a spline. Available conditions are:
        * 'not-a-knot' (default): The first and second segment at a curve end
          are the same polynomial. It is a good default when there is no
          information on boundary conditions.
        * 'periodic': The interpolated function is assumed to be periodic
          of period ``x[-1] - x[0]``. The first and last value of `y` must be
          identical: ``y[0] == y[-1]``. This boundary condition will result in
          ``y'[0] == y'[-1]`` and ``y''[0] == y''[-1]``.
        * 'clamped': The first derivative at curve ends is zero. Assuming
          a 1D `y`, ``bc_type=((1, 0.0), (1, 0.0))`` is the same condition.
        * 'natural': The second derivative at curve ends is zero. Assuming
          a 1D `y`, ``bc_type=((2, 0.0), (2, 0.0))`` is the same condition.
        If `bc_type` is a 2-tuple, the first and the second value will be
        applied at the curve start and end respectively. The tuple values can
        be one of the previously mentioned strings (except 'periodic') or a
        tuple `(order, deriv_values)` allowing to specify arbitrary
        derivatives at curve ends:
        * `order`: the derivative order, 1 or 2.
        * `deriv_value`: array_like containing derivative values, shape must
          be the same as `y`, excluding ``axis`` dimension. For example, if
          `y` is 1D, then `deriv_value` must be a scalar. If `y` is 3D with
          the shape (n0, n1, n2) and axis=2, then `deriv_value` must be 2D
          and have the shape (n0, n1).
    extrapolate : {bool, 'periodic', None}, optional
        If bool, determines whether to extrapolate to out-of-bounds points
        based on first and last intervals, or to return NaNs. If 'periodic',
        periodic extrapolation is used. If None (default), ``extrapolate`` is
        set to 'periodic' for ``bc_type='periodic'`` and to True otherwise.
    Attributes
    ----------
    x : ndarray, shape (n,)
        Breakpoints. The same ``x`` which was passed to the constructor.
    c : ndarray, shape (4, n-1, ...)
        Coefficients of the polynomials on each segment. The trailing
        dimensions match the dimensions of `y`, excluding ``axis``.
        For example, if `y` is 1-d, then ``c[k, i]`` is a coefficient for
        ``(x-x[i])**(3-k)`` on the segment between ``x[i]`` and ``x[i+1]``.
    axis : int
        Interpolation axis. The same axis which was passed to the
        constructor.
    Methods
    -------
    __call__
    derivative
    antiderivative
    integrate
    roots
    See Also
    --------
    Akima1DInterpolator
    PchipInterpolator
    PPoly
    Notes
    -----
    Parameters `bc_type` and ``extrapolate`` work independently, i.e. the
    former controls only construction of a spline, and the latter only
    evaluation.
    When a boundary condition is 'not-a-knot' and n = 2, it is replaced by
    a condition that the first derivative is equal to the linear interpolant
    slope. When both boundary conditions are 'not-a-knot' and n = 3, the
    solution is sought as a parabola passing through given points.
    When 'not-a-knot' boundary conditions are applied to both ends, the
    resulting spline will be the same as returned by `splrep` (with ``s=0``)
    and `InterpolatedUnivariateSpline`, but these two methods use a
    representation in B-spline basis.
    .. versionadded:: 0.18.0
    Examples
    --------
    In this example the cubic spline is used to interpolate a sampled sinusoid.
    You can see that the spline continuity property holds for the first and
    second derivatives and violates only for the third derivative.
    >>> from scipy.interpolate import CubicSpline
    >>> import matplotlib.pyplot as plt
    >>> x = np.arange(10)
    >>> y = np.sin(x)
    >>> cs = CubicSpline(x, y)
    >>> xs = np.arange(-0.5, 9.6, 0.1)
    >>> fig, ax = plt.subplots(figsize=(6.5, 4))
    >>> ax.plot(x, y, 'o', label='data')
    >>> ax.plot(xs, np.sin(xs), label='true')
    >>> ax.plot(xs, cs(xs), label="S")
    >>> ax.plot(xs, cs(xs, 1), label="S'")
    >>> ax.plot(xs, cs(xs, 2), label="S''")
    >>> ax.plot(xs, cs(xs, 3), label="S'''")
    >>> ax.set_xlim(-0.5, 9.5)
    >>> ax.legend(loc='lower left', ncol=2)
    >>> plt.show()
    In the second example, the unit circle is interpolated with a spline. A
    periodic boundary condition is used. You can see that the first derivative
    values, ds/dx=0, ds/dy=1 at the periodic point (1, 0) are correctly
    computed. Note that a circle cannot be exactly represented by a cubic
    spline. To increase precision, more breakpoints would be required.
    >>> theta = 2 * np.pi * np.linspace(0, 1, 5)
    >>> y = np.c_[np.cos(theta), np.sin(theta)]
    >>> cs = CubicSpline(theta, y, bc_type='periodic')
    >>> print("ds/dx={:.1f} ds/dy={:.1f}".format(cs(0, 1)[0], cs(0, 1)[1]))
    ds/dx=0.0 ds/dy=1.0
    >>> xs = 2 * np.pi * np.linspace(0, 1, 100)
    >>> fig, ax = plt.subplots(figsize=(6.5, 4))
    >>> ax.plot(y[:, 0], y[:, 1], 'o', label='data')
    >>> ax.plot(np.cos(xs), np.sin(xs), label='true')
    >>> ax.plot(cs(xs)[:, 0], cs(xs)[:, 1], label='spline')
    >>> ax.axes.set_aspect('equal')
    >>> ax.legend(loc='center')
    >>> plt.show()
    The third example is the interpolation of a polynomial y = x**3 on the
    interval 0 <= x<= 1. A cubic spline can represent this function exactly.
    To achieve that we need to specify values and first derivatives at
    endpoints of the interval. Note that y' = 3 * x**2 and thus y'(0) = 0 and
    y'(1) = 3.
    >>> cs = CubicSpline([0, 1], [0, 1], bc_type=((1, 0), (1, 3)))
    >>> x = np.linspace(0, 1)
    >>> np.allclose(x**3, cs(x))
    True
    References
    ----------
    .. [1] `Cubic Spline Interpolation
            <https://en.wikiversity.org/wiki/Cubic_Spline_Interpolation>`_
            on Wikiversity.
    .. [2] Carl de Boor, "A Practical Guide to Splines", Springer-Verlag, 1978.
    """
    def __init__(self, x, y, axis=0, bc_type='not-a-knot', extrapolate=None):
        x, dx, y, axis, _ = prepare_input(x, y, axis)
        n = len(x)
        bc, y = self._validate_bc(bc_type, y, y.shape[1:], axis)
        if extrapolate is None:
            if bc[0] == 'periodic':
                extrapolate = 'periodic'
            else:
                extrapolate = True
        # Reshape dx so it broadcasts against the trailing dimensions of y.
        dxr = dx.reshape([dx.shape[0]] + [1] * (y.ndim - 1))
        # Secant slope of each segment.
        slope = np.diff(y, axis=0) / dxr
        # If bc is 'not-a-knot' this change is just a convention.
        # If bc is 'periodic' then we already checked that y[0] == y[-1],
        # and the spline is just a constant, we handle this case in the same
        # way by setting the first derivatives to slope, which is 0.
        if n == 2:
            if bc[0] in ['not-a-knot', 'periodic']:
                bc[0] = (1, slope[0])
            if bc[1] in ['not-a-knot', 'periodic']:
                bc[1] = (1, slope[0])
        # This is a very special case, when both conditions are 'not-a-knot'
        # and n == 3. In this case 'not-a-knot' can't be handled regularly
        # as the both conditions are identical. We handle this case by
        # constructing a parabola passing through given points.
        if n == 3 and bc[0] == 'not-a-knot' and bc[1] == 'not-a-knot':
            A = np.zeros((3, 3))  # This is a standard matrix.
            b = np.empty((3,) + y.shape[1:], dtype=y.dtype)
            A[0, 0] = 1
            A[0, 1] = 1
            A[1, 0] = dx[1]
            A[1, 1] = 2 * (dx[0] + dx[1])
            A[1, 2] = dx[0]
            A[2, 1] = 1
            A[2, 2] = 1
            b[0] = 2 * slope[0]
            b[1] = 3 * (dxr[0] * slope[1] + dxr[1] * slope[0])
            b[2] = 2 * slope[1]
            s = solve(A, b, overwrite_a=True, overwrite_b=True,
                      check_finite=False)
        else:
            # Find derivative values at each x[i] by solving a tridiagonal
            # system.
            A = np.zeros((3, n))  # This is a banded matrix representation.
            b = np.empty((n,) + y.shape[1:], dtype=y.dtype)
            # Filling the system for i=1..n-2
            #                         (x[i-1] - x[i]) * s[i-1] +\
            # 2 * ((x[i] - x[i-1]) + (x[i+1] - x[i])) * s[i]   +\
            #                         (x[i] - x[i-1]) * s[i+1] =\
            #       3 * ((x[i+1] - x[i])*(y[i] - y[i-1])/(x[i] - x[i-1]) +\
            #           (x[i] - x[i-1])*(y[i+1] - y[i])/(x[i+1] - x[i]))
            A[1, 1:-1] = 2 * (dx[:-1] + dx[1:])  # The diagonal
            A[0, 2:] = dx[:-1]                   # The upper diagonal
            A[-1, :-2] = dx[1:]                  # The lower diagonal
            b[1:-1] = 3 * (dxr[1:] * slope[:-1] + dxr[:-1] * slope[1:])
            bc_start, bc_end = bc
            if bc_start == 'periodic':
                # Due to the periodicity, and because y[-1] = y[0], the linear
                # system has (n-1) unknowns/equations instead of n:
                A = A[:, 0:-1]
                A[1, 0] = 2 * (dx[-1] + dx[0])
                A[0, 1] = dx[-1]
                b = b[:-1]
                # Also, due to the periodicity, the system is not tri-diagonal.
                # We need to compute a "condensed" matrix of shape (n-2, n-2).
                # See https://web.archive.org/web/20151220180652/http://www.cfm.brown.edu/people/gk/chap6/node14.html
                # for more explanations.
                # The condensed matrix is obtained by removing the last column
                # and last row of the (n-1, n-1) system matrix. The removed
                # values are saved in scalar variables with the (n-1, n-1)
                # system matrix indices forming their names:
                a_m1_0 = dx[-2]  # lower left corner value: A[-1, 0]
                a_m1_m2 = dx[-1]
                a_m1_m1 = 2 * (dx[-1] + dx[-2])
                a_m2_m1 = dx[-2]
                a_0_m1 = dx[0]
                b[0] = 3 * (dxr[0] * slope[-1] + dxr[-1] * slope[0])
                b[-1] = 3 * (dxr[-1] * slope[-2] + dxr[-2] * slope[-1])
                Ac = A[:, :-1]
                b1 = b[:-1]
                b2 = np.zeros_like(b1)
                b2[0] = -a_0_m1
                b2[-1] = -a_m2_m1
                # s1 and s2 are the solutions of (n-2, n-2) system
                s1 = solve_banded((1, 1), Ac, b1, overwrite_ab=False,
                                  overwrite_b=False, check_finite=False)
                s2 = solve_banded((1, 1), Ac, b2, overwrite_ab=False,
                                  overwrite_b=False, check_finite=False)
                # computing the s[n-2] solution:
                s_m1 = ((b[-1] - a_m1_0 * s1[0] - a_m1_m2 * s1[-1]) /
                        (a_m1_m1 + a_m1_0 * s2[0] + a_m1_m2 * s2[-1]))
                # s is the solution of the (n, n) system:
                s = np.empty((n,) + y.shape[1:], dtype=y.dtype)
                s[:-2] = s1 + s_m1 * s2
                s[-2] = s_m1
                s[-1] = s[0]
            else:
                # Fill the first and last rows of the banded system
                # according to the requested start/end conditions; order 1
                # prescribes the first derivative, order 2 the second.
                if bc_start == 'not-a-knot':
                    A[1, 0] = dx[1]
                    A[0, 1] = x[2] - x[0]
                    d = x[2] - x[0]
                    b[0] = ((dxr[0] + 2*d) * dxr[1] * slope[0] +
                            dxr[0]**2 * slope[1]) / d
                elif bc_start[0] == 1:
                    A[1, 0] = 1
                    A[0, 1] = 0
                    b[0] = bc_start[1]
                elif bc_start[0] == 2:
                    A[1, 0] = 2 * dx[0]
                    A[0, 1] = dx[0]
                    b[0] = -0.5 * bc_start[1] * dx[0]**2 + 3 * (y[1] - y[0])
                if bc_end == 'not-a-knot':
                    A[1, -1] = dx[-2]
                    A[-1, -2] = x[-1] - x[-3]
                    d = x[-1] - x[-3]
                    b[-1] = ((dxr[-1]**2*slope[-2] +
                             (2*d + dxr[-1])*dxr[-2]*slope[-1]) / d)
                elif bc_end[0] == 1:
                    A[1, -1] = 1
                    A[-1, -2] = 0
                    b[-1] = bc_end[1]
                elif bc_end[0] == 2:
                    A[1, -1] = 2 * dx[-1]
                    A[-1, -2] = dx[-1]
                    b[-1] = 0.5 * bc_end[1] * dx[-1]**2 + 3 * (y[-1] - y[-2])
                s = solve_banded((1, 1), A, b, overwrite_ab=True,
                                 overwrite_b=True, check_finite=False)
        super(CubicSpline, self).__init__(x, y, s, axis=0,
                                          extrapolate=extrapolate)
        self.axis = axis
    @staticmethod
    def _validate_bc(bc_type, y, expected_deriv_shape, axis):
        """Validate and prepare boundary conditions.
        Returns
        -------
        validated_bc : 2-tuple
            Boundary conditions for a curve start and end.
        y : ndarray
            y cast to complex dtype if one of the boundary conditions has
            complex dtype.
        """
        if isinstance(bc_type, string_types):
            if bc_type == 'periodic':
                if not np.allclose(y[0], y[-1], rtol=1e-15, atol=1e-15):
                    raise ValueError(
                        "The first and last `y` point along axis {} must "
                        "be identical (within machine precision) when "
                        "bc_type='periodic'.".format(axis))
            bc_type = (bc_type, bc_type)
        else:
            if len(bc_type) != 2:
                raise ValueError("`bc_type` must contain 2 elements to "
                                 "specify start and end conditions.")
            if 'periodic' in bc_type:
                raise ValueError("'periodic' `bc_type` is defined for both "
                                 "curve ends and cannot be used with other "
                                 "boundary conditions.")
        validated_bc = []
        for bc in bc_type:
            if isinstance(bc, string_types):
                if bc == 'clamped':
                    validated_bc.append((1, np.zeros(expected_deriv_shape)))
                elif bc == 'natural':
                    validated_bc.append((2, np.zeros(expected_deriv_shape)))
                elif bc in ['not-a-knot', 'periodic']:
                    validated_bc.append(bc)
                else:
                    raise ValueError("bc_type={} is not allowed.".format(bc))
            else:
                try:
                    deriv_order, deriv_value = bc
                except Exception:
                    raise ValueError("A specified derivative value must be "
                                     "given in the form (order, value).")
                if deriv_order not in [1, 2]:
                    raise ValueError("The specified derivative order must "
                                     "be 1 or 2.")
                deriv_value = np.asarray(deriv_value)
                if deriv_value.shape != expected_deriv_shape:
                    raise ValueError(
                        "`deriv_value` shape {} is not the expected one {}."
                        .format(deriv_value.shape, expected_deriv_shape))
                if np.issubdtype(deriv_value.dtype, np.complexfloating):
                    y = y.astype(complex, copy=False)
                validated_bc.append((deriv_order, deriv_value))
        return validated_bc, y
| |
import synapse.common as s_common
import synapse.lib.time as s_time
import synapse.lib.types as s_types
import synapse.lib.module as s_module
import synapse.lib.grammar as s_grammar
class EconModule(s_module.CoreModule):
    """Synapse CoreModule declaring the 'econ' data model: payment card,
    purchase, payment, balance, and financial-market types and forms."""
    def getModelDefs(self):
        """Return the (('econ', {...}),) model definition tuple consumed by
        the synapse model loader; contains only 'types' and 'forms'."""
        return (('econ', {
            'types': (
                ('econ:pay:cvv', ('str', {'regex': '^[0-9]{1,6}$'}), {
                    'doc': 'A Card Verification Value (CVV).'}),
                ('econ:pay:pin', ('str', {'regex': '^[0-9]{3,6}$'}), {
                    'doc': 'A Personal Identification Number.'}),
                ('econ:pay:mii', ('int', {'min': 0, 'max': 9}), {
                    'doc': 'A Major Industry Identifier (MII).'}),
                # NOTE(review): '(?<name>...)' named groups are `regex`-module
                # syntax; the stdlib `re` module requires '(?P<name>...)'.
                # Presumably the str type here uses an engine that accepts
                # this form -- verify before changing.
                ('econ:pay:pan', ('str', {'regex': '^(?<iin>(?<mii>[0-9]{1})[0-9]{5})[0-9]{1,13}$'}), {
                    'doc': 'A Primary Account Number (PAN) or card number.'}),
                ('econ:pay:iin', ('int', {'min': 0, 'max': 999999}), {
                    'doc': 'An Issuer Id Number (IIN).'}),
                ('econ:pay:card', ('guid', {}), {
                    'doc': 'A single payment card.'}),
                ('econ:purchase', ('guid', {}), {
                    'doc': 'A purchase event.'}),
                ('econ:acquired', ('comp', {'fields': (('purchase', 'econ:purchase'), ('item', 'ndef'))}), {
                    'doc': 'A relationship between a purchase event and a purchased item.'}),
                ('econ:acct:payment', ('guid', {}), {
                    'doc': 'A payment or crypto currency transaction.'}),
                ('econ:acct:balance', ('guid', {}), {
                    'doc': 'A snapshot of the balance of an account at a point in time.'}),
                ('econ:price', ('hugenum', {'norm': False}), {
                    'doc': 'The amount of money expected, required, or given in payment for something',
                    'ex': '2.20'}),
                ('econ:currency', ('str', {'lower': True, 'strip': False}), {
                    'doc': 'The name of a system of money in general use',
                    'ex': 'usd'}),
                ('econ:fin:exchange', ('guid', {}), {
                    'doc': 'A financial exchange where securities are traded.'}),
                ('econ:fin:security', ('guid', {}), {
                    'doc': 'A financial security which is typically traded on an exchange.'}),
                ('econ:fin:bar', ('guid', {}), {
                    'doc': 'A sample of the open, close, high, low prices of a security in a specific time window'}),
                ('econ:fin:tick', ('guid', {}), {
                    'doc': 'A sample of the price of a security at a single moment in time'}),
                # TODO currency / monetary units / crypto currency
                # econ:acct:bill
                # econ:goods econ:services
                # econ:bank:us:aba:rtn ( ABA Routing Number )
                # econ:bank:us:account = (econ:bank:us:aba:rtn, acct)
                # econ:bank:swift:...
            ),
            'forms': (
                ('econ:pay:iin', {}, (
                    ('org', ('ou:org', {}), {
                        'doc': 'The issuer organization.'}),
                    ('name', ('str', {'lower': True}), {
                        'doc': 'The registered name of the issuer.'}),
                )),
                ('econ:pay:card', {}, (
                    ('pan', ('econ:pay:pan', {}), {
                        'doc': 'The payment card number.'}),
                    ('pan:mii', ('econ:pay:mii', {}), {
                        'doc': 'The payment card MII.'}),
                    ('pan:iin', ('econ:pay:iin', {}), {
                        'doc': 'The payment card IIN.'}),
                    ('name', ('ps:name', {}), {
                        'doc': 'The name as it appears on the card.'}),
                    ('expr', ('time', {}), {
                        'doc': 'The expiration date for the card.'}),
                    ('cvv', ('econ:pay:cvv', {}), {
                        'doc': 'The Card Verification Value on the card.'}),
                    ('pin', ('econ:pay:pin', {}), {
                        'doc': 'The Personal Identification Number on the card.'}),
                )),
                ('econ:purchase', {}, (
                    ('by:contact', ('ps:contact', {}), {
                        'doc': 'The contact information used to make the purchase.'}),
                    ('from:contact', ('ps:contact', {}), {
                        'doc': 'The contact information used to sell the item.'}),
                    ('time', ('time', {}), {
                        'doc': 'The time of the purchase.'}),
                    ('place', ('geo:place', {}), {
                        'doc': 'The place where the purchase took place.'}),
                    ('paid', ('bool', {}), {
                        'doc': 'Set to True if the purchase has been paid in full.'}),
                    ('paid:time', ('time', {}), {
                        'doc': 'The point in time where the purchase was paid in full.'}),
                    ('settled', ('time', {}), {
                        'doc': 'The point in time where the purchase was settled.'}),
                    ('campaign', ('ou:campaign', {}), {
                        'doc': 'The campaign that the purchase was in support of.'}),
                    ('price', ('econ:price', {}), {
                        'doc': 'The econ:price of the purchase'}),
                    # NOTE(review): doc string below duplicates the 'price'
                    # doc; it presumably should describe the currency.
                    ('currency', ('econ:currency', {}), {
                        'doc': 'The econ:price of the purchase'}),
                )),
                ('econ:acquired', {}, (
                    ('purchase', ('econ:purchase', {}), {
                        'doc': 'The purchase event which acquired an item.', 'ro': True, }),
                    ('item', ('ndef', {}), {
                        'doc': 'A reference to the item that was acquired.', 'ro': True, }),
                    ('item:form', ('str', {}), {
                        'doc': 'The form of item purchased.'}),
                )),
                ('econ:acct:payment', {}, (
                    ('txnid', ('str', {'strip': True}), {
                        'doc': 'A payment processor specific transaction id.'}),
                    ('fee', ('econ:price', {}), {
                        'doc': 'The transaction fee paid by the recipient to the payment processor.'}),
                    ('from:pay:card', ('econ:pay:card', {}), {
                        'doc': 'The payment card making the payment.'}),
                    ('from:contract', ('ou:contract', {}), {
                        'doc': 'A contract used as an aggregate payment source.'}),
                    ('from:coinaddr', ('crypto:currency:address', {}), {
                        'doc': 'The crypto currency address making the payment.'}),
                    # NOTE(review): this doc says "being paid" but the property
                    # is the payer side -- likely a copy/paste of the to:contact doc.
                    ('from:contact', ('ps:contact', {}), {
                        'doc': 'Contact information for the person/org being paid.'}),
                    ('to:coinaddr', ('crypto:currency:address', {}), {
                        'doc': 'The crypto currency address receiving the payment.'}),
                    ('to:contact', ('ps:contact', {}), {
                        'doc': 'Contact information for the person/org being paid.'}),
                    ('to:contract', ('ou:contract', {}), {
                        'doc': 'A contract used as an aggregate payment destination.'}),
                    ('time', ('time', {}), {
                        'doc': 'The time the payment was processed.'}),
                    ('purchase', ('econ:purchase', {}), {
                        'doc': 'The purchase which the payment was paying for.'}),
                    ('amount', ('econ:price', {}), {
                        'doc': 'The amount of money transferred in the payment'}),
                    ('currency', ('econ:currency', {}), {
                        'doc': 'The currency of the payment'}),
                    ('memo', ('str', {}), {
                        'doc': 'A small note specified by the payer common in financial transactions.'}),
                    ('crypto:transaction', ('crypto:currency:transaction', {}), {
                        'doc': 'A crypto currency transaction that initiated the payment.'}),
                )),
                ('econ:acct:balance', {}, (
                    ('time', ('time', {}), {
                        'doc': 'The time the balance was recorded.'}),
                    ('pay:card', ('econ:pay:card', {}), {
                        'doc': 'The payment card holding the balance.'}),
                    ('crypto:address', ('crypto:currency:address', {}), {
                        'doc': 'The crypto currency address holding the balance.'}),
                    ('amount', ('econ:price', {}), {
                        'doc': 'The account balance at the time.'}),
                    ('currency', ('econ:currency', {}), {
                        'doc': 'The currency of the balance amount.'}),
                    ('delta', ('econ:price', {}), {
                        'doc': 'The change since last regular sample.'}),
                )),
                ('econ:fin:exchange', {}, (
                    ('name', ('str', {'lower': True, 'strip': True}), {
                        'doc': 'A simple name for the exchange',
                        'ex': 'nasdaq'}),
                    ('org', ('ou:org', {}), {
                        'doc': 'The organization that operates the exchange'}),
                    ('currency', ('econ:currency', {}), {
                        'doc': 'The currency used for all transactions in the exchange',
                        'ex': 'usd'}),
                )),
                ('econ:fin:security', {}, (
                    ('exchange', ('econ:fin:exchange', {}), {
                        'doc': 'The exchange on which the security is traded'}),
                    ('ticker', ('str', {'lower': True, 'strip': True}), {
                        'doc': 'The identifier for this security within the exchange'}),
                    ('type', ('str', {'lower': True, 'strip': True}), {
                        'doc': 'A user defined type such as stock, bond, option, future, or forex'}),
                    ('price', ('econ:price', {}), {
                        'doc': 'The last known/available price of the security'}),
                    ('time', ('time', {}), {
                        'doc': 'The time of the last know price sample'}),
                )),
                ('econ:fin:tick', {}, (
                    ('security', ('econ:fin:security', {}), {
                        'doc': 'The security measured by the tick'}),
                    ('time', ('time', {}), {
                        'doc': 'The time the price was sampled'}),
                    ('price', ('econ:price', {}), {
                        'doc': 'The price of the security at the time'}),
                )),
                ('econ:fin:bar', {}, (
                    ('security', ('econ:fin:security', {}), {
                        'doc': 'The security measured by the bar'}),
                    ('ival', ('ival', {}), {
                        'doc': 'The interval of measurement'}),
                    ('price:open', ('econ:price', {}), {
                        'doc': 'The opening price of the security'}),
                    ('price:close', ('econ:price', {}), {
                        'doc': 'The closing price of the security'}),
                    ('price:low', ('econ:price', {}), {
                        'doc': 'The low price of the security'}),
                    ('price:high', ('econ:price', {}), {
                        'doc': 'The high price of the security'}),
                )),
            ),
        }),)
| |
"""
This code includes simple dense layer.
Dense layer is also well known as a fully-connected layer.
"""
import numpy as np
import theano
import theano.tensor as T
from collections import OrderedDict
from lemontree.layers.layer import BaseLayer
class DenseLayer(BaseLayer):
    """
    Fully-connected (dense) layer: maps a 2D input to a 2D output.
    """
    def __init__(self, input_shape, output_shape, use_bias=True, target_cpu=False):
        """
        Initialize the layer configuration.
        Input is 2D tensor, output is 2D tensor.
        Set use_bias = False when a batch normalization layer follows.
        Parameters
        ----------
        input_shape: tuple
            single-element tuple, i.e., (input dim,)
        output_shape: tuple
            single-element tuple, i.e., (output dim,)
        use_bias: bool, default: True
            whether a bias vector is added to the affine transform.
        target_cpu: bool, default: False
            whether shared variables are placed on cpu instead of gpu.
        """
        super(DenseLayer, self).__init__()
        # validate arguments before storing anything
        assert isinstance(input_shape, tuple) and len(input_shape) == 1, '"input_shape" should be a tuple with single value.'
        assert isinstance(output_shape, tuple) and len(output_shape) == 1, '"output_shape" should be a tuple with single value.'
        assert isinstance(use_bias, bool), '"use_bias" should be a bool value.'
        assert isinstance(target_cpu, bool), '"target_cpu" should be a bool value.'
        self.input_shape = input_shape
        self.output_shape = output_shape
        self.use_bias = use_bias
        self.target_cpu = target_cpu
    def _make_shared(self, value, name):
        # Place the variable explicitly on cpu when requested,
        # otherwise let theano pick the device.
        if self.target_cpu:
            return theano.shared(value, name, target='cpu')
        return theano.shared(value, name)
    def set_shared(self):
        """
        Allocate the layer's shared variables (overrides parent).
        Shared Variables
        ----------------
        W: 2D matrix
            shape is (input dim, output dim), zero-initialized.
        b: 1D vector
            shape is (output dim,), zero-initialized.
        """
        weight_init = np.zeros((self.input_shape[0], self.output_shape[0])).astype(theano.config.floatX)
        self.W = self._make_shared(weight_init, self.name + '_weight')
        self.W.tags = ['weight', self.name]
        bias_init = np.zeros(self.output_shape,).astype(theano.config.floatX)
        self.b = self._make_shared(bias_init, self.name + '_bias')
        self.b.tags = ['bias', self.name]
    def set_shared_by(self, params):
        # Reuse shared variables from another layer; b only exists
        # when the layer was configured with a bias.
        self.W = params[0]
        if self.use_bias:
            self.b = params[1]
    def get_output(self, input_):
        """
        Build the symbolic output (overrides parent).
        Math Expression
        -------------------
        Y = dot(X, W) + b   (bias broadcasting handled by theano)
        Y = dot(X, W)       (when use_bias is False)
        Parameters
        ----------
        input_: TensorVariable
        Returns
        -------
        TensorVariable
        """
        result = T.dot(input_, self.W)
        if self.use_bias:
            result = result + self.b
        return result
    def get_params(self):
        """
        Return the layer's trainable shared variables (overrides parent).
        Returns
        -------
        list
            [W, b] when use_bias, else [W].
        """
        return [self.W, self.b] if self.use_bias else [self.W]
class TimeDistributedDenseLayer(BaseLayer):
    """
    Dense layer applied independently at every timestep of a 3D input.
    """
    def __init__(self, input_shape, output_shape, use_bias=True, target_cpu=False):
        """
        Initialize the layer configuration.
        Input is 3D tensor, output is 3D tensor.
        Set use_bias = False when a batch normalization layer follows.
        Parameters
        ----------
        input_shape: tuple
            single-element tuple, i.e., (input dim,)
        output_shape: tuple
            single-element tuple, i.e., (output dim,)
        use_bias: bool, default: True
            whether a bias vector is added to the affine transform.
        target_cpu: bool, default: False
            whether shared variables are placed on cpu instead of gpu.
        """
        super(TimeDistributedDenseLayer, self).__init__()
        # validate arguments before storing anything
        assert isinstance(input_shape, tuple) and len(input_shape) == 1, '"input_shape" should be a tuple with single value.'
        assert isinstance(output_shape, tuple) and len(output_shape) == 1, '"output_shape" should be a tuple with single value.'
        assert isinstance(use_bias, bool), '"use_bias" should be a bool value.'
        assert isinstance(target_cpu, bool), '"target_cpu" should be a bool value.'
        self.input_shape = input_shape
        self.output_shape = output_shape
        self.use_bias = use_bias
        self.target_cpu = target_cpu
    def _make_shared(self, value, name):
        # Place the variable explicitly on cpu when requested,
        # otherwise let theano pick the device.
        if self.target_cpu:
            return theano.shared(value, name, target='cpu')
        return theano.shared(value, name)
    def set_shared(self):
        """
        Allocate the layer's shared variables (overrides parent).
        Shared Variables
        ----------------
        W: 2D matrix
            shape is (input dim, output dim), zero-initialized.
        b: 1D vector
            shape is (output dim,), zero-initialized.
        """
        weight_init = np.zeros((self.input_shape[0], self.output_shape[0])).astype(theano.config.floatX)
        self.W = self._make_shared(weight_init, self.name + '_weight')
        self.W.tags = ['weight', self.name]
        bias_init = np.zeros(self.output_shape,).astype(theano.config.floatX)
        self.b = self._make_shared(bias_init, self.name + '_bias')
        self.b.tags = ['bias', self.name]
    def set_shared_by(self, params):
        # Reuse shared variables from another layer; b only exists
        # when the layer was configured with a bias.
        self.W = params[0]
        if self.use_bias:
            self.b = params[1]
    def get_output(self, input_):
        """
        Build the symbolic output (overrides parent).
        The same affine transform is applied at each timestep via scan.
        Math Expression
        -------------------
        Y = dot(X, W) + b   (bias broadcasting handled by theano)
        Y = dot(X, W)       (when use_bias is False)
        Parameters
        ----------
        input_: TensorVariable
            shape (batch_size, sequence_length, input_dim).
        Returns
        -------
        TensorVariable
            shape (batch_size, sequence_length, output_dim).
        """
        seq_major = input_.dimshuffle(1, 0, 2)  # (sequence_length, batch_size, input_dim)
        def step(x_t):
            out = T.dot(x_t, self.W)
            if self.use_bias:
                out = out + self.b
            return out
        scanned = theano.scan(step,
                              sequences=[seq_major],
                              outputs_info=[None])[0]
        # restore (batch_size, sequence_length, output_dim) ordering
        return scanned.dimshuffle(1, 0, 2)
    def get_params(self):
        """
        Return the layer's trainable shared variables (overrides parent).
        Returns
        -------
        list
            [W, b] when use_bias, else [W].
        """
        return [self.W, self.b] if self.use_bias else [self.W]
| |
import datetime
import pytz
from django import http
from django.shortcuts import get_object_or_404, redirect, render
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.views.decorators.http import require_POST
from django.views.decorators.csrf import csrf_exempt
from django.utils.timezone import utc, make_naive
from django.db import transaction
from django.conf import settings
from django.core.urlresolvers import reverse
from django.views.decorators.cache import never_cache
from slugify import slugify
from jsonview.decorators import json_view
from airmozilla.main.models import (
SuggestedEvent,
Event,
Channel,
SuggestedEventComment,
SuggestedCuratedGroup,
)
from airmozilla.comments.models import SuggestedDiscussion
from airmozilla.base.utils import tz_apply
from . import utils
from . import forms
from . import sending
def _increment_slug_if_exists(slug):
    """Return `slug`, or `slug-2`, `slug-3`, ... -- the first variant not
    already used (case-insensitively) by an Event or SuggestedEvent."""
    base = slug
    count = 2

    def exists(candidate):
        # .exists() issues a cheap EXISTS query; the old truthiness test
        # evaluated (and cached) both whole querysets on every probe.
        return (
            Event.objects.filter(slug__iexact=candidate).exists() or
            SuggestedEvent.objects.filter(slug__iexact=candidate).exists()
        )

    while exists(slug):
        slug = base + '-%s' % count
        count += 1
    return slug
@login_required
@transaction.atomic
def start(request):
    """First step of the suggestion wizard: create an event from a title."""
    context = {}
    if request.method == 'POST':
        form = forms.StartForm(request.POST, user=request.user)
        if form.is_valid():
            title = form.cleaned_data['title']
            slug = _increment_slug_if_exists(slugify(title).lower())
            event = SuggestedEvent.objects.create(
                user=request.user,
                title=title,
                slug=slug,
            )
            # Enable discussion on by default.
            # https://bugzilla.mozilla.org/show_bug.cgi?id=1135822
            SuggestedDiscussion.objects.create(
                event=event,
                enabled=True,
                notify_all=True,
            )
            default_channel = Channel.objects.get(
                slug=settings.DEFAULT_CHANNEL_SLUG
            )
            event.channels.add(default_channel)
            # XXX use next_url() instead?
            return redirect(reverse('suggest:description', args=(event.pk,)))
    else:
        form = forms.StartForm(user=request.user)
    context['suggestions'] = (
        SuggestedEvent.objects
        .filter(user=request.user)
        .exclude(status=SuggestedEvent.STATUS_REMOVED)
        .order_by('modified')
    )
    context['form'] = form
    context['event'] = None
    return render(request, 'suggest/start.html', context)
@login_required
@transaction.atomic
def title(request, id):
    """Edit the title of a suggested event; only its owner may do so."""
    event = get_object_or_404(SuggestedEvent, pk=id)
    if event.user != request.user:
        return http.HttpResponseBadRequest('Not your event')
    if request.method != 'POST':
        form = forms.TitleForm(instance=event)
    else:
        form = forms.TitleForm(request.POST, instance=event)
        if form.is_valid():
            event = form.save()
            # XXX use next_url() instead?
            return redirect(reverse('suggest:description', args=(event.pk,)))
    return render(request, 'suggest/title.html',
                  {'form': form, 'event': event})
@never_cache
@login_required
@transaction.atomic
def description(request, id):
    """Edit the description of a suggested event, then move on to details."""
    event = get_object_or_404(SuggestedEvent, pk=id)
    if event.user != request.user:
        return http.HttpResponseBadRequest('Not your event')
    if request.method != 'POST':
        form = forms.DescriptionForm(instance=event)
    else:
        form = forms.DescriptionForm(request.POST, instance=event)
        if form.is_valid():
            form.save()
            # XXX use next_url() instead?
            return redirect(reverse('suggest:details', args=(event.pk,)))
    return render(request, 'suggest/description.html',
                  {'form': form, 'event': event})
@never_cache
@login_required
@transaction.atomic
def details(request, id):
    """Edit location/time/privacy details of a suggested event.

    On POST, re-applies the event's location timezone to the submitted
    start_time, toggles the SuggestedDiscussion on or off, and redirects
    to the next wizard step.  On GET, converts start_time to the
    location's naive local time so the form widget shows it correctly.
    """
    event = get_object_or_404(SuggestedEvent, pk=id)
    # Only the owner may edit.
    if event.user != request.user:
        return http.HttpResponseBadRequest('Not your event')
    try:
        discussion = SuggestedDiscussion.objects.get(event=event)
    except SuggestedDiscussion.DoesNotExist:
        discussion = None
    curated_groups = (
        SuggestedCuratedGroup.objects.filter(event=event).order_by('created')
    )
    if request.method == 'POST':
        form = forms.DetailsForm(request.POST, instance=event)
        if form.is_valid():
            event = form.save()
            # the start_time comes to us as a string, e.g. '2014-01-01
            # 12:00:00' and that'll be converted into '2014-01-01
            # 12:00:00 tzinfo=UTC' automatically. But that's not what we want
            # so we change it first.
            event.start_time = tz_apply(
                event.start_time,
                pytz.timezone(event.location.timezone)
            )
            event.save()
            next_url = reverse('suggest:placeholder', args=(event.pk,))
            if form.cleaned_data['enable_discussion']:
                if discussion:
                    # make sure it's enabled
                    discussion.enabled = True
                    # discussion.moderate_all = (
                    #     event.privacy != Event.PRIVACY_COMPANY
                    # )
                    discussion.save()
                else:
                    discussion = SuggestedDiscussion.objects.create(
                        event=event,
                        enabled=True,
                        notify_all=True,
                        # moderate_all=event.privacy != Event.PRIVACY_COMPANY
                    )
                # The owner is always a moderator of their own discussion.
                if request.user not in discussion.moderators.all():
                    discussion.moderators.add(request.user)
                # Discussion enabled: detour through the discussion step.
                next_url = reverse('suggest:discussion', args=(event.pk,))
            elif SuggestedDiscussion.objects.filter(event=event):
                discussion = SuggestedDiscussion.objects.get(event=event)
                discussion.enabled = False
                discussion.save()
            return redirect(next_url)
    else:
        if event.location and event.start_time:
            # Because the modelform is going present our user
            # without input widgets' that are datetimes in
            # naive format, when it does this is does so using the
            # settings.TIME_ZONE and when saved it applies the
            # settings.TIME_ZONE back again.
            # Normally in Django templates, this is solved with
            #  {% timezone "Europe/Paris" %}
            #    {{ form.as_p }}
            #  {% endtimezone %}
            # But that's not going to work when working with jinja
            # so we do it manually from the view code.
            event.start_time = make_naive(
                event.start_time,
                pytz.timezone(event.location.timezone)
            )
        # Discussion is considered enabled unless the event exists
        # without any SuggestedDiscussion record.
        initial = {
            'enable_discussion': not (event and not discussion),
            'curated_groups': curated_groups.values_list(
                'name',
                flat=True
            )
        }
        curated_groups_choices = [
            (x, x) for x in initial['curated_groups']
        ]
        form = forms.DetailsForm(
            instance=event,
            initial=initial,
            no_tag_choices=True,
            curated_groups_choices=curated_groups_choices,
        )
    data = {'form': form, 'event': event}
    return render(request, 'suggest/details.html', data)
@never_cache
@login_required
@transaction.atomic
def discussion(request, id):
    """Edit the discussion settings (moderator emails) of a suggested event.

    On POST, replaces the moderator set with users matching the submitted
    emails, creating accounts for unknown addresses.
    """
    event = get_object_or_404(SuggestedEvent, pk=id)
    if event.user != request.user:
        return http.HttpResponseBadRequest('Not your event')
    # NOTE(review): unguarded .get() -- raises DoesNotExist (500) if the
    # event somehow has no SuggestedDiscussion; other views treat that
    # case as possible. Verify whether a guard is needed here.
    discussion = SuggestedDiscussion.objects.get(event=event)
    if request.method == 'POST':
        form = forms.DiscussionForm(
            request.POST,
            instance=discussion,
            all_emails=True,
        )
        if form.is_valid():
            discussion = form.save()
            # Rebuild the moderator list from scratch.
            discussion.moderators.clear()
            for email in form.cleaned_data['emails']:
                try:
                    user = User.objects.get(email__iexact=email)
                except User.DoesNotExist:
                    # NOTE(review): username is derived from the email's
                    # local part; two emails with the same local part would
                    # collide on the unique username -- confirm acceptable.
                    user = User.objects.create(
                        username=email.split('@')[0],
                        email=email
                    )
                    user.set_unusable_password()
                    user.save()
                discussion.moderators.add(user)
            url = reverse('suggest:placeholder', args=(event.pk,))
            return redirect(url)
    else:
        # Pre-fill with current moderators' emails (deduplicated),
        # falling back to the requesting user's own email.
        emails = []
        for moderator in discussion.moderators.all():
            if moderator.email not in emails:
                emails.append(moderator.email)
        if not emails:
            emails.append(request.user.email)
        initial = {'emails': emails}
        form = forms.DiscussionForm(instance=discussion, initial=initial)
    context = {'event': event, 'form': form, 'discussion': discussion}
    return render(request, 'suggest/discussion.html', context)
@login_required
@json_view
def autocomplete_emails(request):
    """Return a JSON list of known email addresses matching ?q=... ."""
    if 'q' not in request.GET:
        return http.HttpResponseBadRequest('Missing q')
    q = request.GET.get('q', '').strip()
    emails = []
    # Require at least two characters before searching.
    if len(q) > 1:
        matches = (
            User.objects
            .filter(email__istartswith=q)
            .exclude(email__isnull=True)
            .order_by('email')
        )
        for user in matches:
            if user.email not in emails:
                emails.append(user.email)
        if not emails:
            # No known users matched: fall back to validating the input
            # itself, optionally completed with the @mozilla.com domain.
            if utils.is_valid_email(q):
                emails.append(q)
            elif utils.is_valid_email('%s@mozilla.com' % q):
                emails.append('%s@mozilla.com' % q)
    return {'emails': emails}
@never_cache
@login_required
@transaction.atomic
def placeholder(request, id):
    """Upload a placeholder image (or pick a gallery picture) for the event."""
    event = get_object_or_404(SuggestedEvent, pk=id)
    if event.user != request.user:
        return http.HttpResponseBadRequest('Not your event')
    if request.method == 'POST':
        form = forms.PlaceholderForm(
            request.POST,
            request.FILES,
            instance=event
        )
        if form.is_valid():
            event = form.save()
            if form['placeholder_img'].value() != event.placeholder_img:
                # User selected a new placeholder image. Clear gallery select.
                event.picture = None
                event.save()
            # XXX use next_url() instead?
            return redirect(reverse('suggest:summary', args=(event.pk,)))
    else:
        form = forms.PlaceholderForm()
        if event.picture:
            form.fields['picture'].initial = event.picture.id
    return render(request, 'suggest/placeholder.html',
                  {'form': form, 'event': event})
@never_cache
@login_required
@transaction.atomic
def summary(request, id):
    """Show the suggestion summary; handle comments and submit/retract.

    POST with 'save_comment' adds a comment (emailing producers only if
    the event is submitted); any other POST toggles the event between
    submitted and retracted, updating its status accordingly.
    """
    event = get_object_or_404(SuggestedEvent, pk=id)
    if event.user != request.user:
        # it's ok if it's submitted and you have the 'add_event'
        # permission
        if request.user.has_perm('main.add_event'):
            if not event.submitted:
                return http.HttpResponseBadRequest('Not submitted')
        else:
            return http.HttpResponseBadRequest('Not your event')
    comment_form = forms.SuggestedEventCommentForm()
    if request.method == 'POST':
        if request.POST.get('save_comment'):
            comment_form = forms.SuggestedEventCommentForm(data=request.POST)
            if comment_form.is_valid():
                comment = SuggestedEventComment.objects.create(
                    comment=comment_form.cleaned_data['comment'].strip(),
                    user=request.user,
                    suggested_event=event
                )
                # Producers are only notified for submitted events.
                if event.submitted:
                    sending.email_about_suggested_event_comment(
                        comment,
                        request
                    )
                    messages.info(
                        request,
                        'Comment added and producers notified by email.'
                    )
                else:
                    messages.info(
                        request,
                        'Comment added but not emailed to producers because '
                        'the event is not submitted.'
                    )
                return redirect('suggest:summary', event.pk)
        else:
            # Toggle: a submitted event gets retracted, an unsubmitted
            # one gets (re)submitted.
            if event.submitted:
                event.status = SuggestedEvent.STATUS_RETRACTED
                event.submitted = None
                event.save()
            else:
                now = datetime.datetime.utcnow().replace(tzinfo=utc)
                event.submitted = now
                if not event.first_submitted:
                    event.status = SuggestedEvent.STATUS_SUBMITTED
                    event.first_submitted = now
                else:
                    # it was only resubmitted if it was previously rejected
                    if event.status == SuggestedEvent.STATUS_REJECTED:
                        event.status = SuggestedEvent.STATUS_RESUBMITTED
                    else:
                        event.status = SuggestedEvent.STATUS_SUBMITTED
                event.save()
                sending.email_about_suggested_event(event, request)
            url = reverse('suggest:summary', args=(event.pk,))
            return redirect(url)
    # we don't need the label for this form layout
    comment_form.fields['comment'].label = ''
    comments = (
        SuggestedEventComment.objects
        .filter(suggested_event=event)
        .select_related('user')
        .order_by('created')
    )
    # Pick the (at most one expected) discussion record, if any.
    discussion = None
    for each in SuggestedDiscussion.objects.filter(event=event):
        discussion = each
    curated_groups = SuggestedCuratedGroup.objects.none()
    # It only matters if the privacy
    # is SuggestedEvent.PRIVACY_SOME_CONTRIBUTORS.
    if event.privacy == SuggestedEvent.PRIVACY_SOME_CONTRIBUTORS:
        curated_groups = SuggestedCuratedGroup.objects.filter(
            event=event
        ).order_by('name')
    context = {
        'event': event,
        'comment_form': comment_form,
        'comments': comments,
        'discussion': discussion,
        'curated_groups': curated_groups,
    }
    return render(request, 'suggest/summary.html', context)
@csrf_exempt
@require_POST
@login_required
def delete(request, id):
    """Delete a suggested event; only its owner is allowed to."""
    event = get_object_or_404(SuggestedEvent, pk=id)
    if event.user == request.user:
        event.delete()
        return redirect('suggest:start')
    return http.HttpResponseBadRequest('Not your event')
| |
import json
from ct.models import *
from django.contrib.auth.models import User
from django.utils import dateparse, timezone
from datetime import datetime
import codecs
def store_errors(q, concept, parentUL, conceptIDdict=None):
    """Store the error-model UnitLessons associated with a question record.

    Entries in q['courseletsMapError'] map (by position) onto q['error'];
    a mapped id resolves via conceptIDdict to either an existing UnitLesson
    pk (int) or a 'fork:PK' string naming an error model to attach a new
    lesson to.  Returns the list of created/attached error UnitLessons.
    """
    errorModels = []
    courseletsMap = q.get('courseletsMapError', ())
    if conceptIDdict is None:
        conceptIDdict = {}
    for i, e in enumerate(q.get('error', ())):
        em = emUL = saveMapping = None
        try:
            mapID = courseletsMap[i]
        except IndexError:
            # No mapping entry for this error: treat as a brand-new model.
            pass
        else:
            ulID = conceptIDdict.get(mapID, None)
            if ulID is None:
                # Unknown mapping id: create a new model but remember to
                # record it (saveMapping) so later questions can fork it.
                saveMapping = True
                print 'WARNING: %s not found in conceptIDdict; treating as new error model' % mapID
            else:
                if isinstance(ulID, int): # just add existing EM to this question
                    ul = UnitLesson.objects.get(pk=ulID)
                    # NOTE(review): 'unit' is not defined anywhere in this
                    # function's scope -- this branch would raise NameError
                    # unless a module-level 'unit' exists. Probably should
                    # be a parameter or derived from parentUL; verify.
                    emUL = UnitLesson.create_from_lesson(ul.lesson, unit,
                                                         parent=parentUL)
                else: # add new EMLesson to existing EM
                    try:
                        if not ulID.startswith('fork:'):
                            raise ValueError
                        ul = UnitLesson.objects.get(pk=int(ulID[5:]))
                    except ValueError:
                        raise ValueError('bad conceptIDdict ID value %s: should be int or "fork:INT"'
                                         % ulID)
                    em = ul.lesson.concept # link to existing error model
                    if not ul.lesson.concept or not ul.lesson.concept.isError:
                        raise ValueError('%s: not a valid error model'
                                         % ul.lesson.title)
        if not emUL: # create new error model lesson
            emLesson = Lesson(title=e.get('title', '(rename this)'),
                              addedBy=parentUL.addedBy,
                              text=e.get('explanation', '(write an explanation)'))
            emUL = emLesson.save_as_error_model(concept, parentUL, em)
            if saveMapping: # allow other questions to fork this EM
                conceptIDdict[mapID] = 'fork:%d' % emUL.pk
        errorModels.append(emUL)
    return errorModels
def get_or_create_user(username, email='unknown'):
    """Fetch the User with this username, creating a passwordless
    'Student' account for it if none exists yet."""
    try:
        return User.objects.get(username=username)
    except User.DoesNotExist:
        user = User.objects.create_user(username, email, None,
                                        first_name='Student',
                                        last_name=username)
        user.save()
        return user
def store_response_errors(r, errorModels, response, genericErrors,
                          genericIndex):
    """Record a StudentError for every error listed on one response record.

    Integer ids index into this question's errorModels; any other id is
    resolved to a generic error model via phrase matching on its text.
    """
    for err in r['errors']:
        eid = err['error_id']
        if isinstance(eid, int):
            em_ul = errorModels[eid]
        else:
            # look up generic error model by best phrase match
            em_ul = genericErrors[genericIndex[eid]]
        StudentError(response=response, atime=response.atime,
                     errorModel=em_ul, author=response.author).save()
def store_response(r, course, parentUL, errorModels, genericErrors,
                   genericIndex, tzinfo=None):
    """Store one student response: user, confidence, selfeval, and errors.

    `r` supplies username, confidence (index into Response.CONF_CHOICES),
    submit_time (naive datetime string interpreted in `tzinfo`), answer,
    and optionally selfeval.  Returns the saved Response.
    """
    if tzinfo is None:
        # Resolve the timezone at call time; the previous default argument
        # was evaluated once at import, freezing whatever timezone was
        # configured when this module was first loaded.
        tzinfo = timezone.get_default_timezone()
    user = get_or_create_user(r['username'])
    confidence = Response.CONF_CHOICES[r['confidence']][0]
    atime = dateparse.parse_datetime(r['submit_time'])
    atime = timezone.make_aware(atime, tzinfo)
    response = Response(unitLesson=parentUL, lesson=parentUL.lesson,
                        text=r['answer'], course=course, author=user,
                        confidence=confidence, atime=atime)
    if 'selfeval' in r:
        response.selfeval = r['selfeval']
    response.save()
    store_response_errors(r, errorModels, response, genericErrors,
                          genericIndex)
    return response
def add_concept_resource(conceptID, unit, conceptIDdict=()):
    """Resolve conceptID ('courseletsConcept:PK', a mapped id, or a
    wikipedia id) to a Concept and attach it to the unit as a resource
    if not already present.  Returns the Concept."""
    ulID = None
    if conceptID.startswith('courseletsConcept:'):
        ulID = int(conceptID[18:])
    elif conceptID in conceptIDdict:
        ulID = conceptIDdict[conceptID]
    if ulID is None:
        # Unknown id: fetch (or create) the concept from the source DB.
        concept, lesson = Concept.get_from_sourceDB(conceptID, unit.addedBy)
    else:
        ul = UnitLesson.objects.get(pk=ulID)
        lesson = ul.lesson
        concept = lesson.concept
        if not concept:
            raise ValueError('%s does not link to a concept!' % conceptID)
    if not unit.unitlesson_set.filter(lesson__concept=concept).exists():
        UnitLesson.create_from_lesson(lesson, unit)  # attach as unit resource
    return concept
def store_new_question(q, unit, concept,
                       tzinfo=None,
                       kind=Lesson.ORCT_QUESTION):
    """Create question and answer lessons from record `q`, append to unit.

    Uses q['title'], q['text'], q['answer'] and optionally q['date_added']
    (a date string localized via `tzinfo`).  Returns the new UnitLesson.
    """
    if tzinfo is None:
        # Resolve the timezone at call time; the previous default argument
        # was evaluated once at import, freezing whatever timezone was
        # configured when this module was first loaded.
        tzinfo = timezone.get_default_timezone()
    lesson = Lesson(title=q['title'], text=q['text'], addedBy=unit.addedBy,
                    kind=kind)
    if 'date_added' in q:
        d = dateparse.parse_date(q['date_added'])
        atime = datetime(d.year, d.month, d.day)
        lesson.atime = timezone.make_aware(atime, tzinfo)
    lesson.save_root(concept)
    unitLesson = UnitLesson.create_from_lesson(lesson, unit, order='APPEND',
                                               addAnswer=True)
    answer = unitLesson._answer.lesson  # get auto-created answer record
    answer.title = q['title'] + ' Answer'  # update answer text
    answer.text = q['answer']
    answer.save()
    return unitLesson
def store_question(q, course, unit, genericErrors, genericIndex,
                   conceptIDdict=(), **kwargs):
    """Store one question record: concept link, error models, answer,
    and all of its responses.  Extra kwargs pass through to
    store_new_question.  Returns the question's UnitLesson."""
    conceptID = q['tests'][0] # link to first concept
    concept = add_concept_resource(conceptID, unit, conceptIDdict)
    unitLesson = store_new_question(q, unit, concept, **kwargs)
    errorModels = store_errors(q, concept, unitLesson, conceptIDdict)
    for r in q.get('responses', ()):
        store_response(r, course, unitLesson, errorModels, genericErrors,
                       genericIndex)
    # Progress report (Python 2 print statement -- this module is py2).
    print 'saved %s: %d error models, %d responses' \
        % (unitLesson.lesson.title, len(errorModels),
           len(q.get('responses', ())))
    return unitLesson
def index_generic_errors(unit):
    """Collect the unit's generic (abort) error models and build a
    PhraseIndex over their titles for fuzzy lookup."""
    genericErrors = unit.get_aborts()
    titled = [(i, ul.lesson.title) for i, ul in enumerate(genericErrors)]
    return genericErrors, PhraseIndex(titled)
def load_orct_data(infile='orctmerge.json', course=None, unit=None,
                   courseID=None, unitID=None, conceptIDfile=None):
    """Load ORCT question records (and their responses) into a unit.

    course/unit may be passed directly or looked up by courseID/unitID;
    conceptIDfile optionally maps external ids to UnitLesson pks.
    """
    if course is None:
        course = Course.objects.get(pk=courseID)
    if unit is None:
        unit = Unit.objects.get(pk=unitID)
    genericErrors, genericIndex = index_generic_errors(unit)
    orct_records = load_json(infile)
    conceptIDdict = load_json(conceptIDfile) if conceptIDfile else {}
    # Only records explicitly marked as questions are stored.
    for record in orct_records:
        if record.get('kind', 'SKIP') == 'question':
            store_question(record, course, unit, genericErrors, genericIndex,
                           conceptIDdict)
def load_json(infile):
    """Read a UTF-8 encoded JSON file and return the parsed data."""
    with codecs.open(infile, 'r', encoding='utf-8') as handle:
        return json.load(handle)
class PhraseIndex(object):
    """Index texts by overlapping word n-grams and find the best match.

    Built from entries of the form [(id, text), ...]; each text is indexed
    by every run of `nword` consecutive words.  Lookup (via indexing with a
    query string) returns the id whose indexed phrases cover the largest
    fraction of that text's phrases.
    """
    def __init__(self, t, nword=2):
        self.nword = nword
        index = {}
        self.sizes = {}
        for ident, text in t:
            count, words = self.get_phrases(text)
            self.sizes[ident] = count  # remember phrase count per entry
            for start in range(count):
                key = tuple(words[start:start + nword])
                index.setdefault(key, []).append(ident)
        self.d = index
    def get_phrases(self, text):
        """Split text into words; return (number of phrases, word list)."""
        words = text.split()
        if len(words) > self.nword:
            return len(words) - self.nword + 1, words
        # Texts shorter than nword yield one (short) phrase so they can
        # still be matched.
        return 1, words
    def __getitem__(self, text):
        """Return the indexed id with the highest phrase-match fraction.

        Raises KeyError when no phrase of `text` matches any entry.
        """
        count, words = self.get_phrases(text)
        hits = {}
        for start in range(count):
            key = tuple(words[start:start + self.nword])
            for ident in self.d.get(key, ()):
                hits[ident] = hits.get(ident, 0) + 1
        if not hits:
            raise KeyError
        # Rank by fraction of the entry's phrases that matched.
        ranked = sorted((n / float(self.sizes[ident]), ident)
                        for ident, n in hits.items())
        return ranked[-1][1]
| |
# Copyright 2012 Grid Dynamics
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import inspect
import os
import shutil
import tempfile
import fixtures
import mock
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from oslo_utils import units
from oslo_utils import uuidutils
from nova import context
from nova import exception
from nova import keymgr
from nova import objects
from nova.openstack.common import imageutils
from nova import test
from nova.tests.unit import fake_processutils
from nova.tests.unit.virt.libvirt import fake_libvirt_utils
from nova.virt import images
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt import rbd_utils
CONF = cfg.CONF
CONF.import_opt('fixed_key', 'nova.keymgr.conf_key_mgr', group='keymgr')
class _ImageTestCase(object):
    """Common fixtures and shared tests for imagebackend image classes.

    Mixed into concrete TestCase subclasses (Raw, Qcow2, Lvm, ...) which
    must set ``self.image_class`` and a class-level SIZE before this
    mixin's setUp() runs.
    """
    def mock_create_image(self, image):
        # Replace image.create_image with a stub that simply invokes the
        # fetch callback against the base/template path, skipping any
        # real backend work.
        def create_image(fn, base, size, *args, **kwargs):
            fn(target=base, *args, **kwargs)
        image.create_image = create_image
    def setUp(self):
        """Create a fake instance, temp instances dir and path constants."""
        super(_ImageTestCase, self).setUp()
        self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
        self.INSTANCES_PATH = tempfile.mkdtemp(suffix='instances')
        self.fixture.config(disable_process_locking=True,
                            group='oslo_concurrency')
        self.flags(instances_path=self.INSTANCES_PATH)
        self.INSTANCE = objects.Instance(id=1, uuid=uuidutils.generate_uuid())
        self.DISK_INFO_PATH = os.path.join(self.INSTANCES_PATH,
                                           self.INSTANCE['uuid'], 'disk.info')
        self.NAME = 'fake.vm'
        self.TEMPLATE = 'template'
        self.CONTEXT = context.get_admin_context()
        self.OLD_STYLE_INSTANCE_PATH = \
            fake_libvirt_utils.get_instance_path(self.INSTANCE, forceold=True)
        self.PATH = os.path.join(
            fake_libvirt_utils.get_instance_path(self.INSTANCE), self.NAME)
        # TODO(mikal): rename template_dir to base_dir and template_path
        # to cached_image_path. This will be less confusing.
        self.TEMPLATE_DIR = os.path.join(CONF.instances_path, '_base')
        self.TEMPLATE_PATH = os.path.join(self.TEMPLATE_DIR, 'template')
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.imagebackend.libvirt_utils',
            fake_libvirt_utils))
    def tearDown(self):
        # Remove the per-test temporary instances directory.
        super(_ImageTestCase, self).tearDown()
        shutil.rmtree(self.INSTANCES_PATH)
    def test_prealloc_image(self):
        """With preallocate_images=space, fallocate is probed once and run."""
        CONF.set_override('preallocate_images', 'space')
        fake_processutils.fake_execute_clear_log()
        fake_processutils.stub_out_processutils_execute(self.stubs)
        image = self.image_class(self.INSTANCE, self.NAME)
        def fake_fetch(target, *args, **kwargs):
            return
        self.stubs.Set(os.path, 'exists', lambda _: True)
        self.stubs.Set(os, 'access', lambda p, w: True)
        # Call twice to verify testing fallocate is only called once.
        image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
        image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
        self.assertEqual(fake_processutils.fake_execute_get_log(),
            ['fallocate -n -l 1 %s.fallocate_test' % self.PATH,
             'fallocate -n -l %s %s' % (self.SIZE, self.PATH),
             'fallocate -n -l %s %s' % (self.SIZE, self.PATH)])
    def test_prealloc_image_without_write_access(self):
        """No fallocate is attempted when the user lacks write access."""
        CONF.set_override('preallocate_images', 'space')
        fake_processutils.fake_execute_clear_log()
        fake_processutils.stub_out_processutils_execute(self.stubs)
        image = self.image_class(self.INSTANCE, self.NAME)
        def fake_fetch(target, *args, **kwargs):
            return
        self.stubs.Set(image, 'check_image_exists', lambda: True)
        self.stubs.Set(image, '_can_fallocate', lambda: True)
        self.stubs.Set(os.path, 'exists', lambda _: True)
        self.stubs.Set(os, 'access', lambda p, w: False)
        # Testing fallocate is only called when user has write access.
        image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
        self.assertEqual(fake_processutils.fake_execute_get_log(), [])
    def test_libvirt_fs_info(self):
        """libvirt_fs_info returns a filesystem config matching the backend."""
        image = self.image_class(self.INSTANCE, self.NAME)
        fs = image.libvirt_fs_info("/mnt")
        # check that exception hasn't been raised and the method
        # returned correct object
        self.assertIsInstance(fs, vconfig.LibvirtConfigGuestFilesys)
        self.assertEqual(fs.target_dir, "/mnt")
        if image.is_block_dev:
            self.assertEqual(fs.source_type, "block")
            self.assertEqual(fs.source_dev, image.path)
        else:
            self.assertEqual(fs.source_type, "file")
            self.assertEqual(fs.source_file, image.path)
class RawTestCase(_ImageTestCase, test.NoDBTestCase):
    """Tests for the Raw (flat file) image backend.

    NOTE: the mox expectations below are order-sensitive — the sequence
    of recorded calls must match the backend's execution order exactly.
    """
    SIZE = 1024
    def setUp(self):
        self.image_class = imagebackend.Raw
        super(RawTestCase, self).setUp()
        # correct_format does qemu-img inspection; neutralize it here and
        # re-enable it only in the test that exercises it.
        self.stubs.Set(imagebackend.Raw, 'correct_format', lambda _: None)
    def prepare_mocks(self):
        """Stub synchronization, copy_image and disk.extend; return fetch mock."""
        fn = self.mox.CreateMockAnything()
        self.mox.StubOutWithMock(imagebackend.utils.synchronized,
                                 '__call__')
        self.mox.StubOutWithMock(imagebackend.libvirt_utils, 'copy_image')
        self.mox.StubOutWithMock(imagebackend.disk, 'extend')
        return fn
    def test_cache(self):
        """cache() fetches the template and creates the base dir when absent."""
        self.mox.StubOutWithMock(os.path, 'exists')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_DIR).AndReturn(False)
        os.path.exists(self.PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
        fn = self.mox.CreateMockAnything()
        fn(target=self.TEMPLATE_PATH)
        self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
        imagebackend.fileutils.ensure_tree(self.TEMPLATE_DIR)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mock_create_image(image)
        image.cache(fn, self.TEMPLATE)
        self.mox.VerifyAll()
    def test_cache_image_exists(self):
        """cache() is a no-op when image and template already exist."""
        self.mox.StubOutWithMock(os.path, 'exists')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
        os.path.exists(self.PATH).AndReturn(True)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.cache(None, self.TEMPLATE)
        self.mox.VerifyAll()
    def test_cache_base_dir_exists(self):
        """cache() fetches the template but skips base dir creation."""
        self.mox.StubOutWithMock(os.path, 'exists')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
        os.path.exists(self.PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
        fn = self.mox.CreateMockAnything()
        fn(target=self.TEMPLATE_PATH)
        self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mock_create_image(image)
        image.cache(fn, self.TEMPLATE)
        self.mox.VerifyAll()
    def test_cache_template_exists(self):
        """cache() does not re-fetch when the template is already cached."""
        self.mox.StubOutWithMock(os.path, 'exists')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
        os.path.exists(self.PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mock_create_image(image)
        image.cache(None, self.TEMPLATE)
        self.mox.VerifyAll()
    def test_create_image(self):
        """create_image copies the template without resizing when size=None."""
        fn = self.prepare_mocks()
        fn(target=self.TEMPLATE_PATH, max_size=None, image_id=None)
        imagebackend.libvirt_utils.copy_image(self.TEMPLATE_PATH, self.PATH)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, None, image_id=None)
        self.mox.VerifyAll()
    def test_create_image_generated(self):
        """A generated image is fetched directly at the final path."""
        fn = self.prepare_mocks()
        fn(target=self.PATH)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, None)
        self.mox.VerifyAll()
    @mock.patch.object(images, 'qemu_img_info',
                       return_value=imageutils.QemuImgInfo())
    def test_create_image_extend(self, fake_qemu_img_info):
        """create_image copies the template then extends it to the size."""
        fn = self.prepare_mocks()
        fn(max_size=self.SIZE, target=self.TEMPLATE_PATH, image_id=None)
        imagebackend.libvirt_utils.copy_image(self.TEMPLATE_PATH, self.PATH)
        imagebackend.disk.extend(self.PATH, self.SIZE, use_cow=False)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, self.SIZE, image_id=None)
        self.mox.VerifyAll()
    def test_correct_format(self):
        """driver_format is resolved by inspecting the image with qemu-img."""
        # Undo the correct_format stub from setUp so the real path runs.
        self.stubs.UnsetAll()
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(imagebackend.images, 'qemu_img_info')
        os.path.exists(self.PATH).AndReturn(True)
        os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
        info = self.mox.CreateMockAnything()
        info.file_format = 'foo'
        imagebackend.images.qemu_img_info(self.PATH).AndReturn(info)
        os.path.exists(CONF.instances_path).AndReturn(True)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME, path=self.PATH)
        self.assertEqual(image.driver_format, 'foo')
        self.mox.VerifyAll()
    @mock.patch.object(images, 'qemu_img_info',
                       side_effect=exception.InvalidDiskInfo(
                           reason='invalid path'))
    def test_resolve_driver_format(self, fake_qemu_img_info):
        """Falls back to 'raw' when disk info is unavailable."""
        image = self.image_class(self.INSTANCE, self.NAME)
        driver_format = image.resolve_driver_format()
        self.assertEqual(driver_format, 'raw')
class Qcow2TestCase(_ImageTestCase, test.NoDBTestCase):
    """Tests for the Qcow2 (copy-on-write) image backend.

    NOTE: mox expectations are order-sensitive — recorded calls must match
    the backend's execution order exactly.
    """
    SIZE = units.Gi
    def setUp(self):
        self.image_class = imagebackend.Qcow2
        super(Qcow2TestCase, self).setUp()
        # Resized backing files are named <template>_<size-in-GiB>.
        self.QCOW2_BASE = (self.TEMPLATE_PATH +
                           '_%d' % (self.SIZE / units.Gi))
    def prepare_mocks(self):
        """Stub synchronization, cow/copy helpers and disk.extend; return fetch mock."""
        fn = self.mox.CreateMockAnything()
        self.mox.StubOutWithMock(imagebackend.utils.synchronized,
                                 '__call__')
        self.mox.StubOutWithMock(imagebackend.libvirt_utils,
                                 'create_cow_image')
        self.mox.StubOutWithMock(imagebackend.libvirt_utils, 'copy_image')
        self.mox.StubOutWithMock(imagebackend.disk, 'extend')
        return fn
    def test_cache(self):
        """cache() fetches the template when nothing exists yet."""
        self.mox.StubOutWithMock(os.path, 'exists')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
        os.path.exists(CONF.instances_path).AndReturn(True)
        os.path.exists(self.TEMPLATE_DIR).AndReturn(False)
        os.path.exists(self.INSTANCES_PATH).AndReturn(True)
        os.path.exists(self.PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
        fn = self.mox.CreateMockAnything()
        fn(target=self.TEMPLATE_PATH)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mock_create_image(image)
        image.cache(fn, self.TEMPLATE)
        self.mox.VerifyAll()
    def test_cache_image_exists(self):
        """cache() is a no-op when image and template already exist."""
        self.mox.StubOutWithMock(os.path, 'exists')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
        os.path.exists(self.INSTANCES_PATH).AndReturn(True)
        os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
        os.path.exists(self.PATH).AndReturn(True)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.cache(None, self.TEMPLATE)
        self.mox.VerifyAll()
    def test_cache_base_dir_exists(self):
        """cache() fetches the template but skips base dir creation."""
        self.mox.StubOutWithMock(os.path, 'exists')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
        os.path.exists(self.INSTANCES_PATH).AndReturn(True)
        os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
        os.path.exists(self.PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
        fn = self.mox.CreateMockAnything()
        fn(target=self.TEMPLATE_PATH)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mock_create_image(image)
        image.cache(fn, self.TEMPLATE)
        self.mox.VerifyAll()
    def test_cache_template_exists(self):
        """cache() does not re-fetch when the template is already cached."""
        self.mox.StubOutWithMock(os.path, 'exists')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
        os.path.exists(self.INSTANCES_PATH).AndReturn(True)
        os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
        os.path.exists(self.PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mock_create_image(image)
        image.cache(None, self.TEMPLATE)
        self.mox.VerifyAll()
    def test_create_image(self):
        """create_image builds a COW image on top of the template."""
        fn = self.prepare_mocks()
        fn(max_size=None, target=self.TEMPLATE_PATH)
        imagebackend.libvirt_utils.create_cow_image(self.TEMPLATE_PATH,
                                                    self.PATH)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, None)
        self.mox.VerifyAll()
    def test_create_image_with_size(self):
        """create_image builds a COW image then extends it to the size."""
        fn = self.prepare_mocks()
        fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
        self.mox.StubOutWithMock(os.path, 'exists')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
        os.path.exists(self.INSTANCES_PATH).AndReturn(True)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
        os.path.exists(self.PATH).AndReturn(False)
        os.path.exists(self.PATH).AndReturn(False)
        imagebackend.libvirt_utils.create_cow_image(self.TEMPLATE_PATH,
                                                    self.PATH)
        imagebackend.disk.extend(self.PATH, self.SIZE, use_cow=True)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
        self.mox.VerifyAll()
    def test_create_image_too_small(self):
        """FlavorDiskTooSmall is raised when the flavor disk < template size."""
        fn = self.prepare_mocks()
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(imagebackend.Qcow2, 'get_disk_size')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
        os.path.exists(self.INSTANCES_PATH).AndReturn(True)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
        imagebackend.Qcow2.get_disk_size(self.TEMPLATE_PATH
                                         ).AndReturn(self.SIZE)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        self.assertRaises(exception.FlavorDiskTooSmall,
                          image.create_image, fn, self.TEMPLATE_PATH, 1)
        self.mox.VerifyAll()
    def test_generate_resized_backing_files(self):
        """A missing resized backing file is regenerated and extended."""
        fn = self.prepare_mocks()
        fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(imagebackend.libvirt_utils,
                                 'get_disk_backing_file')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
        os.path.exists(CONF.instances_path).AndReturn(True)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
        os.path.exists(self.PATH).AndReturn(True)
        imagebackend.libvirt_utils.get_disk_backing_file(self.PATH)\
            .AndReturn(self.QCOW2_BASE)
        os.path.exists(self.QCOW2_BASE).AndReturn(False)
        imagebackend.libvirt_utils.copy_image(self.TEMPLATE_PATH,
                                              self.QCOW2_BASE)
        imagebackend.disk.extend(self.QCOW2_BASE, self.SIZE, use_cow=True)
        os.path.exists(self.PATH).AndReturn(True)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
        self.mox.VerifyAll()
    def test_qcow2_exists_and_has_no_backing_file(self):
        """An existing qcow2 with no backing file is left untouched."""
        fn = self.prepare_mocks()
        fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(imagebackend.libvirt_utils,
                                 'get_disk_backing_file')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
        os.path.exists(self.INSTANCES_PATH).AndReturn(True)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
        os.path.exists(self.PATH).AndReturn(True)
        imagebackend.libvirt_utils.get_disk_backing_file(self.PATH)\
            .AndReturn(None)
        os.path.exists(self.PATH).AndReturn(True)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
        self.mox.VerifyAll()
    def test_resolve_driver_format(self):
        """The Qcow2 backend always reports the 'qcow2' driver format."""
        image = self.image_class(self.INSTANCE, self.NAME)
        driver_format = image.resolve_driver_format()
        self.assertEqual(driver_format, 'qcow2')
class LvmTestCase(_ImageTestCase, test.NoDBTestCase):
    """Tests for the Lvm (logical volume) image backend, unencrypted.

    NOTE: mox expectations are order-sensitive — recorded calls must match
    the backend's execution order exactly.
    """
    VG = 'FakeVG'
    TEMPLATE_SIZE = 512
    SIZE = 1024
    def setUp(self):
        self.image_class = imagebackend.Lvm
        super(LvmTestCase, self).setUp()
        self.flags(images_volume_group=self.VG, group='libvirt')
        # ephemeral encryption is exercised separately in
        # EncryptedLvmTestCase; disable it here
        self.flags(enabled=False, group='ephemeral_storage_encryption')
        self.INSTANCE['ephemeral_key_uuid'] = None
        self.LV = '%s_%s' % (self.INSTANCE['uuid'], self.NAME)
        # LVM volumes live under /dev/<vg>, not the instances dir
        self.OLD_STYLE_INSTANCE_PATH = None
        self.PATH = os.path.join('/dev', self.VG, self.LV)
        self.disk = imagebackend.disk
        self.utils = imagebackend.utils
        self.lvm = imagebackend.lvm
    def prepare_mocks(self):
        """Stub resize2fs, create_volume, get_disk_size and execute; return fetch mock."""
        fn = self.mox.CreateMockAnything()
        self.mox.StubOutWithMock(self.disk, 'resize2fs')
        self.mox.StubOutWithMock(self.lvm, 'create_volume')
        self.mox.StubOutWithMock(self.disk, 'get_disk_size')
        self.mox.StubOutWithMock(self.utils, 'execute')
        return fn
    def _create_image(self, sparse):
        # Fetch template, create an LV of template size and qemu-img
        # convert the template into it.
        fn = self.prepare_mocks()
        fn(max_size=None, target=self.TEMPLATE_PATH)
        self.lvm.create_volume(self.VG,
                               self.LV,
                               self.TEMPLATE_SIZE,
                               sparse=sparse)
        self.disk.get_disk_size(self.TEMPLATE_PATH
                                ).AndReturn(self.TEMPLATE_SIZE)
        cmd = ('qemu-img', 'convert', '-O', 'raw', self.TEMPLATE_PATH,
               self.PATH)
        self.utils.execute(*cmd, run_as_root=True)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, None)
        self.mox.VerifyAll()
    def _create_image_generated(self, sparse):
        # Generated (e.g. ephemeral) images are created directly in the LV.
        fn = self.prepare_mocks()
        self.lvm.create_volume(self.VG, self.LV,
                               self.SIZE, sparse=sparse)
        fn(target=self.PATH, ephemeral_size=None)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH,
                           self.SIZE, ephemeral_size=None)
        self.mox.VerifyAll()
    def _create_image_resize(self, sparse):
        # Converting into an LV larger than the template triggers resize2fs.
        fn = self.prepare_mocks()
        fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
        self.lvm.create_volume(self.VG, self.LV,
                               self.SIZE, sparse=sparse)
        self.disk.get_disk_size(self.TEMPLATE_PATH
                                ).AndReturn(self.TEMPLATE_SIZE)
        cmd = ('qemu-img', 'convert', '-O', 'raw', self.TEMPLATE_PATH,
               self.PATH)
        self.utils.execute(*cmd, run_as_root=True)
        self.disk.resize2fs(self.PATH, run_as_root=True)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
        self.mox.VerifyAll()
    def test_cache(self):
        """cache() fetches the template and creates the base dir when absent."""
        self.mox.StubOutWithMock(os.path, 'exists')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_DIR).AndReturn(False)
        os.path.exists(self.PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
        fn = self.mox.CreateMockAnything()
        fn(target=self.TEMPLATE_PATH)
        self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
        imagebackend.fileutils.ensure_tree(self.TEMPLATE_DIR)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mock_create_image(image)
        image.cache(fn, self.TEMPLATE)
        self.mox.VerifyAll()
    def test_cache_image_exists(self):
        """cache() is a no-op when image and template already exist."""
        self.mox.StubOutWithMock(os.path, 'exists')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
        os.path.exists(self.PATH).AndReturn(True)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.cache(None, self.TEMPLATE)
        self.mox.VerifyAll()
    def test_cache_base_dir_exists(self):
        """cache() fetches the template but skips base dir creation."""
        self.mox.StubOutWithMock(os.path, 'exists')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
        os.path.exists(self.PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
        fn = self.mox.CreateMockAnything()
        fn(target=self.TEMPLATE_PATH)
        self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mock_create_image(image)
        image.cache(fn, self.TEMPLATE)
        self.mox.VerifyAll()
    def test_create_image(self):
        self._create_image(False)
    def test_create_image_sparsed(self):
        self.flags(sparse_logical_volumes=True, group='libvirt')
        self._create_image(True)
    def test_create_image_generated(self):
        self._create_image_generated(False)
    def test_create_image_generated_sparsed(self):
        self.flags(sparse_logical_volumes=True, group='libvirt')
        self._create_image_generated(True)
    def test_create_image_resize(self):
        self._create_image_resize(False)
    def test_create_image_resize_sparsed(self):
        self.flags(sparse_logical_volumes=True, group='libvirt')
        self._create_image_resize(True)
    def test_create_image_negative(self):
        """The LV is removed again when volume creation fails."""
        fn = self.prepare_mocks()
        fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
        self.lvm.create_volume(self.VG,
                               self.LV,
                               self.SIZE,
                               sparse=False
                               ).AndRaise(RuntimeError())
        self.disk.get_disk_size(self.TEMPLATE_PATH
                                ).AndReturn(self.TEMPLATE_SIZE)
        self.mox.StubOutWithMock(self.lvm, 'remove_volumes')
        self.lvm.remove_volumes([self.PATH])
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        self.assertRaises(RuntimeError, image.create_image, fn,
                          self.TEMPLATE_PATH, self.SIZE)
        self.mox.VerifyAll()
    def test_create_image_generated_negative(self):
        """The LV is removed again when the generator callback fails."""
        fn = self.prepare_mocks()
        fn(target=self.PATH,
           ephemeral_size=None).AndRaise(RuntimeError())
        self.lvm.create_volume(self.VG,
                               self.LV,
                               self.SIZE,
                               sparse=False)
        self.mox.StubOutWithMock(self.lvm, 'remove_volumes')
        self.lvm.remove_volumes([self.PATH])
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        self.assertRaises(RuntimeError, image.create_image, fn,
                          self.TEMPLATE_PATH, self.SIZE,
                          ephemeral_size=None)
        self.mox.VerifyAll()
    def test_prealloc_image(self):
        """LVM images never fallocate, even with preallocate_images=space."""
        CONF.set_override('preallocate_images', 'space')
        fake_processutils.fake_execute_clear_log()
        fake_processutils.stub_out_processutils_execute(self.stubs)
        image = self.image_class(self.INSTANCE, self.NAME)
        def fake_fetch(target, *args, **kwargs):
            return
        self.stubs.Set(os.path, 'exists', lambda _: True)
        self.stubs.Set(image, 'check_image_exists', lambda: True)
        image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
        self.assertEqual(fake_processutils.fake_execute_get_log(), [])
class EncryptedLvmTestCase(_ImageTestCase, test.NoDBTestCase):
VG = 'FakeVG'
TEMPLATE_SIZE = 512
SIZE = 1024
def setUp(self):
super(EncryptedLvmTestCase, self).setUp()
self.image_class = imagebackend.Lvm
self.flags(enabled=True, group='ephemeral_storage_encryption')
self.flags(cipher='aes-xts-plain64',
group='ephemeral_storage_encryption')
self.flags(key_size=512, group='ephemeral_storage_encryption')
self.flags(fixed_key='00000000000000000000000000000000'
'00000000000000000000000000000000',
group='keymgr')
self.flags(images_volume_group=self.VG, group='libvirt')
self.LV = '%s_%s' % (self.INSTANCE['uuid'], self.NAME)
self.OLD_STYLE_INSTANCE_PATH = None
self.LV_PATH = os.path.join('/dev', self.VG, self.LV)
self.PATH = os.path.join('/dev/mapper',
imagebackend.dmcrypt.volume_name(self.LV))
self.key_manager = keymgr.API()
self.INSTANCE['ephemeral_key_uuid'] =\
self.key_manager.create_key(self.CONTEXT)
self.KEY = self.key_manager.get_key(self.CONTEXT,
self.INSTANCE['ephemeral_key_uuid']).get_encoded()
self.lvm = imagebackend.lvm
self.disk = imagebackend.disk
self.utils = imagebackend.utils
self.libvirt_utils = imagebackend.libvirt_utils
self.dmcrypt = imagebackend.dmcrypt
def _create_image(self, sparse):
with contextlib.nested(
mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
mock.patch.object(self.disk, 'get_disk_size',
mock.Mock(return_value=self.TEMPLATE_SIZE)),
mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
mock.patch.object(self.libvirt_utils, 'create_lvm_image',
mock.Mock()),
mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
mock.Mock()),
mock.patch.object(self.utils, 'execute', mock.Mock())):
fn = mock.Mock()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, self.TEMPLATE_SIZE,
context=self.CONTEXT)
fn.assert_called_with(context=self.CONTEXT,
max_size=self.TEMPLATE_SIZE,
target=self.TEMPLATE_PATH)
self.lvm.create_volume.assert_called_with(self.VG,
self.LV,
self.TEMPLATE_SIZE,
sparse=sparse)
self.dmcrypt.create_volume.assert_called_with(
self.PATH.rpartition('/')[2],
self.LV_PATH,
CONF.ephemeral_storage_encryption.cipher,
CONF.ephemeral_storage_encryption.key_size,
self.KEY)
cmd = ('qemu-img',
'convert',
'-O',
'raw',
self.TEMPLATE_PATH,
self.PATH)
self.utils.execute.assert_called_with(*cmd, run_as_root=True)
def _create_image_generated(self, sparse):
with contextlib.nested(
mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
mock.patch.object(self.disk, 'get_disk_size',
mock.Mock(return_value=self.TEMPLATE_SIZE)),
mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
mock.patch.object(self.libvirt_utils, 'create_lvm_image',
mock.Mock()),
mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
mock.Mock()),
mock.patch.object(self.utils, 'execute', mock.Mock())):
fn = mock.Mock()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH,
self.SIZE,
ephemeral_size=None,
context=self.CONTEXT)
self.lvm.create_volume.assert_called_with(
self.VG,
self.LV,
self.SIZE,
sparse=sparse)
self.dmcrypt.create_volume.assert_called_with(
self.PATH.rpartition('/')[2],
self.LV_PATH,
CONF.ephemeral_storage_encryption.cipher,
CONF.ephemeral_storage_encryption.key_size,
self.KEY)
fn.assert_called_with(target=self.PATH,
ephemeral_size=None, context=self.CONTEXT)
def _create_image_resize(self, sparse):
with contextlib.nested(
mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
mock.patch.object(self.disk, 'get_disk_size',
mock.Mock(return_value=self.TEMPLATE_SIZE)),
mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
mock.patch.object(self.libvirt_utils, 'create_lvm_image',
mock.Mock()),
mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
mock.Mock()),
mock.patch.object(self.utils, 'execute', mock.Mock())):
fn = mock.Mock()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, self.SIZE,
context=self.CONTEXT)
fn.assert_called_with(context=self.CONTEXT, max_size=self.SIZE,
target=self.TEMPLATE_PATH)
self.disk.get_disk_size.assert_called_with(self.TEMPLATE_PATH)
self.lvm.create_volume.assert_called_with(
self.VG,
self.LV,
self.SIZE,
sparse=sparse)
self.dmcrypt.create_volume.assert_called_with(
self.PATH.rpartition('/')[2],
self.LV_PATH,
CONF.ephemeral_storage_encryption.cipher,
CONF.ephemeral_storage_encryption.key_size,
self.KEY)
cmd = ('qemu-img',
'convert',
'-O',
'raw',
self.TEMPLATE_PATH,
self.PATH)
self.utils.execute.assert_called_with(*cmd, run_as_root=True)
self.disk.resize2fs.assert_called_with(self.PATH, run_as_root=True)
def test_create_image(self):
self._create_image(False)
def test_create_image_sparsed(self):
self.flags(sparse_logical_volumes=True, group='libvirt')
self._create_image(True)
def test_create_image_generated(self):
self._create_image_generated(False)
def test_create_image_generated_sparsed(self):
self.flags(sparse_logical_volumes=True, group='libvirt')
self._create_image_generated(True)
def test_create_image_resize(self):
self._create_image_resize(False)
def test_create_image_resize_sparsed(self):
self.flags(sparse_logical_volumes=True, group='libvirt')
self._create_image_resize(True)
def test_create_image_negative(self):
with contextlib.nested(
mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
mock.patch.object(self.disk, 'get_disk_size',
mock.Mock(return_value=self.TEMPLATE_SIZE)),
mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
mock.patch.object(self.libvirt_utils, 'create_lvm_image',
mock.Mock()),
mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
mock.Mock()),
mock.patch.object(self.utils, 'execute', mock.Mock())):
fn = mock.Mock()
self.lvm.create_volume.side_effect = RuntimeError()
image = self.image_class(self.INSTANCE, self.NAME)
self.assertRaises(
RuntimeError,
image.create_image,
fn,
self.TEMPLATE_PATH,
self.SIZE,
context=self.CONTEXT)
fn.assert_called_with(
context=self.CONTEXT,
max_size=self.SIZE,
target=self.TEMPLATE_PATH)
self.disk.get_disk_size.assert_called_with(
self.TEMPLATE_PATH)
self.lvm.create_volume.assert_called_with(
self.VG,
self.LV,
self.SIZE,
sparse=False)
self.dmcrypt.delete_volume.assert_called_with(
self.PATH.rpartition('/')[2])
self.lvm.remove_volumes.assert_called_with([self.LV_PATH])
def test_create_image_encrypt_negative(self):
with contextlib.nested(
mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
mock.patch.object(self.disk, 'get_disk_size',
mock.Mock(return_value=self.TEMPLATE_SIZE)),
mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
mock.patch.object(self.libvirt_utils, 'create_lvm_image',
mock.Mock()),
mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
mock.Mock()),
mock.patch.object(self.utils, 'execute', mock.Mock())):
fn = mock.Mock()
self.dmcrypt.create_volume.side_effect = RuntimeError()
image = self.image_class(self.INSTANCE, self.NAME)
self.assertRaises(
RuntimeError,
image.create_image,
fn,
self.TEMPLATE_PATH,
self.SIZE,
context=self.CONTEXT)
fn.assert_called_with(
context=self.CONTEXT,
max_size=self.SIZE,
target=self.TEMPLATE_PATH)
self.disk.get_disk_size.assert_called_with(self.TEMPLATE_PATH)
self.lvm.create_volume.assert_called_with(
self.VG,
self.LV,
self.SIZE,
sparse=False)
self.dmcrypt.create_volume.assert_called_with(
self.dmcrypt.volume_name(self.LV),
self.LV_PATH,
CONF.ephemeral_storage_encryption.cipher,
CONF.ephemeral_storage_encryption.key_size,
self.KEY)
self.dmcrypt.delete_volume.assert_called_with(
self.PATH.rpartition('/')[2])
self.lvm.remove_volumes.assert_called_with([self.LV_PATH])
def test_create_image_generated_negative(self):
    """A failing generation callback must tear the volumes down again.

    fn (the image-generation callback) raises RuntimeError after the LVM
    and dm-crypt volumes were created; create_image must propagate the
    error and remove both the encrypted volume and the logical volume.
    """
    with contextlib.nested(
            mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
            mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
            mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
            mock.patch.object(self.disk, 'get_disk_size',
                              mock.Mock(return_value=self.TEMPLATE_SIZE)),
            mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
            mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
            mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
            mock.patch.object(self.libvirt_utils, 'create_lvm_image',
                              mock.Mock()),
            mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
                              mock.Mock()),
            mock.patch.object(self.utils, 'execute', mock.Mock())):
        fn = mock.Mock()
        fn.side_effect = RuntimeError()
        image = self.image_class(self.INSTANCE, self.NAME)
        self.assertRaises(RuntimeError,
                          image.create_image,
                          fn,
                          self.TEMPLATE_PATH,
                          self.SIZE,
                          ephemeral_size=None,
                          context=self.CONTEXT)
        # Volumes were created and fn was invoked on the encrypted path...
        self.lvm.create_volume.assert_called_with(
            self.VG,
            self.LV,
            self.SIZE,
            sparse=False)
        self.dmcrypt.create_volume.assert_called_with(
            self.PATH.rpartition('/')[2],
            self.LV_PATH,
            CONF.ephemeral_storage_encryption.cipher,
            CONF.ephemeral_storage_encryption.key_size,
            self.KEY)
        fn.assert_called_with(
            target=self.PATH,
            ephemeral_size=None,
            context=self.CONTEXT)
        # ...and both layers were cleaned up after the failure.
        self.dmcrypt.delete_volume.assert_called_with(
            self.PATH.rpartition('/')[2])
        self.lvm.remove_volumes.assert_called_with([self.LV_PATH])
def test_create_image_generated_encrypt_negative(self):
    """Cleanup after a failing callback on an encrypted backend.

    Same failure scenario as test_create_image_generated_negative; this
    variant checks creation and cleanup of the dm-crypt/LVM volumes but
    does not re-verify the callback's own arguments.
    """
    with contextlib.nested(
            mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
            mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
            mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
            mock.patch.object(self.disk, 'get_disk_size',
                              mock.Mock(return_value=self.TEMPLATE_SIZE)),
            mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
            mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
            mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
            mock.patch.object(self.libvirt_utils, 'create_lvm_image',
                              mock.Mock()),
            mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
                              mock.Mock()),
            mock.patch.object(self.utils, 'execute', mock.Mock())):
        fn = mock.Mock()
        fn.side_effect = RuntimeError()
        image = self.image_class(self.INSTANCE, self.NAME)
        self.assertRaises(
            RuntimeError,
            image.create_image,
            fn,
            self.TEMPLATE_PATH,
            self.SIZE,
            ephemeral_size=None,
            context=self.CONTEXT)
        self.lvm.create_volume.assert_called_with(
            self.VG,
            self.LV,
            self.SIZE,
            sparse=False)
        self.dmcrypt.create_volume.assert_called_with(
            self.PATH.rpartition('/')[2],
            self.LV_PATH,
            CONF.ephemeral_storage_encryption.cipher,
            CONF.ephemeral_storage_encryption.key_size,
            self.KEY)
        # Both the encrypted volume and the logical volume are removed.
        self.dmcrypt.delete_volume.assert_called_with(
            self.PATH.rpartition('/')[2])
        self.lvm.remove_volumes.assert_called_with([self.LV_PATH])
def test_prealloc_image(self):
    """With preallocate_images=space, cache() on this backend must run
    no external commands (the fake-execute log stays empty)."""
    self.flags(preallocate_images='space')
    fake_processutils.fake_execute_clear_log()
    fake_processutils.stub_out_processutils_execute(self.stubs)
    image = self.image_class(self.INSTANCE, self.NAME)

    # Fetch callback that does nothing — the image already "exists".
    def fake_fetch(target, *args, **kwargs):
        return

    self.stubs.Set(os.path, 'exists', lambda _: True)
    self.stubs.Set(image, 'check_image_exists', lambda: True)
    image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
    self.assertEqual(fake_processutils.fake_execute_get_log(), [])
class RbdTestCase(_ImageTestCase, test.NoDBTestCase):
    """Tests for the Rbd (Ceph RADOS block device) image backend."""

    POOL = "FakePool"
    USER = "FakeUser"
    CONF = "FakeConf"
    SIZE = 1024

    def setUp(self):
        self.image_class = imagebackend.Rbd
        super(RbdTestCase, self).setUp()
        self.flags(images_rbd_pool=self.POOL,
                   rbd_user=self.USER,
                   images_rbd_ceph_conf=self.CONF,
                   group='libvirt')
        self.libvirt_utils = imagebackend.libvirt_utils
        self.utils = imagebackend.utils
        self.mox.StubOutWithMock(rbd_utils, 'rbd')
        self.mox.StubOutWithMock(rbd_utils, 'rados')

    def test_cache(self):
        """Nothing exists yet: the template dir is created and fn fetches."""
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(image, 'check_image_exists')
        os.path.exists(self.TEMPLATE_DIR).AndReturn(False)
        image.check_image_exists().AndReturn(False)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
        fn = self.mox.CreateMockAnything()
        fn(target=self.TEMPLATE_PATH)
        self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
        imagebackend.fileutils.ensure_tree(self.TEMPLATE_DIR)
        self.mox.ReplayAll()
        self.mock_create_image(image)
        image.cache(fn, self.TEMPLATE)
        self.mox.VerifyAll()

    def test_cache_base_dir_exists(self):
        """Template dir already present: no ensure_tree call is recorded."""
        fn = self.mox.CreateMockAnything()
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(image, 'check_image_exists')
        os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
        image.check_image_exists().AndReturn(False)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
        fn = self.mox.CreateMockAnything()
        fn(target=self.TEMPLATE_PATH)
        self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
        self.mox.ReplayAll()
        self.mock_create_image(image)
        image.cache(fn, self.TEMPLATE)
        self.mox.VerifyAll()

    def test_cache_image_exists(self):
        """Image and template both exist: cache() does nothing."""
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(image, 'check_image_exists')
        os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
        image.check_image_exists().AndReturn(True)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
        self.mox.ReplayAll()
        image.cache(None, self.TEMPLATE)
        self.mox.VerifyAll()

    def test_cache_template_exists(self):
        """Template exists but image does not: only create_image runs."""
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(image, 'check_image_exists')
        os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
        image.check_image_exists().AndReturn(False)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
        self.mox.ReplayAll()
        self.mock_create_image(image)
        image.cache(None, self.TEMPLATE)
        self.mox.VerifyAll()

    def test_create_image(self):
        """create_image imports the template into RBD via 'rbd import'."""
        fn = self.mox.CreateMockAnything()
        fn(max_size=None, target=self.TEMPLATE_PATH)
        rbd_utils.rbd.RBD_FEATURE_LAYERING = 1
        fake_processutils.fake_execute_clear_log()
        fake_processutils.stub_out_processutils_execute(self.stubs)
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mox.StubOutWithMock(image, 'check_image_exists')
        image.check_image_exists().AndReturn(False)
        image.check_image_exists().AndReturn(False)
        self.mox.ReplayAll()
        image.create_image(fn, self.TEMPLATE_PATH, None)
        rbd_name = "%s_%s" % (self.INSTANCE['uuid'], self.NAME)
        cmd = ('rbd', 'import', '--pool', self.POOL, self.TEMPLATE_PATH,
               rbd_name, '--new-format', '--id', self.USER,
               '--conf', self.CONF)
        self.assertEqual(fake_processutils.fake_execute_get_log(),
                         [' '.join(cmd)])
        self.mox.VerifyAll()

    def test_create_image_resize(self):
        """A larger requested size triggers a driver resize after import."""
        fn = self.mox.CreateMockAnything()
        full_size = self.SIZE * 2
        fn(max_size=full_size, target=self.TEMPLATE_PATH)
        rbd_utils.rbd.RBD_FEATURE_LAYERING = 1
        fake_processutils.fake_execute_clear_log()
        fake_processutils.stub_out_processutils_execute(self.stubs)
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mox.StubOutWithMock(image, 'check_image_exists')
        image.check_image_exists().AndReturn(False)
        image.check_image_exists().AndReturn(False)
        rbd_name = "%s_%s" % (self.INSTANCE['uuid'], self.NAME)
        cmd = ('rbd', 'import', '--pool', self.POOL, self.TEMPLATE_PATH,
               rbd_name, '--new-format', '--id', self.USER,
               '--conf', self.CONF)
        self.mox.StubOutWithMock(image, 'get_disk_size')
        image.get_disk_size(rbd_name).AndReturn(self.SIZE)
        self.mox.StubOutWithMock(image.driver, 'resize')
        image.driver.resize(rbd_name, full_size)
        self.mox.ReplayAll()
        image.create_image(fn, self.TEMPLATE_PATH, full_size)
        self.assertEqual(fake_processutils.fake_execute_get_log(),
                         [' '.join(cmd)])
        self.mox.VerifyAll()

    def test_create_image_already_exists(self):
        """An existing RBD image of the right size is left untouched."""
        rbd_utils.rbd.RBD_FEATURE_LAYERING = 1
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mox.StubOutWithMock(image, 'check_image_exists')
        image.check_image_exists().AndReturn(True)
        self.mox.StubOutWithMock(image, 'get_disk_size')
        image.get_disk_size(self.TEMPLATE_PATH).AndReturn(self.SIZE)
        image.check_image_exists().AndReturn(True)
        rbd_name = "%s_%s" % (self.INSTANCE['uuid'], self.NAME)
        image.get_disk_size(rbd_name).AndReturn(self.SIZE)
        self.mox.ReplayAll()
        fn = self.mox.CreateMockAnything()
        image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
        self.mox.VerifyAll()

    def test_prealloc_image(self):
        """Space preallocation must not execute any external commands."""
        # Use the self.flags() fixture (as the sibling test cases do) rather
        # than CONF.set_override(): self.flags registers a cleanup, so the
        # override cannot leak into tests that run afterwards.
        self.flags(preallocate_images='space')
        fake_processutils.fake_execute_clear_log()
        fake_processutils.stub_out_processutils_execute(self.stubs)
        image = self.image_class(self.INSTANCE, self.NAME)

        def fake_fetch(target, *args, **kwargs):
            return

        self.stubs.Set(os.path, 'exists', lambda _: True)
        self.stubs.Set(image, 'check_image_exists', lambda: True)
        image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
        self.assertEqual(fake_processutils.fake_execute_get_log(), [])

    def test_parent_compatible(self):
        """Rbd.libvirt_info must keep the base class's exact signature."""
        self.assertEqual(inspect.getargspec(imagebackend.Image.libvirt_info),
                         inspect.getargspec(self.image_class.libvirt_info))

    def test_image_path(self):
        """path is the rbd:<pool>/<name>:id=<user>:conf=<conf> URI."""
        conf = "FakeConf"
        pool = "FakePool"
        user = "FakeUser"
        self.flags(images_rbd_pool=pool, group='libvirt')
        self.flags(images_rbd_ceph_conf=conf, group='libvirt')
        self.flags(rbd_user=user, group='libvirt')
        image = self.image_class(self.INSTANCE, self.NAME)
        rbd_path = "rbd:%s/%s:id=%s:conf=%s" % (pool, image.rbd_name,
                                                user, conf)
        self.assertEqual(image.path, rbd_path)
class PloopTestCase(_ImageTestCase, test.NoDBTestCase):
    """Tests for the Ploop (Virtuozzo loopback disk) image backend."""
    SIZE = 1024

    def setUp(self):
        self.image_class = imagebackend.Ploop
        super(PloopTestCase, self).setUp()
        self.utils = imagebackend.utils
        # Every Ploop image reports a fixed 2048-byte disk size in these tests.
        self.stubs.Set(imagebackend.Ploop, 'get_disk_size', lambda a, b: 2048)

    def prepare_mocks(self):
        """Stub out the collaborators of create_image; return the fetch mock."""
        fn = self.mox.CreateMockAnything()
        self.mox.StubOutWithMock(imagebackend.utils.synchronized,
                                 '__call__')
        self.mox.StubOutWithMock(imagebackend.libvirt_utils, 'copy_image')
        self.mox.StubOutWithMock(self.utils, 'execute')
        return fn

    def test_cache(self):
        """Nothing on disk: template dir is created and fn fetches."""
        self.mox.StubOutWithMock(os.path, 'exists')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_DIR).AndReturn(False)
        os.path.exists(self.PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
        fn = self.mox.CreateMockAnything()
        fn(target=self.TEMPLATE_PATH)
        self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
        imagebackend.fileutils.ensure_tree(self.TEMPLATE_DIR)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mock_create_image(image)
        image.cache(fn, self.TEMPLATE)
        self.mox.VerifyAll()

    def test_create_image(self):
        """create_image copies the template into root.hds, then restores
        and grows the ploop descriptor to the requested size."""
        fn = self.prepare_mocks()
        fn(target=self.TEMPLATE_PATH, max_size=2048, image_id=None)
        img_path = os.path.join(self.PATH, "root.hds")
        imagebackend.libvirt_utils.copy_image(self.TEMPLATE_PATH, img_path)
        self.utils.execute("ploop", "restore-descriptor", "-f", "raw",
                           self.PATH, img_path)
        self.utils.execute("ploop", "grow", '-s', "2K",
                           os.path.join(self.PATH, "DiskDescriptor.xml"),
                           run_as_root=True)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, 2048, image_id=None)
        self.mox.VerifyAll()

    def test_prealloc_image(self):
        """cache() with preallocate_images=space completes without error."""
        self.flags(preallocate_images='space')
        fake_processutils.fake_execute_clear_log()
        fake_processutils.stub_out_processutils_execute(self.stubs)
        image = self.image_class(self.INSTANCE, self.NAME)

        def fake_fetch(target, *args, **kwargs):
            return

        self.stubs.Set(os.path, 'exists', lambda _: True)
        self.stubs.Set(image, 'check_image_exists', lambda: True)
        image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
        # NOTE(review): unlike the sibling backends' test_prealloc_image,
        # this test asserts nothing after cache() returns — it only checks
        # that no exception is raised. Consider asserting on
        # fake_processutils.fake_execute_get_log(); confirm the expected
        # command log for Ploop before adding.
class BackendTestCase(test.NoDBTestCase):
    """Checks that Backend.image() yields the expected backend class for
    each image_type / use_cow combination, and that the preallocate flag
    is parsed case-insensitively."""

    INSTANCE = objects.Instance(id=1, uuid=uuidutils.generate_uuid())
    NAME = 'fake-name.suffix'

    def setUp(self):
        super(BackendTestCase, self).setUp()
        self.flags(enabled=False, group='ephemeral_storage_encryption')
        self.INSTANCE['ephemeral_key_uuid'] = None

    def get_image(self, use_cow, image_type):
        """Instantiate the backend image for the given type/CoW setting."""
        backend = imagebackend.Backend(use_cow)
        return backend.image(self.INSTANCE, self.NAME, image_type)

    def _test_image(self, image_type, image_not_cow, image_cow):
        # Build both variants first, then verify each resulting class.
        without_cow = self.get_image(False, image_type)
        with_cow = self.get_image(True, image_type)
        for image, expected in ((without_cow, image_not_cow),
                                (with_cow, image_cow)):
            failure = ('Expected %s,' +
                       ' but got %s.') % (expected.__name__,
                                          image.__class__.__name__)
            self.assertIsInstance(image, expected, msg=failure)

    def test_image_raw(self):
        self._test_image('raw', imagebackend.Raw, imagebackend.Raw)

    def test_image_raw_preallocate_images(self):
        # The flag value is matched case-insensitively.
        for value in ('space', 'Space', 'SPACE'):
            self.flags(preallocate_images=value)
            raw = imagebackend.Raw(self.INSTANCE, 'fake_disk', '/tmp/xyz')
            self.assertTrue(raw.preallocate)

    def test_image_raw_preallocate_images_bad_conf(self):
        self.flags(preallocate_images='space1')
        raw = imagebackend.Raw(self.INSTANCE, 'fake_disk', '/tmp/xyz')
        self.assertFalse(raw.preallocate)

    def test_image_qcow2(self):
        self._test_image('qcow2', imagebackend.Qcow2, imagebackend.Qcow2)

    def test_image_qcow2_preallocate_images(self):
        for value in ('space', 'Space', 'SPACE'):
            self.flags(preallocate_images=value)
            qcow = imagebackend.Qcow2(self.INSTANCE, 'fake_disk', '/tmp/xyz')
            self.assertTrue(qcow.preallocate)

    def test_image_qcow2_preallocate_images_bad_conf(self):
        self.flags(preallocate_images='space1')
        qcow = imagebackend.Qcow2(self.INSTANCE, 'fake_disk', '/tmp/xyz')
        self.assertFalse(qcow.preallocate)

    def test_image_lvm(self):
        self.flags(images_volume_group='FakeVG', group='libvirt')
        self._test_image('lvm', imagebackend.Lvm, imagebackend.Lvm)

    def test_image_rbd(self):
        self.flags(images_rbd_pool="FakePool",
                   images_rbd_ceph_conf="FakeConf",
                   group='libvirt')
        self.mox.StubOutWithMock(rbd_utils, 'rbd')
        self.mox.StubOutWithMock(rbd_utils, 'rados')
        self._test_image('rbd', imagebackend.Rbd, imagebackend.Rbd)

    def test_image_default(self):
        # 'default' maps to Raw without CoW and Qcow2 with CoW.
        self._test_image('default', imagebackend.Raw, imagebackend.Qcow2)
class UtilTestCase(test.NoDBTestCase):
    """Tests for module-level helpers in imagebackend."""

    def test_get_hw_disk_discard(self):
        """Valid discard modes pass through; None stays None; anything
        else raises RuntimeError."""
        get_discard = imagebackend.get_hw_disk_discard
        for mode in ('unmap', 'ignore'):
            self.assertEqual(mode, get_discard(mode))
        self.assertIsNone(get_discard(None))
        self.assertRaises(RuntimeError, get_discard, "fake")
| |
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test of RPCs made against gRPC Python's application-layer API."""
from concurrent import futures
import itertools
import logging
import threading
import unittest
import grpc
from grpc.framework.foundation import logging_pool
from tests.unit._rpc_test_helpers import BaseRPCTest
from tests.unit._rpc_test_helpers import Callback
from tests.unit._rpc_test_helpers import TIMEOUT_SHORT
from tests.unit._rpc_test_helpers import \
stream_stream_non_blocking_multi_callable
from tests.unit._rpc_test_helpers import \
unary_stream_non_blocking_multi_callable
from tests.unit._rpc_test_helpers import stream_stream_multi_callable
from tests.unit._rpc_test_helpers import stream_unary_multi_callable
from tests.unit._rpc_test_helpers import unary_stream_multi_callable
from tests.unit._rpc_test_helpers import unary_unary_multi_callable
from tests.unit.framework.common import test_constants
class RPCPart1Test(BaseRPCTest, unittest.TestCase):
    """Deadline, failure, and fire-and-forget behaviour of gRPC RPCs.

    Uses the BaseRPCTest fixture's channel and service _control to pause
    (force deadline expiry) or fail the servicer under test.
    """

    def testExpiredStreamRequestBlockingUnaryResponse(self):
        """A paused stream-unary RPC raises DEADLINE_EXCEEDED as RpcError."""
        requests = tuple(
            b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
        request_iterator = iter(requests)
        multi_callable = stream_unary_multi_callable(self._channel)
        with self._control.pause():
            with self.assertRaises(grpc.RpcError) as exception_context:
                multi_callable(
                    request_iterator,
                    timeout=TIMEOUT_SHORT,
                    metadata=(('test',
                               'ExpiredStreamRequestBlockingUnaryResponse'),))
        # The raised error doubles as a grpc.Call carrying full RPC state.
        self.assertIsInstance(exception_context.exception, grpc.RpcError)
        self.assertIsInstance(exception_context.exception, grpc.Call)
        self.assertIsNotNone(exception_context.exception.initial_metadata())
        self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED,
                      exception_context.exception.code())
        self.assertIsNotNone(exception_context.exception.details())
        self.assertIsNotNone(exception_context.exception.trailing_metadata())

    def testExpiredStreamRequestFutureUnaryResponse(self):
        """Future for a paused stream-unary RPC reports DEADLINE_EXCEEDED."""
        requests = tuple(
            b'\x07\x18' for _ in range(test_constants.STREAM_LENGTH))
        request_iterator = iter(requests)
        callback = Callback()
        multi_callable = stream_unary_multi_callable(self._channel)
        with self._control.pause():
            response_future = multi_callable.future(
                request_iterator,
                timeout=TIMEOUT_SHORT,
                metadata=(('test', 'ExpiredStreamRequestFutureUnaryResponse'),))
            # Polling before the deadline must time out the result() call.
            with self.assertRaises(grpc.FutureTimeoutError):
                response_future.result(timeout=TIMEOUT_SHORT / 2.0)
            response_future.add_done_callback(callback)
            value_passed_to_callback = callback.value()
        with self.assertRaises(grpc.RpcError) as exception_context:
            response_future.result()
        self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, response_future.code())
        self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED,
                      exception_context.exception.code())
        self.assertIsInstance(response_future.exception(), grpc.RpcError)
        self.assertIsNotNone(response_future.traceback())
        self.assertIs(response_future, value_passed_to_callback)
        self.assertIsNotNone(response_future.initial_metadata())
        self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, response_future.code())
        self.assertIsNotNone(response_future.details())
        self.assertIsNotNone(response_future.trailing_metadata())

    def testExpiredStreamRequestStreamResponse(self):
        self._expired_stream_request_stream_response(
            stream_stream_multi_callable(self._channel))

    def testExpiredStreamRequestStreamResponseNonBlocking(self):
        self._expired_stream_request_stream_response(
            stream_stream_non_blocking_multi_callable(self._channel))

    def testFailedUnaryRequestBlockingUnaryResponse(self):
        """A failed unary-unary RPC surfaces UNKNOWN with debug details."""
        request = b'\x37\x17'
        multi_callable = unary_unary_multi_callable(self._channel)
        with self._control.fail():
            with self.assertRaises(grpc.RpcError) as exception_context:
                multi_callable.with_call(
                    request,
                    metadata=(('test',
                               'FailedUnaryRequestBlockingUnaryResponse'),))
        self.assertIs(grpc.StatusCode.UNKNOWN,
                      exception_context.exception.code())
        # Sanity checks to make sure the returned string contains the
        # default members of the error.
        debug_error_string = exception_context.exception.debug_error_string()
        self.assertIn('created', debug_error_string)
        self.assertIn('description', debug_error_string)
        self.assertIn('file', debug_error_string)
        self.assertIn('file_line', debug_error_string)

    def testFailedUnaryRequestFutureUnaryResponse(self):
        """Future for a failed unary-unary RPC exposes the UNKNOWN error."""
        request = b'\x37\x17'
        callback = Callback()
        multi_callable = unary_unary_multi_callable(self._channel)
        with self._control.fail():
            response_future = multi_callable.future(
                request,
                metadata=(('test', 'FailedUnaryRequestFutureUnaryResponse'),))
            response_future.add_done_callback(callback)
            value_passed_to_callback = callback.value()
        self.assertIsInstance(response_future, grpc.Future)
        self.assertIsInstance(response_future, grpc.Call)
        with self.assertRaises(grpc.RpcError) as exception_context:
            response_future.result()
        self.assertIs(grpc.StatusCode.UNKNOWN,
                      exception_context.exception.code())
        self.assertIsInstance(response_future.exception(), grpc.RpcError)
        self.assertIsNotNone(response_future.traceback())
        self.assertIs(grpc.StatusCode.UNKNOWN,
                      response_future.exception().code())
        self.assertIs(response_future, value_passed_to_callback)

    def testFailedUnaryRequestStreamResponse(self):
        self._failed_unary_request_stream_response(
            unary_stream_multi_callable(self._channel))

    def testFailedUnaryRequestStreamResponseNonBlocking(self):
        self._failed_unary_request_stream_response(
            unary_stream_non_blocking_multi_callable(self._channel))

    def testFailedStreamRequestBlockingUnaryResponse(self):
        """A failed stream-unary RPC raises RpcError with UNKNOWN."""
        requests = tuple(
            b'\x47\x58' for _ in range(test_constants.STREAM_LENGTH))
        request_iterator = iter(requests)
        multi_callable = stream_unary_multi_callable(self._channel)
        with self._control.fail():
            with self.assertRaises(grpc.RpcError) as exception_context:
                multi_callable(
                    request_iterator,
                    metadata=(('test',
                               'FailedStreamRequestBlockingUnaryResponse'),))
        self.assertIs(grpc.StatusCode.UNKNOWN,
                      exception_context.exception.code())

    def testFailedStreamRequestFutureUnaryResponse(self):
        """Future for a failed stream-unary RPC exposes the UNKNOWN error."""
        requests = tuple(
            b'\x07\x18' for _ in range(test_constants.STREAM_LENGTH))
        request_iterator = iter(requests)
        callback = Callback()
        multi_callable = stream_unary_multi_callable(self._channel)
        with self._control.fail():
            response_future = multi_callable.future(
                request_iterator,
                metadata=(('test', 'FailedStreamRequestFutureUnaryResponse'),))
            response_future.add_done_callback(callback)
            value_passed_to_callback = callback.value()
        with self.assertRaises(grpc.RpcError) as exception_context:
            response_future.result()
        self.assertIs(grpc.StatusCode.UNKNOWN, response_future.code())
        self.assertIs(grpc.StatusCode.UNKNOWN,
                      exception_context.exception.code())
        self.assertIsInstance(response_future.exception(), grpc.RpcError)
        self.assertIsNotNone(response_future.traceback())
        self.assertIs(response_future, value_passed_to_callback)

    def testFailedStreamRequestStreamResponse(self):
        self._failed_stream_request_stream_response(
            stream_stream_multi_callable(self._channel))

    def testFailedStreamRequestStreamResponseNonBlocking(self):
        self._failed_stream_request_stream_response(
            stream_stream_non_blocking_multi_callable(self._channel))

    def testIgnoredUnaryRequestFutureUnaryResponse(self):
        """Starting an RPC and dropping the future must not hang or raise."""
        request = b'\x37\x17'
        multi_callable = unary_unary_multi_callable(self._channel)
        multi_callable.future(
            request,
            metadata=(('test', 'IgnoredUnaryRequestFutureUnaryResponse'),))

    def testIgnoredUnaryRequestStreamResponse(self):
        self._ignored_unary_stream_request_future_unary_response(
            unary_stream_multi_callable(self._channel))

    def testIgnoredUnaryRequestStreamResponseNonBlocking(self):
        self._ignored_unary_stream_request_future_unary_response(
            unary_stream_non_blocking_multi_callable(self._channel))

    def testIgnoredStreamRequestFutureUnaryResponse(self):
        """Dropping a stream-unary future must not hang or raise."""
        requests = tuple(
            b'\x07\x18' for _ in range(test_constants.STREAM_LENGTH))
        request_iterator = iter(requests)
        multi_callable = stream_unary_multi_callable(self._channel)
        multi_callable.future(
            request_iterator,
            metadata=(('test', 'IgnoredStreamRequestFutureUnaryResponse'),))

    def testIgnoredStreamRequestStreamResponse(self):
        self._ignored_stream_request_stream_response(
            stream_stream_multi_callable(self._channel))

    def testIgnoredStreamRequestStreamResponseNonBlocking(self):
        self._ignored_stream_request_stream_response(
            stream_stream_non_blocking_multi_callable(self._channel))
if __name__ == '__main__':
    # Configure default logging handlers before running the suite.
    logging.basicConfig()
    unittest.main(verbosity=3)
| |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Defines the task controller library."""
import argparse
import datetime
import logging
import os
import socket
import subprocess
import sys
import tempfile
import threading
import xmlrpclib
#pylint: disable=relative-import
import common_lib
# Paths to the swarming client tools invoked as subprocesses below.
ISOLATE_PY = os.path.join(common_lib.SWARMING_DIR, 'isolate.py')
SWARMING_PY = os.path.join(common_lib.SWARMING_DIR, 'swarming.py')
class Error(Exception):
  """Base exception for all task-controller errors."""
  pass
class ConnectionTimeoutError(Error):
  """Raised when the task machine fails to connect in time."""
  pass
class TaskController(object):
  """Provisions, configures, and controls a task machine.

  This class is an abstraction of a physical task machine. It provides an
  end to end API for controlling a task machine. Operations on the task machine
  are performed using the instance's "rpc" property. A simple end to end
  scenario is as follows:

  task = TaskController(...)
  task.Create()
  task.WaitForConnection()
  proc = task.rpc.subprocess.Popen(['ls'])
  print task.rpc.subprocess.GetStdout(proc)
  task.Release()
  """

  # Class-wide bookkeeping: number of controllers ever created (used for
  # default names) and all controllers created so far (for ReleaseAllTasks).
  _task_count = 0
  _tasks = []

  def __init__(self, isolate_file, config_vars, dimensions, priority=100,
               idle_timeout_secs=common_lib.DEFAULT_TIMEOUT_SECS,
               connection_timeout_secs=common_lib.DEFAULT_TIMEOUT_SECS,
               verbosity='ERROR', name=None):
    assert isinstance(config_vars, dict)
    assert isinstance(dimensions, dict)
    type(self)._tasks.append(self)
    type(self)._task_count += 1
    self.verbosity = verbosity
    self._name = name or 'Task%d' % type(self)._task_count
    self._priority = priority
    self._isolate_file = isolate_file
    # Convention: the compiled .isolated file sits next to the .isolate file.
    self._isolated_file = isolate_file + 'd'
    self._idle_timeout_secs = idle_timeout_secs
    self._config_vars = config_vars
    self._dimensions = dimensions
    self._connect_event = threading.Event()
    self._connected = False
    self._ip_address = None
    self._otp = self._CreateOTP()
    self._rpc = None

    # Server locations and the connection timeout may be supplied on the
    # command line; unknown args are ignored via parse_known_args.
    parser = argparse.ArgumentParser()
    parser.add_argument('--isolate-server')
    parser.add_argument('--swarming-server')
    parser.add_argument('--task-connection-timeout-secs',
                        default=common_lib.DEFAULT_TIMEOUT_SECS)
    args, _ = parser.parse_known_args()
    self._isolate_server = args.isolate_server
    self._swarming_server = args.swarming_server
    self._connection_timeout_secs = (connection_timeout_secs or
                                     args.task_connection_timeout_secs)

  @property
  def name(self):
    """The task's display name (defaults to 'Task<N>')."""
    return self._name

  @property
  def otp(self):
    """The one-time token identifying this task (see _CreateOTP)."""
    return self._otp

  @property
  def connected(self):
    """True once the task machine has called OnConnect."""
    return self._connected

  @property
  def connect_event(self):
    """threading.Event set when the task machine connects."""
    return self._connect_event

  @property
  def rpc(self):
    """RPC proxy to the task machine (None until connected)."""
    return self._rpc

  @property
  def verbosity(self):
    return self._verbosity

  @verbosity.setter
  def verbosity(self, level):
    """Sets the verbosity level as a string.

    Either a string ('INFO', 'DEBUG', etc) or a logging level (logging.INFO,
    logging.DEBUG, etc) is allowed.
    """
    assert isinstance(level, (str, int))
    if isinstance(level, int):
      level = logging.getLevelName(level)
    self._verbosity = level  #pylint: disable=attribute-defined-outside-init

  @classmethod
  def ReleaseAllTasks(cls):
    """Releases every task controller created so far."""
    for task in cls._tasks:
      task.Release()

  def _CreateOTP(self):
    """Creates the OTP."""
    # The token encodes the task, controller host, test name and creation
    # time so a connection can be matched back to this controller instance.
    controller_name = socket.gethostname()
    test_name = os.path.basename(sys.argv[0])
    creation_time = datetime.datetime.utcnow()
    otp = 'task:%s controller:%s test:%s creation:%s' % (
        self._name, controller_name, test_name, creation_time)
    return otp

  def Create(self):
    """Creates the task machine."""
    logging.info('Creating %s', self.name)
    # Clear before triggering so WaitForConnection blocks until OnConnect.
    self._connect_event.clear()
    self._ExecuteIsolate()
    self._ExecuteSwarming()

  def WaitForConnection(self):
    """Waits for the task machine to connect.

    Raises:
      ConnectionTimeoutError if the task doesn't connect in time.
    """
    logging.info('Waiting for %s to connect with a timeout of %d seconds',
                 self._name, self._connection_timeout_secs)
    self._connect_event.wait(self._connection_timeout_secs)
    if not self._connect_event.is_set():
      raise ConnectionTimeoutError('%s failed to connect' % self.name)

  def Release(self):
    """Quits the task's RPC server so it can release the machine."""
    if self._rpc is not None and self._connected:
      logging.info('Releasing %s', self._name)
      try:
        self._rpc.Quit()
      except (socket.error, xmlrpclib.Fault):
        logging.error('Unable to connect to %s to call Quit', self.name)
      self._rpc = None
      self._connected = False

  def _ExecuteIsolate(self):
    """Executes isolate.py."""
    cmd = [
        'python',
        ISOLATE_PY,
        'archive',
        '--isolate', self._isolate_file,
        '--isolated', self._isolated_file,
        ]
    if self._isolate_server:
      cmd.extend(['--isolate-server', self._isolate_server])
    for key, value in self._config_vars.iteritems():
      cmd.extend(['--config-var', key, value])
    self._ExecuteProcess(cmd)

  def _ExecuteSwarming(self):
    """Executes swarming.py."""
    cmd = [
        'python',
        SWARMING_PY,
        'trigger',
        self._isolated_file,
        '--priority', str(self._priority),
        ]
    if self._isolate_server:
      cmd.extend(['--isolate-server', self._isolate_server])
    if self._swarming_server:
      cmd.extend(['--swarming', self._swarming_server])
    for key, value in self._dimensions.iteritems():
      cmd.extend(['--dimension', key, value])
    # Everything after '--' is forwarded to the task-side entry point.
    cmd.extend([
        '--',
        '--controller', common_lib.MY_IP,
        '--otp', self._otp,
        '--verbosity', self._verbosity,
        '--idle-timeout', str(self._idle_timeout_secs),
        ])
    self._ExecuteProcess(cmd)

  def _ExecuteProcess(self, cmd):
    """Executes a process, waits for it to complete, and checks for success."""
    logging.debug('Running %s', ' '.join(cmd))
    # NOTE(review): stdout is captured but discarded; only stderr is
    # reported on failure. A tool that writes errors to stdout would
    # raise an empty Error here.
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    _, stderr = p.communicate()
    if p.returncode != 0:
      raise Error(stderr)

  def OnConnect(self, ip_address):
    """Receives task ip address on connection."""
    self._ip_address = ip_address
    self._connected = True
    self._rpc = common_lib.ConnectToServer(self._ip_address)
    logging.info('%s connected from %s', self._name, ip_address)
    self._connect_event.set()
| |
# -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 2011-2015 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The PyBuilder pluginloader module.
Provides a mechanism to load PyBuilder plugins.
"""
import sys
import tempfile
from traceback import format_exc
from pybuilder import __version__ as pyb_version
# Plugin install_dependencies_plugin can reload pip_common and pip_utils. Do not use from ... import ...
from pybuilder import pip_utils, pip_common
from pybuilder.errors import (MissingPluginException,
IncompatiblePluginException,
UnspecifiedPluginNameException,
)
from pybuilder.utils import read_file
# Prefixes selecting where a third-party plugin comes from: the PyPI index
# or a version-control-system URL.
PYPI_PLUGIN_PROTOCOL = "pypi:"
VCS_PLUGIN_PROTOCOL = "vcs:"

# During PyBuilder's own bootstrap build the version placeholder has not yet
# been substituted, so fall back to a minimal development version.
if pyb_version == "${dist_version}":  # This is the case of PyB bootstrap
    PYB_VERSION = pip_common.Version('0.0.1.dev0')
else:
    PYB_VERSION = pip_common.Version(pyb_version)
class PluginLoader(object):
    """Base class for plugin loaders; concrete loaders override both hooks."""

    def __init__(self, logger):
        self.logger = logger

    def can_load(self, project, name, version=None, plugin_module_name=None):
        """Return True if this loader can handle the given plugin spec."""
        pass

    def load_plugin(self, project, name, version=None, plugin_module_name=None):
        """Load and return the plugin module for the given spec."""
        pass
class BuiltinPluginLoader(PluginLoader):
    """Loads plugins that ship with PyBuilder itself."""

    def can_load(self, project, name, version=None, plugin_module_name=None):
        # Builtin plugin names never carry a protocol prefix (no colon).
        return ":" not in name

    def load_plugin(self, project, name, version=None, plugin_module_name=None):
        """Import the bundled plugin module; re-raise if it is missing."""
        self.logger.debug("Trying to load builtin plugin '%s'", name)
        module_name = plugin_module_name or "pybuilder.plugins.%s_plugin" % name
        try:
            loaded_module = _load_plugin(module_name, name)
        except MissingPluginException as e:
            self.logger.debug("Builtin plugin %s failed to load: %s", module_name, e.message)
            raise
        self.logger.debug("Found builtin plugin '%s'", module_name)
        return loaded_module
class DownloadingPluginLoader(PluginLoader):
def can_load(self, project, name, version=None, plugin_module_name=None):
return name.startswith(PYPI_PLUGIN_PROTOCOL) or name.startswith(VCS_PLUGIN_PROTOCOL) or ":" not in name
def load_plugin(self, project, name, version=None, plugin_module_name=None):
display_name = _plugin_display_name(name, version, plugin_module_name)
update_plugin = False
force_reinstall = False
thirdparty_plugin = name
# Maybe we already installed this plugin from PyPI before
if thirdparty_plugin.startswith(PYPI_PLUGIN_PROTOCOL):
thirdparty_plugin = thirdparty_plugin.replace(PYPI_PLUGIN_PROTOCOL, "")
update_plugin = pip_utils.should_update_package(version)
elif thirdparty_plugin.startswith(VCS_PLUGIN_PROTOCOL):
if not plugin_module_name:
raise UnspecifiedPluginNameException(name)
thirdparty_plugin = plugin_module_name
force_reinstall = True
# This is done before we attempt to load a plugin regardless of whether it can be loaded or not
if update_plugin or force_reinstall:
self.logger.info("Downloading or updating plugin {0}".format(display_name))
try:
_install_external_plugin(project, name, version, self.logger, plugin_module_name, update_plugin,
force_reinstall)
self.logger.info("Installed or updated plugin {0}.".format(display_name))
except MissingPluginException as e:
self.logger.error("Could not install or upgrade plugin {0}: {1}.".format(display_name, e))
# Now let's try to load the plugin
try:
return self._load_installed_plugin(thirdparty_plugin, name)
except MissingPluginException:
if update_plugin or force_reinstall:
# If we already tried installing - fail fast
raise
self.logger.warn("Missing plugin {0}".format(display_name))
# We have failed to update or to load a plugin without a previous installation
self.logger.info("Downloading plugin {0}".format(display_name))
try:
_install_external_plugin(project, name, version, self.logger, plugin_module_name)
self.logger.info("Installed plugin {0}.".format(display_name))
except MissingPluginException as e:
self.logger.error("Could not install plugin {0}: {1}.".format(display_name, e))
raise
# After we have failed to update or load
return self._load_installed_plugin(thirdparty_plugin, name)
def _load_installed_plugin(self, thirdparty_plugin, name):
    """Import an already-installed plugin module, with debug tracing.

    Args:
        thirdparty_plugin: importable module name of the plugin.
        name: original (possibly protocol-prefixed) plugin name, for error reporting.

    Returns:
        The imported plugin module.
    """
    self.logger.debug("Trying to load third party plugin '%s'", thirdparty_plugin)
    loaded_module = _load_plugin(thirdparty_plugin, name)
    self.logger.debug("Found third party plugin '%s'", thirdparty_plugin)
    return loaded_module
class DispatchingPluginLoader(PluginLoader):
    """Plugin loader that delegates to an ordered sequence of child loaders.

    The first child loader that both accepts and successfully loads the
    plugin wins; loader failures are remembered and re-raised only if no
    loader succeeds.
    """

    def __init__(self, logger, *loaders):
        super(DispatchingPluginLoader, self).__init__(logger)
        self._loaders = loaders

    def can_load(self, project, name, version=None, plugin_module_name=None):
        # True as soon as any child loader claims the plugin.
        return any(child.can_load(project, name, version, plugin_module_name)
                   for child in self._loaders)

    def load_plugin(self, project, name, version=None, plugin_module_name=None):
        last_problem = None
        for child in self._loaders:
            if not child.can_load(project, name, version, plugin_module_name):
                continue
            try:
                return child.load_plugin(project, name, version, plugin_module_name)
            except MissingPluginException as e:
                # Keep trying other loaders; remember the most recent failure.
                last_problem = e
        if last_problem:
            raise last_problem
        raise MissingPluginException(_plugin_display_name(name, version, plugin_module_name),
                                     "no plugin loader was able to load the plugin specified")
def _install_external_plugin(project, name, version, logger, plugin_module_name, upgrade=False, force_reinstall=False):
    """Install a PyPI or VCS plugin with pip, raising on failure.

    Args:
        project: supplies the ``install_dependencies_*`` pip properties.
        name: protocol-prefixed plugin name (``pypi:`` or VCS protocol).
        version: optional version specifier appended for PyPI plugins.
        logger: logger for pip diagnostics.
        plugin_module_name: module name (unused here, part of the call contract).
        upgrade: pass ``--upgrade`` to pip.
        force_reinstall: pass ``--force-reinstall`` to pip.

    Raises:
        MissingPluginException: unsupported protocol or pip returned non-zero.
    """
    supported = (PYPI_PLUGIN_PROTOCOL, VCS_PLUGIN_PROTOCOL)
    if not name.startswith(supported):
        message = "Only plugins starting with '{0}' are currently supported"
        raise MissingPluginException(name, message.format(supported))

    if name.startswith(PYPI_PLUGIN_PROTOCOL):
        pip_package = name.replace(PYPI_PLUGIN_PROTOCOL, "")
        if version:
            # A pinned version implies an upgrade so pip honors the new pin.
            pip_package += str(version)
            upgrade = True
    else:
        # VCS installs are never considered up to date; always reinstall.
        pip_package = name.replace(VCS_PLUGIN_PROTOCOL, "")
        force_reinstall = True

    with tempfile.NamedTemporaryFile(mode="w+t") as log_file:
        exit_code = pip_utils.pip_install(
            install_targets=pip_package,
            index_url=project.get_property("install_dependencies_index_url"),
            extra_index_url=project.get_property("install_dependencies_extra_index_url"),
            trusted_host=project.get_property("install_dependencies_trusted_host"),
            upgrade=upgrade,
            force_reinstall=force_reinstall,
            logger=logger,
            outfile_name=log_file,
            error_file_name=log_file,
            cwd=".")
        if exit_code != 0:
            logger.error("The following pip error was encountered:\n" + "".join(read_file(log_file)))
            raise MissingPluginException(name, "Failed to install plugin from {0}".format(pip_package))
def _plugin_display_name(name, version, plugin_module_name):
return "%s%s%s" % (name, " version %s" % version if version else "",
", module name '%s'" % plugin_module_name if plugin_module_name else "")
def _load_plugin(plugin_module_name, plugin_name):
    """Import a plugin module and verify its declared PyBuilder version spec.

    Args:
        plugin_module_name: importable module name.
        plugin_name: display name used when raising errors.

    Returns:
        The imported module.

    Raises:
        MissingPluginException: the module could not be imported; the message
            carries the full import traceback for diagnosis.
    """
    try:
        __import__(plugin_module_name)
        imported = sys.modules[plugin_module_name]
        _check_plugin_version(imported, plugin_name)
        return imported
    except ImportError:
        raise MissingPluginException(plugin_name, format_exc())
def _check_plugin_version(plugin_module, plugin_name):
if hasattr(plugin_module, "pyb_version") and plugin_module.pyb_version:
if not pip_utils.version_satisfies_spec(plugin_module.pyb_version, PYB_VERSION):
raise IncompatiblePluginException(plugin_name, plugin_module.pyb_version, PYB_VERSION)
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from parameterized import parameterized
from airflow.api_connexion.exceptions import EXCEPTIONS_LINK_MAP
from airflow.models import Variable
from airflow.security import permissions
from tests.test_utils.api_connexion_utils import assert_401, create_user, delete_user
from tests.test_utils.config import conf_vars
from tests.test_utils.db import clear_db_variables
@pytest.fixture(scope="module")
def configured_app(minimal_app_for_api):
    """Module-scoped app fixture with two users: full variable CRUD vs. no permissions."""
    connexion_app = minimal_app_for_api
    variable_actions = (
        permissions.ACTION_CAN_CREATE,
        permissions.ACTION_CAN_READ,
        permissions.ACTION_CAN_EDIT,
        permissions.ACTION_CAN_DELETE,
    )
    create_user(
        connexion_app,  # type: ignore
        username="test",
        role_name="Test",
        permissions=[(action, permissions.RESOURCE_VARIABLE) for action in variable_actions],
    )
    create_user(connexion_app, username="test_no_permissions", role_name="TestNoPermissions")  # type: ignore

    yield connexion_app

    # Module teardown: remove the users created above.
    delete_user(connexion_app, username="test")  # type: ignore
    delete_user(connexion_app, username="test_no_permissions")  # type: ignore
class TestVariableEndpoint:
    """Shared base for variable-endpoint tests.

    Provides ``self.app``/``self.client`` and guarantees the Variable table
    is empty both before and after every test method.
    """

    @pytest.fixture(autouse=True)
    def setup_method(self, configured_app) -> None:
        # autouse: runs before every test in every subclass.
        self.app = configured_app
        self.client = self.app.test_client()  # type:ignore
        clear_db_variables()

    def teardown_method(self) -> None:
        # Leave no variables behind for the next test.
        clear_db_variables()
class TestDeleteVariable(TestVariableEndpoint):
    """Behaviour of DELETE /api/v1/variables/{key}."""

    def test_should_delete_variable(self):
        url = "/api/v1/variables/delete_var1"
        Variable.set("delete_var1", 1)
        # Variable must be readable before deletion...
        assert self.client.get(url, environ_overrides={'REMOTE_USER': "test"}).status_code == 200
        assert self.client.delete(url, environ_overrides={'REMOTE_USER': "test"}).status_code == 204
        # ...and gone afterwards.
        assert self.client.get(url, environ_overrides={'REMOTE_USER': "test"}).status_code == 404

    def test_should_respond_404_if_key_does_not_exist(self):
        deleted = self.client.delete(
            "/api/v1/variables/NONEXIST_VARIABLE_KEY", environ_overrides={'REMOTE_USER': "test"}
        )
        assert deleted.status_code == 404

    def test_should_raises_401_unauthenticated(self):
        Variable.set("delete_var1", 1)
        assert_401(self.client.delete("/api/v1/variables/delete_var1"))
        # The anonymous request must not have removed the variable.
        still_there = self.client.get(
            "/api/v1/variables/delete_var1", environ_overrides={'REMOTE_USER': "test"}
        )
        assert still_there.status_code == 200

    def test_should_raise_403_forbidden(self):
        # NOTE(review): this exercises GET, not DELETE, despite living in the
        # delete test class — presumably intentional coverage of read access;
        # confirm whether a DELETE-based 403 check was intended.
        expected_value = '{"foo": 1}'
        Variable.set("TEST_VARIABLE_KEY", expected_value)
        forbidden = self.client.get(
            "/api/v1/variables/TEST_VARIABLE_KEY", environ_overrides={'REMOTE_USER': "test_no_permissions"}
        )
        assert forbidden.status_code == 403
class TestGetVariable(TestVariableEndpoint):
    """Behaviour of GET /api/v1/variables/{key}."""

    def test_should_respond_200(self):
        expected_value = '{"foo": 1}'
        Variable.set("TEST_VARIABLE_KEY", expected_value)
        found = self.client.get(
            "/api/v1/variables/TEST_VARIABLE_KEY", environ_overrides={'REMOTE_USER': "test"}
        )
        assert found.status_code == 200
        # The stored value is returned verbatim as a string.
        assert found.json == {"key": "TEST_VARIABLE_KEY", "value": expected_value}

    def test_should_respond_404_if_not_found(self):
        missing = self.client.get(
            "/api/v1/variables/NONEXIST_VARIABLE_KEY", environ_overrides={'REMOTE_USER': "test"}
        )
        assert missing.status_code == 404

    def test_should_raises_401_unauthenticated(self):
        Variable.set("TEST_VARIABLE_KEY", '{"foo": 1}')
        assert_401(self.client.get("/api/v1/variables/TEST_VARIABLE_KEY"))
class TestGetVariables(TestVariableEndpoint):
    """Behaviour of the list endpoint GET /api/v1/variables: pagination, limits, ordering, auth."""

    # Each case: (query string, expected JSON body) against the same 3 fixtures.
    @parameterized.expand(
        [
            (
                "/api/v1/variables?limit=2&offset=0",
                {
                    "variables": [
                        {"key": "var1", "value": "1"},
                        {"key": "var2", "value": "foo"},
                    ],
                    "total_entries": 3,
                },
            ),
            (
                "/api/v1/variables?limit=2&offset=1",
                {
                    "variables": [
                        {"key": "var2", "value": "foo"},
                        {"key": "var3", "value": "[100, 101]"},
                    ],
                    "total_entries": 3,
                },
            ),
            (
                "/api/v1/variables?limit=1&offset=2",
                {
                    "variables": [
                        {"key": "var3", "value": "[100, 101]"},
                    ],
                    "total_entries": 3,
                },
            ),
        ]
    )
    def test_should_get_list_variables(self, query, expected):
        # Three known variables; each parameterized case slices them differently.
        Variable.set("var1", 1)
        Variable.set("var2", "foo")
        Variable.set("var3", "[100, 101]")
        response = self.client.get(query, environ_overrides={'REMOTE_USER': "test"})
        assert response.status_code == 200
        assert response.json == expected

    def test_should_respect_page_size_limit_default(self):
        # 101 variables but the default page size caps the page at 100.
        for i in range(101):
            Variable.set(f"var{i}", i)
        response = self.client.get("/api/v1/variables", environ_overrides={'REMOTE_USER': "test"})
        assert response.status_code == 200
        assert response.json["total_entries"] == 101
        assert len(response.json["variables"]) == 100

    def test_should_raise_400_for_invalid_order_by(self):
        for i in range(101):
            Variable.set(f"var{i}", i)
        response = self.client.get(
            "/api/v1/variables?order_by=invalid", environ_overrides={'REMOTE_USER': "test"}
        )
        assert response.status_code == 400
        msg = "Ordering with 'invalid' is disallowed or the attribute does not exist on the model"
        assert response.json["detail"] == msg

    @conf_vars({("api", "maximum_page_limit"): "150"})
    def test_should_return_conf_max_if_req_max_above_conf(self):
        # Requested limit (180) exceeds the configured maximum (150); the
        # configured maximum wins.
        for i in range(200):
            Variable.set(f"var{i}", i)
        response = self.client.get("/api/v1/variables?limit=180", environ_overrides={'REMOTE_USER': "test"})
        assert response.status_code == 200
        assert len(response.json['variables']) == 150

    def test_should_raises_401_unauthenticated(self):
        Variable.set("var1", 1)
        response = self.client.get("/api/v1/variables?limit=2&offset=0")
        assert_401(response)
class TestPatchVariable(TestVariableEndpoint):
    """Behaviour of PATCH /api/v1/variables/{key}."""

    def test_should_update_variable(self):
        Variable.set("var1", "foo")
        payload = {
            "key": "var1",
            "value": "updated",
        }
        patched = self.client.patch(
            "/api/v1/variables/var1",
            json=payload,
            environ_overrides={'REMOTE_USER': "test"},
        )
        assert patched.status_code == 200
        # The endpoint echoes the updated variable back.
        assert patched.json == payload

    def test_should_reject_invalid_update(self):
        Variable.set("var1", "foo")
        # Case 1: key in the body disagrees with the key in the URI.
        mismatched = self.client.patch(
            "/api/v1/variables/var1",
            json={
                "key": "var2",
                "value": "updated",
            },
            environ_overrides={'REMOTE_USER': "test"},
        )
        assert mismatched.status_code == 400
        assert mismatched.json == {
            "title": "Invalid post body",
            "status": 400,
            "type": EXCEPTIONS_LINK_MAP[400],
            "detail": "key from request body doesn't match uri parameter",
        }
        # Case 2: required 'value' field missing entirely.
        incomplete = self.client.patch(
            "/api/v1/variables/var1",
            json={
                "key": "var2",
            },
            environ_overrides={'REMOTE_USER': "test"},
        )
        assert incomplete.json == {
            "title": "Invalid Variable schema",
            "status": 400,
            "type": EXCEPTIONS_LINK_MAP[400],
            "detail": "{'value': ['Missing data for required field.']}",
        }

    def test_should_raises_401_unauthenticated(self):
        Variable.set("var1", "foo")
        anonymous = self.client.patch(
            "/api/v1/variables/var1",
            json={
                "key": "var1",
                "value": "updated",
            },
        )
        assert_401(anonymous)
class TestPostVariables(TestVariableEndpoint):
    """Behaviour of POST /api/v1/variables."""

    def test_should_create_variable(self):
        payload = {
            "key": "var_create",
            "value": "{}",
        }
        created = self.client.post(
            "/api/v1/variables",
            json=payload,
            environ_overrides={'REMOTE_USER': "test"},
        )
        assert created.status_code == 200
        # The variable is immediately readable with the posted content.
        fetched = self.client.get("/api/v1/variables/var_create", environ_overrides={'REMOTE_USER': "test"})
        assert fetched.json == payload

    def test_should_reject_invalid_request(self):
        # 'v' is not a valid field and 'value' is missing.
        rejected = self.client.post(
            "/api/v1/variables",
            json={
                "key": "var_create",
                "v": "{}",
            },
            environ_overrides={'REMOTE_USER': "test"},
        )
        assert rejected.status_code == 400
        assert rejected.json == {
            "title": "Invalid Variable schema",
            "status": 400,
            "type": EXCEPTIONS_LINK_MAP[400],
            "detail": "{'value': ['Missing data for required field.'], 'v': ['Unknown field.']}",
        }

    def test_should_raises_401_unauthenticated(self):
        anonymous = self.client.post(
            "/api/v1/variables",
            json={
                "key": "var_create",
                "value": "{}",
            },
        )
        assert_401(anonymous)
| |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add the ``supply`` FK on SupplyReservationLine."""

    def forwards(self, orm):
        """Apply: add the ``supply`` foreign-key column."""
        # Adding field 'SupplyReservationLine.supply'
        # default=1 backfills existing rows with the Supply of pk=1
        # (assumes such a row exists — confirm on deployed data);
        # keep_default=False removes the default from the schema afterwards.
        db.add_column(u'logistics_supplyreservationline', 'supply',
                      self.gf('django.db.models.fields.related.ForeignKey')(default=1, related_name='reservations', to=orm['logistics.Supply']),
                      keep_default=False)

    def backwards(self, orm):
        """Revert: drop the ``supply`` foreign-key column."""
        # Deleting field 'SupplyReservationLine.supply'
        db.delete_column(u'logistics_supplyreservationline', 'supply_id')

    # Frozen ORM snapshot auto-generated by South ("./manage.py schemamigration").
    # Do not edit by hand.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'logistics.room': {
            'Meta': {'object_name': 'Room'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'allow_calendar': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'allow_external_calendar': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'allow_externals': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'conditions': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'conditions_externals': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'description': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'max_days': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'max_days_externals': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'maximum_days_before': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'maximum_days_before_externals': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'minimum_days_before': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'minimum_days_before_externals': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['units.Unit']"})
        },
        u'logistics.roomlogging': {
            'Meta': {'object_name': 'RoomLogging'},
            'extra_data': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'logs'", 'to': u"orm['logistics.Room']"}),
            'what': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'when': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'who': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['users.TruffeUser']"})
        },
        u'logistics.roomreservation': {
            'Meta': {'object_name': 'RoomReservation'},
            'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'end_date': ('django.db.models.fields.DateTimeField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'reason': ('django.db.models.fields.TextField', [], {}),
            'remarks': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'room': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['logistics.Room']"}),
            'start_date': ('django.db.models.fields.DateTimeField', [], {}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'0_draft'", 'max_length': '255'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['units.Unit']", 'null': 'True', 'blank': 'True'}),
            'unit_blank_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'unit_blank_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['users.TruffeUser']", 'null': 'True', 'blank': 'True'})
        },
        u'logistics.roomreservationlogging': {
            'Meta': {'object_name': 'RoomReservationLogging'},
            'extra_data': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'logs'", 'to': u"orm['logistics.RoomReservation']"}),
            'what': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'when': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'who': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['users.TruffeUser']"})
        },
        u'logistics.roomreservationviews': {
            'Meta': {'object_name': 'RoomReservationViews'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'views'", 'to': u"orm['logistics.RoomReservation']"}),
            'when': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'who': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['users.TruffeUser']"})
        },
        u'logistics.roomviews': {
            'Meta': {'object_name': 'RoomViews'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'views'", 'to': u"orm['logistics.Room']"}),
            'when': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'who': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['users.TruffeUser']"})
        },
        u'logistics.supply': {
            'Meta': {'object_name': 'Supply'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'allow_calendar': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'allow_external_calendar': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'allow_externals': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'conditions': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'conditions_externals': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'description': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'max_days': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'max_days_externals': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'maximum_days_before': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'maximum_days_before_externals': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'minimum_days_before': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'minimum_days_before_externals': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'quantity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['units.Unit']"})
        },
        u'logistics.supplylogging': {
            'Meta': {'object_name': 'SupplyLogging'},
            'extra_data': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'logs'", 'to': u"orm['logistics.Supply']"}),
            'what': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'when': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'who': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['users.TruffeUser']"})
        },
        u'logistics.supplyreservation': {
            'Meta': {'object_name': 'SupplyReservation'},
            'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'end_date': ('django.db.models.fields.DateTimeField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'reason': ('django.db.models.fields.TextField', [], {}),
            'remarks': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'start_date': ('django.db.models.fields.DateTimeField', [], {}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'0_draft'", 'max_length': '255'}),
            'supply': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['logistics.Supply']"}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['units.Unit']", 'null': 'True', 'blank': 'True'}),
            'unit_blank_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'unit_blank_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['users.TruffeUser']", 'null': 'True', 'blank': 'True'})
        },
        u'logistics.supplyreservationline': {
            'Meta': {'object_name': 'SupplyReservationLine'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'order': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'quantity': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'supply': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reservations'", 'to': u"orm['logistics.Supply']"}),
            'supply_reservation': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'lines'", 'to': u"orm['logistics.SupplyReservation']"})
        },
        u'logistics.supplyreservationlogging': {
            'Meta': {'object_name': 'SupplyReservationLogging'},
            'extra_data': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'logs'", 'to': u"orm['logistics.SupplyReservation']"}),
            'what': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'when': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'who': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['users.TruffeUser']"})
        },
        u'logistics.supplyreservationviews': {
            'Meta': {'object_name': 'SupplyReservationViews'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'views'", 'to': u"orm['logistics.SupplyReservation']"}),
            'when': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'who': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['users.TruffeUser']"})
        },
        u'logistics.supplyviews': {
            'Meta': {'object_name': 'SupplyViews'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'views'", 'to': u"orm['logistics.Supply']"}),
            'when': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'who': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['users.TruffeUser']"})
        },
        u'units.unit': {
            'Meta': {'object_name': 'Unit'},
            'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'id_epfl': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
            'is_commission': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_equipe': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'parent_hierarchique': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['units.Unit']", 'null': 'True', 'blank': 'True'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
        },
        u'users.truffeuser': {
            'Meta': {'object_name': 'TruffeUser'},
            'adresse': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'avatar': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'body': ('django.db.models.fields.CharField', [], {'default': "'.'", 'max_length': '1'}),
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '255'}),
            'email_perso': ('django.db.models.fields.EmailField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            'homepage': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'iban_ou_ccp': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_betatester': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'mobile': ('django.db.models.fields.CharField', [], {'max_length': '25', 'blank': 'True'}),
            'nom_banque': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
        }
    }

    complete_apps = ['logistics']
| |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Thread and ThreadGroup that reraise exceptions on the main thread."""
# pylint: disable=W0212
import logging
import sys
import threading
import time
import traceback
from devil.utils import watchdog_timer
class TimeoutError(Exception):
  """Module-specific timeout exception.

  Raised by ReraiserThreadGroup join operations when the watchdog or the
  join deadline expires before all threads finish.
  """
def LogThreadStack(thread, error_log_func=logging.critical):
  """Log the stack for the given thread.

  Args:
    thread: a threading.Thread instance; must be alive (its frame is looked
        up via sys._current_frames, which only lists running threads).
    error_log_func: Logging function when logging errors.
  """
  banner = '*' * 80
  frame = sys._current_frames()[thread.ident]
  error_log_func(banner)
  error_log_func('Stack dump for thread %r', thread.name)
  error_log_func(banner)
  for filename, lineno, name, line in traceback.extract_stack(frame):
    error_log_func('File: "%s", line %d, in %s', filename, lineno, name)
    if line:
      error_log_func('  %s', line.strip())
  error_log_func(banner)
class ReraiserThread(threading.Thread):
"""Thread class that can reraise exceptions."""
def __init__(self, func, args=None, kwargs=None, name=None):
"""Initialize thread.
Args:
func: callable to call on a new thread.
args: list of positional arguments for callable, defaults to empty.
kwargs: dictionary of keyword arguments for callable, defaults to empty.
name: thread name, defaults to Thread-N.
"""
super(ReraiserThread, self).__init__(name=name)
if not args:
args = []
if not kwargs:
kwargs = {}
self.daemon = True
self._func = func
self._args = args
self._kwargs = kwargs
self._ret = None
self._exc_info = None
self._thread_group = None
def ReraiseIfException(self):
"""Reraise exception if an exception was raised in the thread."""
if self._exc_info:
raise self._exc_info[0], self._exc_info[1], self._exc_info[2]
def GetReturnValue(self):
"""Reraise exception if present, otherwise get the return value."""
self.ReraiseIfException()
return self._ret
#override
def run(self):
"""Overrides Thread.run() to add support for reraising exceptions."""
try:
self._ret = self._func(*self._args, **self._kwargs)
except: # pylint: disable=W0702
self._exc_info = sys.exc_info()
class ReraiserThreadGroup(object):
"""A group of ReraiserThread objects."""
def __init__(self, threads=None):
"""Initialize thread group.
Args:
threads: a list of ReraiserThread objects; defaults to empty.
"""
self._threads = []
# Set when a thread from one group has called JoinAll on another. It is used
# to detect when a there is a TimeoutRetryThread active that links to the
# current thread.
self.blocked_parent_thread_group = None
if threads:
for thread in threads:
self.Add(thread)
def Add(self, thread):
"""Add a thread to the group.
Args:
thread: a ReraiserThread object.
"""
assert thread._thread_group is None
thread._thread_group = self
self._threads.append(thread)
def StartAll(self, will_block=False):
"""Start all threads.
Args:
will_block: Whether the calling thread will subsequently block on this
thread group. Causes the active ReraiserThreadGroup (if there is one)
to be marked as blocking on this thread group.
"""
if will_block:
# Multiple threads blocking on the same outer thread should not happen in
# practice.
assert not self.blocked_parent_thread_group
self.blocked_parent_thread_group = CurrentThreadGroup()
for thread in self._threads:
thread.start()
def _JoinAll(self, watcher=None, timeout=None):
"""Join all threads without stack dumps.
Reraises exceptions raised by the child threads and supports breaking
immediately on exceptions raised on the main thread.
Args:
watcher: Watchdog object providing the thread timeout. If none is
provided, the thread will never be timed out.
timeout: An optional number of seconds to wait before timing out the join
operation. This will not time out the threads.
"""
if watcher is None:
watcher = watchdog_timer.WatchdogTimer(None)
alive_threads = self._threads[:]
end_time = (time.time() + timeout) if timeout else None
try:
while alive_threads and (end_time is None or end_time > time.time()):
for thread in alive_threads[:]:
if watcher.IsTimedOut():
raise TimeoutError('Timed out waiting for %d of %d threads.' %
(len(alive_threads), len(self._threads)))
# Allow the main thread to periodically check for interrupts.
thread.join(0.1)
if not thread.isAlive():
alive_threads.remove(thread)
# All threads are allowed to complete before reraising exceptions.
for thread in self._threads:
thread.ReraiseIfException()
finally:
self.blocked_parent_thread_group = None
def IsAlive(self):
    """Check whether any of the threads are still alive.

    Returns:
      Whether any of the threads are still alive.
    """
    # Thread.isAlive() was removed in Python 3.9; is_alive() is the
    # supported spelling.
    return any(t.is_alive() for t in self._threads)
def JoinAll(self, watcher=None, timeout=None,
            error_log_func=logging.critical):
    """Join all threads.

    Reraises exceptions raised by the child threads and supports breaking
    immediately on exceptions raised on the main thread. Unfinished threads'
    stacks will be logged on watchdog timeout.

    Args:
      watcher: Watchdog object providing the thread timeout. If none is
        provided, the thread will never be timed out.
      timeout: An optional number of seconds to wait before timing out the
        join operation. This will not time out the threads.
      error_log_func: Logging function when logging errors.
    """
    try:
        self._JoinAll(watcher, timeout)
    except TimeoutError:
        error_log_func('Timed out. Dumping threads.')
        # Thread.isAlive() was removed in Python 3.9; is_alive() is the
        # supported spelling.
        for thread in (t for t in self._threads if t.is_alive()):
            LogThreadStack(thread, error_log_func=error_log_func)
        raise
def GetAllReturnValues(self, watcher=None):
    """Get all return values, joining all threads if necessary.

    Args:
      watcher: same as in |JoinAll|. Only used if threads are alive.
    """
    # Use a generator (no intermediate list) and the is_alive() spelling
    # (Thread.isAlive() was removed in Python 3.9).
    if any(t.is_alive() for t in self._threads):
        self.JoinAll(watcher)
    return [t.GetReturnValue() for t in self._threads]
def CurrentThreadGroup():
    """Returns the ReraiserThreadGroup that owns the running thread.

    Returns:
      The current thread group, otherwise None.
    """
    thread = threading.current_thread()
    if not isinstance(thread, ReraiserThread):
        return None
    return thread._thread_group  # pylint: disable=no-member
def RunAsync(funcs, watcher=None):
    """Executes the given functions in parallel and returns their results.

    Args:
      funcs: List of functions to perform on their own threads.
      watcher: Watchdog object providing timeout, by default waits forever.

    Returns:
      A list of return values in the order of the given functions.
    """
    group = ReraiserThreadGroup(ReraiserThread(func) for func in funcs)
    group.StartAll(will_block=True)
    return group.GetAllReturnValues(watcher=watcher)
| |
"""
This module provides solvers for system-bath evolution using the
HEOM (hierarchy equations of motion).
See https://en.wikipedia.org/wiki/Hierarchical_equations_of_motion for a very
basic introduction to the technique.
The implementation is derived from the BoFiN library (see
https://github.com/tehruhn/bofin) which was itself derived from an earlier
implementation in QuTiP itself.
"""
from copy import deepcopy
import numpy as np
import scipy.sparse as sp
import scipy.integrate
from scipy.sparse.linalg import spsolve
from qutip import settings
from qutip import state_number_enumerate
from qutip.qobj import Qobj
from qutip.qobjevo import QobjEvo
from qutip.superoperator import liouvillian, spre, spost, vec2mat
from qutip.cy.spmatfuncs import cy_ode_rhs
from qutip.solver import Options, Result
from qutip.cy.spconvert import dense2D_to_fastcsr_fmode
from qutip.ui.progressbar import BaseProgressBar, TextProgressBar
from qutip.fastsparse import fast_identity, fast_csr_matrix
from qutip.nonmarkov.bofin_baths import (
BathExponent, DrudeLorentzBath,
)
# Load MKL spsolve if available
if settings.has_mkl:
from qutip._mkl.spsolve import mkl_spsolve
else:
mkl_spsolve = None
class HierarchyADOs:
    """
    A description of ADOs (auxiliary density operators) within the
    hierarchical equations of motion.

    The list of ADOs is constructed from a list of bath exponents
    (corresponding to one or more baths). Each ADO is referred to by a label
    that lists the number of "excitations" of each bath exponent. The
    level of a label within the hierarchy is the sum of the "excitations"
    within the label.

    For example the label ``(0, 0, ..., 0)`` represents the density matrix
    of the system being solved and is the only 0th level label.

    The labels with a single 1, i.e. ``(1, 0, ..., 0)``, ``(0, 1, 0, ... 0)``,
    etc. are the 1st level labels.

    The second level labels all have either two 1s or a single 2, and so on
    for the third and higher levels of the hierarchy.

    Parameters
    ----------
    exponents : list of BathExponent
        The exponents of the correlation function describing the bath or
        baths.

    max_depth : int
        The maximum depth of the hierarchy (i.e. the maximum sum of
        "excitations" in the hierarchy ADO labels or maximum ADO level).

    Attributes
    ----------
    exponents : list of BathExponent
        The exponents of the correlation function describing the bath or
        baths.

    max_depth : int
        The maximum depth of the hierarchy (i.e. the maximum sum of
        "excitations" in the hierarchy ADO labels).

    dims : list of int
        The dimensions of each exponent within the bath(s).

    vk : list of complex
        The frequency of each exponent within the bath(s).

    ck : list of complex
        The coefficient of each exponent within the bath(s).

    ck2 : list of complex
        For exponents of type "RI", the coefficient of the exponent within
        the imaginary expansion. For other exponent types, the entry is None.

    sigma_bar_k_offset : list of int
        For exponents of type "+" or "-" the offset within the list of modes
        of the corresponding "-" or "+" exponent. For other exponent types,
        the entry is None.

    labels : list of tuples
        A list of the ADO labels within the hierarchy.
    """
    def __init__(self, exponents, max_depth):
        self.exponents = exponents
        self.max_depth = max_depth

        # An exponent whose .dim is None (falsy) is unrestricted, so its
        # label entry may range over the full depth of the hierarchy.
        self.dims = [exp.dim or (max_depth + 1) for exp in self.exponents]
        self.vk = [exp.vk for exp in self.exponents]
        self.ck = [exp.ck for exp in self.exponents]
        self.ck2 = [exp.ck2 for exp in self.exponents]
        self.sigma_bar_k_offset = [
            exp.sigma_bar_k_offset for exp in self.exponents
        ]

        self.labels = list(state_number_enumerate(self.dims, max_depth))
        # Reverse map from label tuple to its position within self.labels.
        self._label_idx = {s: i for i, s in enumerate(self.labels)}

    def idx(self, label):
        """
        Return the index of the ADO label within the list of labels,
        i.e. within ``self.labels``.

        Parameters
        ----------
        label : tuple
            The label to look up.

        Returns
        -------
        int
            The index of the label within the list of ADO labels.
        """
        return self._label_idx[label]

    def next(self, label, k):
        """
        Return the ADO label with one more excitation in the k'th exponent
        dimension or ``None`` if adding the excitation would exceed the
        dimension or maximum depth of the hierarchy.

        Parameters
        ----------
        label : tuple
            The ADO label to add an excitation to.
        k : int
            The exponent to add the excitation to.

        Returns
        -------
        tuple or None
            The next label.
        """
        if label[k] >= self.dims[k] - 1:
            return None
        if sum(label) >= self.max_depth:
            return None
        return label[:k] + (label[k] + 1,) + label[k + 1:]

    def prev(self, label, k):
        """
        Return the ADO label with one fewer excitation in the k'th
        exponent dimension or ``None`` if the label has no excitations in
        the k'th exponent.

        Parameters
        ----------
        label : tuple
            The ADO label to remove the excitation from.
        k : int
            The exponent to remove the excitation from.

        Returns
        -------
        tuple or None
            The previous label.
        """
        if label[k] <= 0:
            return None
        return label[:k] + (label[k] - 1,) + label[k + 1:]

    def exps(self, label):
        """
        Converts an ADO label into a tuple of exponents, with one exponent
        for each "excitation" within the label.

        The number of exponents returned is always equal to the level of the
        label within the hierarchy (i.e. the sum of the indices within the
        label).

        Parameters
        ----------
        label : tuple
            The ADO label to convert to a list of exponents.

        Returns
        -------
        tuple of BathExponent
            A tuple of BathExponents.

        Examples
        --------
        ``ados.exps((1, 0, 0))`` would return ``[ados.exponents[0]]``

        ``ados.exps((2, 0, 0))`` would return
        ``[ados.exponents[0], ados.exponents[0]]``.

        ``ados.exps((1, 2, 1))`` would return
        ``[ados.exponents[0], ados.exponents[1], ados.exponents[1], \
ados.exponents[2]]``.
        """
        # Repeat each exponent by its excitation count; the empty tuple is
        # the initial value for sum() so the result is a flat tuple.
        return sum(
            ((exp,) * n for (n, exp) in zip(label, self.exponents) if n > 0),
            (),
        )

    def filter(self, level=None, tags=None, dims=None, types=None):
        """
        Return a list of ADO labels for ADOs whose "excitations"
        match the given patterns.

        Each of the filter parameters (tags, dims, types) may be either
        unspecified (None) or a list. Unspecified parameters are excluded
        from the filtering.

        All specified filter parameters must be lists of the same length.
        Each position in the lists describes a particular excitation and
        any exponent that matches the filters may supply that excitation.
        The level of all labels returned is thus equal to the length of
        the filter parameter lists.

        Within a filter parameter list, items that are None represent
        wildcards and match any value of that exponent attribute.

        Parameters
        ----------
        level : int
            The hierarchy depth to return ADOs from.

        tags : list of object or None
            Filter parameter that matches the ``.tag`` attribute of
            exponents.

        dims : list of int
            Filter parameter that matches the ``.dim`` attribute of
            exponents.

        types : list of BathExponent types or list of str
            Filter parameter that matches the ``.type`` attribute
            of exponents. Types may be supplied by name (e.g. "R", "I", "+")
            instead of by the actual type (e.g. ``BathExponent.types.R``).

        Returns
        -------
        list of tuple
            The ADO label for each ADO whose exponent excitations
            (i.e. label) match the given filters or level.
        """
        # Normalize string type names (e.g. "R") to BathExponent.types
        # members so comparisons below are uniform.
        if types is not None:
            types = [
                t if t is None or isinstance(t, BathExponent.types)
                else BathExponent.types[t]
                for t in types
            ]
        filters = [("tag", tags), ("type", types), ("dim", dims)]
        filters = [(attr, f) for attr, f in filters if f is not None]
        # n is the common filter length, i.e. the level being described.
        n = max((len(f) for _, f in filters), default=0)
        if any(len(f) != n for _, f in filters):
            raise ValueError(
                "The tags, dims and types filters must all be the same length."
            )
        if n > self.max_depth:
            raise ValueError(
                f"The maximum depth for the hierarchy is {self.max_depth} but"
                f" {n} levels of excitation filters were given."
            )
        if level is None:
            if not filters:
                # fast path for when there are no excitation filters
                return self.labels[:]
        else:
            if not filters:
                # fast path for when there are no excitation filters
                return [label for label in self.labels if sum(label) == level]
            if level != n:
                raise ValueError(
                    f"The level parameter is {level} but {n} levels of"
                    " excitation filters were given."
                )
        # filtered_dims[j] ends up as 1 + (number of filter positions that
        # exponent j satisfies), capped at the exponent's true dimension, so
        # the enumeration below only produces labels reachable via matching
        # exponents.
        filtered_dims = [1] * len(self.exponents)
        for lvl in range(n):
            level_filters = [
                (attr, f[lvl]) for attr, f in filters
                if f[lvl] is not None
            ]
            for j, exp in enumerate(self.exponents):
                if any(getattr(exp, attr) != f for attr, f in level_filters):
                    continue
                filtered_dims[j] += 1
                filtered_dims[j] = min(self.dims[j], filtered_dims[j])
        return [
            label for label in state_number_enumerate(filtered_dims, n)
            if sum(label) == n
        ]
class HierarchyADOsState:
    """
    Provides convenient access to the full hierarchy ADO state at a particular
    point in time, ``t``.

    Parameters
    ----------
    rho : :class:`Qobj`
        The current state of the system (i.e. the 0th component of the
        hierarchy).
    ados : :class:`HierarchyADOs`
        The description of the hierarchy.
    ado_state : numpy.array
        The full state of the hierarchy.

    Attributes
    ----------
    rho : Qobj
        The system state.

    In addition, all of the attributes of the hierarchy description,
    i.e. ``HierarchyADOs``, are provided directly on this class for
    convenience. E.g. one can access ``.labels``, or ``.exponents`` or
    call ``.idx(label)`` directly.

    See :class:`HierarchyADOs` for a full list of the available attributes
    and methods.
    """
    def __init__(self, rho, ados, ado_state):
        self.rho = rho
        self._ado_state = ado_state
        self._ados = ados

    def __getattr__(self, name):
        # Delegate any attribute not found on this instance to the
        # hierarchy description (e.g. .labels, .exponents, .idx).
        return getattr(self._ados, name)

    def extract(self, idx_or_label):
        """
        Extract a Qobj representing a specified ADO from a full
        representation of the ADO states.

        Parameters
        ----------
        idx_or_label : int or label
            The index of the ADO to extract. If an ADO label, e.g.
            ``(0, 1, 0, ...)`` is supplied instead, then the ADO
            is extracted by label instead.

        Returns
        -------
        Qobj
            A :obj:`Qobj` representing the state of the specified ADO.
        """
        idx = (
            idx_or_label if isinstance(idx_or_label, int)
            else self._ados.idx(idx_or_label)
        )
        return Qobj(self._ado_state[idx, :].T, dims=self.rho.dims)
class HEOMSolver:
    """
    HEOM solver that supports multiple baths.

    The baths must be all either bosonic or fermionic baths.

    Parameters
    ----------
    H_sys : Qobj, QobjEvo or a list
        The system Hamiltonian or Liouvillian specified as either a
        :obj:`Qobj`, a :obj:`QobjEvo`, or a list of elements that may
        be converted to a :obj:`QobjEvo`.

    bath : Bath or list of Bath
        A :obj:`Bath` containing the exponents of the expansion of the
        bath correlation function and their associated coefficients
        and coupling operators, or a list of baths.

        If multiple baths are given, they must all be either fermionic
        or bosonic baths.

    max_depth : int
        The maximum depth of the hierarchy (i.e. the maximum number of bath
        exponent "excitations" to retain).

    options : :class:`qutip.solver.Options`
        Generic solver options. If set to None the default options will be
        used.

    progress_bar : None, True or :class:`BaseProgressBar`
        Optional instance of BaseProgressBar, or a subclass thereof, for
        showing the progress of the solver. If True, an instance of
        :class:`TextProgressBar` is used instead.

    Attributes
    ----------
    ados : :obj:`HierarchyADOs`
        The description of the hierarchy constructed from the given bath
        and maximum depth.
    """
    def __init__(
        self, H_sys, bath, max_depth, options=None, progress_bar=None,
    ):
        self.H_sys = self._convert_h_sys(H_sys)
        self.options = Options() if options is None else options
        self._is_timedep = isinstance(self.H_sys, QobjEvo)
        self._H0 = self.H_sys.to_list()[0] if self._is_timedep else self.H_sys
        self._is_hamiltonian = self._H0.type == "oper"
        self._L0 = liouvillian(self._H0) if self._is_hamiltonian else self._H0

        self._sys_shape = (
            self._H0.shape[0] if self._is_hamiltonian
            else int(np.sqrt(self._H0.shape[0]))
        )
        self._sup_shape = self._L0.shape[0]
        self._sys_dims = (
            self._H0.dims if self._is_hamiltonian
            else self._H0.dims[0]
        )

        self.ados = HierarchyADOs(
            self._combine_bath_exponents(bath), max_depth,
        )
        self._n_ados = len(self.ados.labels)
        self._n_exponents = len(self.ados.exponents)

        # pre-calculate identity matrix required by _grad_n
        self._sId = fast_identity(self._sup_shape)

        # pre-calculate superoperators required by _grad_prev and _grad_next:
        Qs = [exp.Q for exp in self.ados.exponents]
        self._spreQ = [spre(op).data for op in Qs]
        self._spostQ = [spost(op).data for op in Qs]
        self._s_pre_minus_post_Q = [
            self._spreQ[k] - self._spostQ[k] for k in range(self._n_exponents)
        ]
        self._s_pre_plus_post_Q = [
            self._spreQ[k] + self._spostQ[k] for k in range(self._n_exponents)
        ]

        self._spreQdag = [spre(op.dag()).data for op in Qs]
        self._spostQdag = [spost(op.dag()).data for op in Qs]
        self._s_pre_minus_post_Qdag = [
            self._spreQdag[k] - self._spostQdag[k]
            for k in range(self._n_exponents)
        ]
        self._s_pre_plus_post_Qdag = [
            self._spreQdag[k] + self._spostQdag[k]
            for k in range(self._n_exponents)
        ]

        # BUGFIX: previously a user-supplied progress bar *instance* fell
        # through both ``if`` branches, leaving self.progress_bar unset and
        # causing an AttributeError later in .run(). Store the instance.
        if progress_bar is None:
            self.progress_bar = BaseProgressBar()
        elif progress_bar is True:
            self.progress_bar = TextProgressBar()
        else:
            self.progress_bar = progress_bar

        self._configure_solver()

    def _convert_h_sys(self, H_sys):
        """ Process input system Hamiltonian, converting and raising as
        needed.
        """
        if isinstance(H_sys, (Qobj, QobjEvo)):
            pass
        elif isinstance(H_sys, list):
            try:
                H_sys = QobjEvo(H_sys)
            except Exception as err:
                raise ValueError(
                    "Hamiltonian (H_sys) of type list cannot be converted to"
                    " QObjEvo"
                ) from err
        else:
            raise TypeError(
                f"Hamiltonian (H_sys) has unsupported type: {type(H_sys)!r}")
        return H_sys

    def _combine_bath_exponents(self, bath):
        """ Combine the exponents for the specified baths. """
        if not isinstance(bath, (list, tuple)):
            exponents = bath.exponents
        else:
            exponents = []
            for b in bath:
                exponents.extend(b.exponents)
        all_bosonic = all(
            exp.type in (exp.types.R, exp.types.I, exp.types.RI)
            for exp in exponents
        )
        all_fermionic = all(
            exp.type in (exp.types["+"], exp.types["-"])
            for exp in exponents
        )
        if not (all_bosonic or all_fermionic):
            raise ValueError(
                "Bath exponents are currently restricted to being either"
                " all bosonic or all fermionic, but a mixture of bath"
                " exponents was given."
            )
        if not all(exp.Q.dims == exponents[0].Q.dims for exp in exponents):
            raise ValueError(
                "All bath exponents must have system coupling operators"
                " with the same dimensions but a mixture of dimensions"
                " was given."
            )
        return exponents

    def _dsuper_list_td(self, t, y, L_list):
        """ Auxiliary function for the time-dependent integration. Called
        every time step.
        """
        # L_list[0] holds the constant RHS; subsequent entries hold
        # (matrix, time_coefficient_function) pairs.
        L = L_list[0][0]
        for n in range(1, len(L_list)):
            L = L + L_list[n][0] * L_list[n][1](t)
        return L * y

    def _grad_n(self, L, he_n):
        """ Get the gradient for the hierarchy ADO at level n. """
        vk = self.ados.vk
        vk_sum = sum(he_n[i] * vk[i] for i in range(len(vk)))
        op = L - vk_sum * self._sId
        return op

    def _grad_prev(self, he_n, k):
        """ Get the previous gradient. """
        if self.ados.exponents[k].type in (
                BathExponent.types.R, BathExponent.types.I,
                BathExponent.types.RI
        ):
            return self._grad_prev_bosonic(he_n, k)
        elif self.ados.exponents[k].type in (
                BathExponent.types["+"], BathExponent.types["-"]
        ):
            return self._grad_prev_fermionic(he_n, k)
        else:
            raise ValueError(
                f"Mode {k} has unsupported type {self.ados.exponents[k].type}")

    def _grad_prev_bosonic(self, he_n, k):
        if self.ados.exponents[k].type == BathExponent.types.R:
            op = (-1j * he_n[k] * self.ados.ck[k]) * (
                self._s_pre_minus_post_Q[k]
            )
        elif self.ados.exponents[k].type == BathExponent.types.I:
            op = (-1j * he_n[k] * 1j * self.ados.ck[k]) * (
                self._s_pre_plus_post_Q[k]
            )
        elif self.ados.exponents[k].type == BathExponent.types.RI:
            term1 = (he_n[k] * -1j * self.ados.ck[k]) * (
                self._s_pre_minus_post_Q[k]
            )
            term2 = (he_n[k] * self.ados.ck2[k]) * self._s_pre_plus_post_Q[k]
            op = term1 + term2
        else:
            raise ValueError(
                f"Unsupported type {self.ados.exponents[k].type}"
                f" for exponent {k}"
            )
        return op

    def _grad_prev_fermionic(self, he_n, k):
        ck = self.ados.ck
        n_excite = sum(he_n)
        sign1 = (-1) ** (n_excite + 1)
        n_excite_before_m = sum(he_n[:k])
        sign2 = (-1) ** (n_excite_before_m)
        sigma_bar_k = k + self.ados.sigma_bar_k_offset[k]
        if self.ados.exponents[k].type == BathExponent.types["+"]:
            op = -1j * sign2 * (
                (ck[k] * self._spreQdag[k]) -
                (sign1 * np.conj(ck[sigma_bar_k]) * self._spostQdag[k])
            )
        elif self.ados.exponents[k].type == BathExponent.types["-"]:
            op = -1j * sign2 * (
                (ck[k] * self._spreQ[k]) -
                (sign1 * np.conj(ck[sigma_bar_k]) * self._spostQ[k])
            )
        else:
            raise ValueError(
                f"Unsupported type {self.ados.exponents[k].type}"
                f" for exponent {k}"
            )
        return op

    def _grad_next(self, he_n, k):
        """ Get the next gradient. """
        if self.ados.exponents[k].type in (
                BathExponent.types.R, BathExponent.types.I,
                BathExponent.types.RI
        ):
            return self._grad_next_bosonic(he_n, k)
        elif self.ados.exponents[k].type in (
                BathExponent.types["+"], BathExponent.types["-"]
        ):
            return self._grad_next_fermionic(he_n, k)
        else:
            raise ValueError(
                f"Mode {k} has unsupported type {self.ados.exponents[k].type}")

    def _grad_next_bosonic(self, he_n, k):
        op = -1j * self._s_pre_minus_post_Q[k]
        return op

    def _grad_next_fermionic(self, he_n, k):
        n_excite = sum(he_n)
        sign1 = (-1) ** (n_excite + 1)
        n_excite_before_m = sum(he_n[:k])
        sign2 = (-1) ** (n_excite_before_m)
        if self.ados.exponents[k].type == BathExponent.types["+"]:
            if sign1 == -1:
                op = (-1j * sign2) * self._s_pre_minus_post_Q[k]
            else:
                op = (-1j * sign2) * self._s_pre_plus_post_Q[k]
        elif self.ados.exponents[k].type == BathExponent.types["-"]:
            if sign1 == -1:
                op = (-1j * sign2) * self._s_pre_minus_post_Qdag[k]
            else:
                op = (-1j * sign2) * self._s_pre_plus_post_Qdag[k]
        else:
            raise ValueError(
                f"Unsupported type {self.ados.exponents[k].type}"
                f" for exponent {k}"
            )
        return op

    def _rhs(self, L):
        """ Make the RHS for the HEOM. """
        ops = _GatherHEOMRHS(self.ados.idx, block=L.shape[0], nhe=self._n_ados)

        for he_n in self.ados.labels:
            # diagonal block for the ADO itself:
            op = self._grad_n(L, he_n)
            ops.add_op(he_n, he_n, op)
            # couplings to the neighbouring ADOs in the hierarchy:
            for k in range(len(self.ados.dims)):
                next_he = self.ados.next(he_n, k)
                if next_he is not None:
                    op = self._grad_next(he_n, k)
                    ops.add_op(he_n, next_he, op)
                prev_he = self.ados.prev(he_n, k)
                if prev_he is not None:
                    op = self._grad_prev(he_n, k)
                    ops.add_op(he_n, prev_he, op)

        return ops.gather()

    def _configure_solver(self):
        """ Set up the solver. """
        RHSmat = self._rhs(self._L0.data)
        assert isinstance(RHSmat, sp.csr_matrix)

        if self._is_timedep:
            # In the time dependent case, we construct the parameters
            # for the ODE gradient function _dsuper_list_td under the
            # assumption that RHSmat(t) = RHSmat + time dependent terms
            # that only affect the diagonal blocks of the RHS matrix.
            # This assumption holds because only _grad_n depends on
            # the system Liouvillian (and not _grad_prev or _grad_next).
            h_identity_mat = sp.identity(self._n_ados, format="csr")
            H_list = self.H_sys.to_list()

            solver_params = [[RHSmat]]
            for idx in range(1, len(H_list)):
                temp_mat = sp.kron(
                    h_identity_mat, liouvillian(H_list[idx][0])
                )
                solver_params.append([temp_mat, H_list[idx][1]])

            solver = scipy.integrate.ode(self._dsuper_list_td)
            solver.set_f_params(solver_params)
        else:
            solver = scipy.integrate.ode(cy_ode_rhs)
            solver.set_f_params(RHSmat.data, RHSmat.indices, RHSmat.indptr)

        solver.set_integrator(
            "zvode",
            method=self.options.method,
            order=self.options.order,
            atol=self.options.atol,
            rtol=self.options.rtol,
            nsteps=self.options.nsteps,
            first_step=self.options.first_step,
            min_step=self.options.min_step,
            max_step=self.options.max_step,
        )

        self._ode = solver
        self.RHSmat = RHSmat

    def steady_state(
        self,
        use_mkl=True, mkl_max_iter_refine=100, mkl_weighted_matching=False
    ):
        """
        Compute the steady state of the system.

        Parameters
        ----------
        use_mkl : bool, default=True
            Whether to use mkl or not. If mkl is not installed or if
            this is false, use the scipy splu solver instead.

        mkl_max_iter_refine : int
            Specifies the the maximum number of iterative refinement steps
            that the MKL PARDISO solver performs.

            For a complete description, see iparm(8) in
            http://cali2.unilim.fr/intel-xe/mkl/mklman/GUID-264E311E-ACED-4D56-AC31-E9D3B11D1CBF.htm.

        mkl_weighted_matching : bool
            MKL PARDISO can use a maximum weighted matching algorithm to
            permute large elements close the diagonal. This strategy adds an
            additional level of reliability to the factorization methods.

            For a complete description, see iparm(13) in
            http://cali2.unilim.fr/intel-xe/mkl/mklman/GUID-264E311E-ACED-4D56-AC31-E9D3B11D1CBF.htm.

        Returns
        -------
        steady_state : Qobj
            The steady state density matrix of the system.

        steady_ados : :class:`HierarchyADOsState`
            The steady state of the full ADO hierarchy. A particular ADO may
            be extracted from the full state by calling :meth:`.extract`.
        """
        n = self._sys_shape

        b_mat = np.zeros(n ** 2 * self._n_ados, dtype=complex)
        b_mat[0] = 1.0

        # Replace the first row of the RHS with the trace constraint
        # Tr(rho) == 1 so the linear system has a unique solution.
        L = deepcopy(self.RHSmat)
        L = L.tolil()
        L[0, 0: n ** 2 * self._n_ados] = 0.0
        L = L.tocsr()
        L += sp.csr_matrix((
            np.ones(n),
            (np.zeros(n), [num * (n + 1) for num in range(n)])
        ), shape=(n ** 2 * self._n_ados, n ** 2 * self._n_ados))

        if mkl_spsolve is not None and use_mkl:
            L.sort_indices()
            solution = mkl_spsolve(
                L,
                b_mat,
                perm=None,
                verbose=False,
                max_iter_refine=mkl_max_iter_refine,
                scaling_vectors=True,
                weighted_matching=mkl_weighted_matching,
            )
        else:
            L = L.tocsc()
            solution = spsolve(L, b_mat)

        data = dense2D_to_fastcsr_fmode(vec2mat(solution[:n ** 2]), n, n)
        # Enforce hermiticity of the returned density matrix.
        data = 0.5 * (data + data.H)
        steady_state = Qobj(data, dims=self._sys_dims)

        solution = solution.reshape((self._n_ados, n, n))
        steady_ados = HierarchyADOsState(steady_state, self.ados, solution)

        return steady_state, steady_ados

    def _convert_e_ops(self, e_ops):
        """
        Parse and convert a dictionary or list of e_ops.

        Returns
        -------
        e_ops, expected : tuple
            If the input ``e_ops`` was a list or scalar, ``expected`` is a
            list with one item for each element of the original e_ops.

            If the input ``e_ops`` was a dictionary, ``expected`` is a
            dictionary with the same keys.

            The output ``e_ops`` is always a dictionary. Its keys are the
            keys or indexes for ``expected`` and its elements are the e_ops
            functions or callables.
        """
        if isinstance(e_ops, (list, dict)):
            pass
        elif e_ops is None:
            e_ops = []
        elif isinstance(e_ops, Qobj):
            e_ops = [e_ops]
        elif callable(e_ops):
            e_ops = [e_ops]
        else:
            try:
                e_ops = list(e_ops)
            except Exception as err:
                raise TypeError(
                    "e_ops must be an iterable, Qobj or function"
                ) from err

        if isinstance(e_ops, dict):
            expected = {k: [] for k in e_ops}
        else:
            expected = [[] for _ in e_ops]
            e_ops = {i: op for i, op in enumerate(e_ops)}

        if not all(
                callable(op) or isinstance(op, Qobj) for op in e_ops.values()
        ):
            raise TypeError("e_ops must only contain Qobj or functions")

        return e_ops, expected

    def run(self, rho0, tlist, e_ops=None, ado_init=False, ado_return=False):
        """
        Solve for the time evolution of the system.

        Parameters
        ----------
        rho0 : Qobj or HierarchyADOsState or numpy.array
            Initial state (:obj:`~Qobj` density matrix) of the system
            if ``ado_init`` is ``False``.

            If ``ado_init`` is ``True``, then ``rho0`` should be an
            instance of :obj:`~HierarchyADOsState` or a numpy array
            giving the initial state of all ADOs. Usually
            the state of the ADOs would be determine from a previous call
            to ``.run(..., ado_return=True)``. For example,
            ``result = solver.run(..., ado_return=True)`` could be followed
            by ``solver.run(result.ado_states[-1], tlist, ado_init=True)``.

            If a numpy array is passed its shape must be
            ``(number_of_ados, n, n)`` where ``(n, n)`` is the system shape
            (i.e. shape of the system density matrix) and the ADOs must be
            in the same order as in ``.ados.labels``.

        tlist : list
            An ordered list of times at which to return the value of the
            state.

        e_ops : Qobj / callable / list / dict / None, optional
            A list or dictionary of operators as `Qobj` and/or callable
            functions (they can be mixed) or a single operator or callable
            function. For an operator ``op``, the result will be computed
            using ``(state * op).tr()`` and the state at each time ``t``. For
            callable functions, ``f``, the result is computed using
            ``f(t, ado_state)``. The values are stored in ``expect``
            (see the return section below).

        ado_init: bool, default False
            Indicates if initial condition is just the system state, or a
            numpy array including all ADOs.

        ado_return: bool, default False
            Whether to also return as output the full state of all ADOs.

        Returns
        -------
        :class:`qutip.solver.Result`
            The results of the simulation run, with the following attributes:

            * ``times``: the times ``t`` (i.e. the ``tlist``).

            * ``states``: the system state at each time ``t`` (only available
              if ``e_ops`` was ``None`` or if the solver option
              ``store_states`` was set to ``True``).

            * ``ado_states``: the full ADO state at each time (only available
              if ``ado_return`` was set to ``True``). Each element is an
              instance of :class:`HierarchyADOsState`.
              The state of a particular ADO may be extracted from
              ``result.ado_states[i]`` by calling :meth:`.extract`.

            * ``expect``: the value of each ``e_ops`` at time ``t`` (only
              available if ``e_ops`` were given). If ``e_ops`` was passed
              as a dictionary, then ``expect`` will be a dictionary with
              the same keys as ``e_ops`` and values giving the list of
              outcomes for the corresponding key.
        """
        e_ops, expected = self._convert_e_ops(e_ops)
        e_ops_callables = any(
            not isinstance(op, Qobj) for op in e_ops.values()
        )

        n = self._sys_shape
        rho_shape = (n, n)
        rho_dims = self._sys_dims
        hierarchy_shape = (self._n_ados, n, n)

        output = Result()
        output.solver = "HEOMSolver"
        output.times = tlist
        if e_ops:
            output.expect = expected
        if not e_ops or self.options.store_states:
            output.states = []

        if ado_init:
            if isinstance(rho0, HierarchyADOsState):
                rho0_he = rho0._ado_state
            else:
                rho0_he = rho0
            if rho0_he.shape != hierarchy_shape:
                # BUGFIX: added the missing space between the two halves of
                # the error message.
                raise ValueError(
                    f"ADOs passed with ado_init have shape {rho0_he.shape}"
                    f" but the solver hierarchy shape is {hierarchy_shape}"
                )
            rho0_he = rho0_he.reshape(n ** 2 * self._n_ados)
        else:
            rho0_he = np.zeros([n ** 2 * self._n_ados], dtype=complex)
            rho0_he[:n ** 2] = rho0.full().ravel('F')

        if ado_return:
            output.ado_states = []

        solver = self._ode
        solver.set_initial_value(rho0_he, tlist[0])

        self.progress_bar.start(len(tlist))
        for t_idx, t in enumerate(tlist):
            self.progress_bar.update(t_idx)
            if t_idx != 0:
                solver.integrate(t)
                if not solver.successful():
                    raise RuntimeError(
                        "HEOMSolver ODE integration error. Try increasing"
                        " the nsteps given in the HEOMSolver options"
                        " (which increases the allowed substeps in each"
                        " step between times given in tlist).")

            rho = Qobj(
                solver.y[:n ** 2].reshape(rho_shape, order='F'),
                dims=rho_dims,
            )
            if self.options.store_states:
                output.states.append(rho)

            if ado_return or e_ops_callables:
                ado_state = HierarchyADOsState(
                    rho, self.ados, solver.y.reshape(hierarchy_shape)
                )
            if ado_return:
                output.ado_states.append(ado_state)

            for e_key, e_op in e_ops.items():
                if isinstance(e_op, Qobj):
                    e_result = (rho * e_op).tr()
                else:
                    e_result = e_op(t, ado_state)
                output.expect[e_key].append(e_result)

        self.progress_bar.finished()
        return output
class HSolverDL(HEOMSolver):
"""
A helper class for creating an :class:`HEOMSolver` that is backwards
compatible with the ``HSolverDL`` provided in ``qutip.nonmarkov.heom``
in QuTiP 4.6 and below.
See :class:`HEOMSolver` and :class:`DrudeLorentzBath` for more
descriptions of the underlying solver and bath construction.
An exact copy of the QuTiP 4.6 HSolverDL is provided in
``qutip.nonmarkov.dlheom_solver`` for cases where the functionality of
the older solver is required. The older solver will be completely
removed in QuTiP 5.
.. note::
Unlike the version of ``HSolverDL`` in QuTiP 4.6, this solver
supports supplying a time-dependent or Liouvillian ``H_sys``.
.. note::
For compatibility with ``HSolverDL`` in QuTiP 4.6 and below, the
parameter ``N_exp`` specifying the number of exponents to keep in
the expansion of the bath correlation function is one more than
the equivalent ``Nk`` used in the :class:`DrudeLorentzBath`. I.e.,
``Nk = N_exp - 1``. The ``Nk`` parameter in the
:class:`DrudeLorentzBath` does not count the zeroeth exponent in
order to better match common usage in the literature.
.. note::
The ``stats`` and ``renorm`` arguments accepted in QuTiP 4.6 and below
are no longer supported.
Parameters
----------
H_sys : Qobj or QobjEvo or list
The system Hamiltonian or Liouvillian. See :class:`HEOMSolver` for
a complete description.
coup_op : Qobj
Operator describing the coupling between system and bath.
See parameter ``Q`` in :class:`BosonicBath` for a complete description.
coup_strength : float
Coupling strength. Referred to as ``lam`` in :class:`DrudeLorentzBath`.
temperature : float
Bath temperature. Referred to as ``T`` in :class:`DrudeLorentzBath`.
N_cut : int
The maximum depth of the hierarchy. See ``max_depth`` in
:class:`HEOMSolver` for a full description.
N_exp : int
Number of exponential terms used to approximate the bath correlation
functions. The equivalent ``Nk`` in :class:`DrudeLorentzBath` is one
less than ``N_exp`` (see note above).
cut_freq : float
Bath spectral density cutoff frequency. Referred to as ``gamma`` in
:class:`DrudeLorentzBath`.
bnd_cut_approx : bool
Use boundary cut off approximation. If true, the Matsubara
terminator is added to the system Liouvillian (and H_sys is
promoted to a Liouvillian if it was a Hamiltonian).
progress_bar : None, True or :class:`BaseProgressBar`
Optional instance of BaseProgressBar, or a subclass thereof, for
showing the progress of the solver. If True, an instance of
:class:`TextProgressBar` is used instead.
options : :class:`qutip.solver.Options`
Generic solver options.
If set to None the default options will be used.
progress_bar : None, True or :class:`BaseProgressBar`
Optional instance of BaseProgressBar, or a subclass thereof, for
showing the progress of the solver. If True, an instance of
:class:`TextProgressBar` is used instead.
combine : bool, default True
Whether to combine exponents with the same frequency (and coupling
operator). See :meth:`BosonicBath.combine` for details.
"""
    def __init__(
        self, H_sys, coup_op, coup_strength, temperature,
        N_cut, N_exp, cut_freq, bnd_cut_approx=False, options=None,
        progress_bar=None, combine=True,
    ):
        # Translate the legacy (QuTiP 4.6) parameter names into a
        # DrudeLorentzBath.  Nk is one less than N_exp because this class
        # counts the zeroth exponent separately (see class docstring).
        bath = DrudeLorentzBath(
            Q=coup_op,
            lam=coup_strength,
            gamma=cut_freq,
            Nk=N_exp - 1,
            T=temperature,
            combine=combine,
        )
        if bnd_cut_approx:
            # upgrade H_sys to a Liouvillian if needed and add the
            # bath terminator
            H_sys = self._convert_h_sys(H_sys)
            is_timedep = isinstance(H_sys, QobjEvo)
            # for time-dependent input, inspect the constant term to decide
            # whether we were handed a Hamiltonian or a Liouvillian
            H0 = H_sys.to_list()[0] if is_timedep else H_sys
            is_hamiltonian = H0.type == "oper"
            if is_hamiltonian:
                H_sys = liouvillian(H_sys)
            _, terminator = bath.terminator()
            H_sys = H_sys + terminator
        super().__init__(
            H_sys, bath=bath, max_depth=N_cut, options=options,
            progress_bar=progress_bar,
        )
        # store input parameters as attributes for politeness and compatibility
        # with HSolverDL in QuTiP 4.6 and below.
        self.coup_strength = coup_strength
        self.cut_freq = cut_freq
        self.temperature = temperature
        self.N_exp = N_exp
        self.bnd_cut_approx = bnd_cut_approx
class _GatherHEOMRHS:
    """ A class for collecting elements of the right-hand side matrix
    of the HEOM.

    Parameters
    ----------
    f_idx: function(he_state) -> he_idx
        A function that returns the index of a hierarchy state
        (i.e. an ADO label).
    block : int
        The size of a single ADO Liovillian operator in the hierarchy.
    nhe : int
        The number of ADOs in the hierarchy.
    """
    def __init__(self, f_idx, block, nhe):
        self._block = block
        self._nhe = nhe
        self._f_idx = f_idx
        # accumulated (block_row_idx, block_col_idx, op) triples
        self._ops = []

    def add_op(self, row_he, col_he, op):
        """ Add an block operator to the list. """
        self._ops.append(
            (self._f_idx(row_he), self._f_idx(col_he), op)
        )

    def gather(self):
        """ Create the HEOM liouvillian from a sorted list of smaller (fast) CSR
        matrices.

        .. note::

            The list of operators contains tuples of the form
            ``(row_idx, col_idx, op)``. The row_idx and col_idx give the
            *block* row and column for each op. An operator with
            block indices ``(N, M)`` is placed at position
            ``[N * block: (N + 1) * block, M * block: (M + 1) * block]``
            in the output matrix.

        Returns
        -------
        rhs : :obj:`qutip.fastsparse.fast_csr_matrix`
            A combined matrix of shape ``(block * nhe, block * ne)``.
        """
        block = self._block
        nhe = self._nhe
        ops = self._ops
        shape = (block * nhe, block * nhe)
        if not ops:
            # no operators were added: the RHS is all zeros
            return sp.csr_matrix(shape, dtype=np.complex128)
        # sort by (block_row, block_col) so the ops of each block row are
        # contiguous and already in ascending column order
        ops.sort()
        nnz = sum(op.nnz for _, _, op in ops)
        # assemble the CSR arrays for the full matrix in a single pass
        indptr = np.zeros(shape[0] + 1, dtype=np.int32)
        indices = np.zeros(nnz, dtype=np.int32)
        data = np.zeros(nnz, dtype=np.complex128)
        end = 0  # current write position within data/indices
        op_idx = 0
        op_len = len(ops)
        for row_idx in range(nhe):
            # advance op_idx past all ops belonging to this block row
            prev_op_idx = op_idx
            while op_idx < op_len:
                if ops[op_idx][0] != row_idx:
                    break
                op_idx += 1
            row_ops = ops[prev_op_idx: op_idx]
            rowpos = row_idx * block
            for op_row in range(block):
                # copy row ``op_row`` of each op in this block row,
                # shifting its column indices by the op's block column
                for _, col_idx, op in row_ops:
                    colpos = col_idx * block
                    op_row_start = op.indptr[op_row]
                    op_row_end = op.indptr[op_row + 1]
                    op_row_len = op_row_end - op_row_start
                    if op_row_len == 0:
                        continue
                    indices[end: end + op_row_len] = (
                        op.indices[op_row_start: op_row_end] + colpos
                    )
                    data[end: end + op_row_len] = (
                        op.data[op_row_start: op_row_end]
                    )
                    end += op_row_len
                indptr[rowpos + op_row + 1] = end
        return fast_csr_matrix(
            (data, indices, indptr), shape=shape, dtype=np.complex128,
        )
| |
import itertools
from typing import Optional, Text, List, Dict, Tuple, Any
import numpy as np
import pytest
import scipy.sparse
from rasa.shared.nlu.training_data.features import Features
from rasa.shared.nlu.constants import (
FEATURE_TYPE_SENTENCE,
FEATURE_TYPE_SEQUENCE,
TEXT,
INTENT,
)
@pytest.mark.parametrize(
    "type,is_sparse,",
    itertools.product([FEATURE_TYPE_SENTENCE, FEATURE_TYPE_SEQUENCE], [True, False]),
)
def test_print(type: Text, is_sparse: bool):
    """Smoke test: repr()/str() of a Features object must not fail."""
    rows = 1 if type == FEATURE_TYPE_SEQUENCE else 3
    values = np.full(shape=(rows, 2), fill_value=1)
    if is_sparse:
        values = scipy.sparse.coo_matrix(values)
    feature = Features(
        features=values,
        attribute="fixed-attribute",
        feature_type=type,
        origin="origin--doesn't-matter-here",
    )
    assert repr(feature)
    assert str(feature)
def test_combine_with_existing_dense_features():
    """Combining dense features appends the new columns on the right."""
    base = Features(
        np.array([[1, 0, 2, 3], [2, 0, 0, 1]]), FEATURE_TYPE_SEQUENCE, TEXT, "test"
    )
    extra = Features(
        np.array([[1, 0], [0, 1]]), FEATURE_TYPE_SEQUENCE, TEXT, "origin"
    )
    base.combine_with_features(extra)
    expected = np.array([[1, 0, 2, 3, 1, 0], [2, 0, 0, 1, 0, 1]])
    assert np.all(expected == base.features)
def test_combine_with_existing_dense_features_shape_mismatch():
    """Combining dense features with mismatched row counts must raise."""
    base = Features(
        np.array([[1, 0, 2, 3], [2, 0, 0, 1]]), FEATURE_TYPE_SEQUENCE, TEXT, "test"
    )
    extra = Features(np.array([[0, 1]]), FEATURE_TYPE_SEQUENCE, TEXT, "origin")
    with pytest.raises(ValueError):
        base.combine_with_features(extra)
def test_combine_with_existing_sparse_features():
    """Combining sparse features appends the new columns on the right."""
    base = Features(
        scipy.sparse.csr_matrix([[1, 0, 2, 3], [2, 0, 0, 1]]),
        FEATURE_TYPE_SEQUENCE,
        TEXT,
        "test",
    )
    extra = Features(
        scipy.sparse.csr_matrix([[1, 0], [0, 1]]), FEATURE_TYPE_SEQUENCE, TEXT, "origin"
    )
    base.combine_with_features(extra)
    expected = [[1, 0, 2, 3, 1, 0], [2, 0, 0, 1, 0, 1]]
    assert np.all(expected == base.features.toarray())
def test_combine_with_existing_sparse_features_shape_mismatch():
    """Combining sparse features with mismatched row counts must raise."""
    base = Features(
        scipy.sparse.csr_matrix([[1, 0, 2, 3], [2, 0, 0, 1]]),
        FEATURE_TYPE_SEQUENCE,
        TEXT,
        "test",
    )
    extra = Features(
        scipy.sparse.csr_matrix([[0, 1]]), FEATURE_TYPE_SEQUENCE, TEXT, "origin"
    )
    with pytest.raises(ValueError):
        base.combine_with_features(extra)
def test_for_features_fingerprinting_collisions():
    """Tests that features fingerprints are unique."""
    m1 = np.asarray([[0.5, 3.1, 3.0], [1.1, 1.2, 1.3], [4.7, 0.3, 2.7]])
    m2 = np.asarray([[0, 0, 0], [1, 2, 3], [0, 0, 1]])
    # each spec differs from the others in at least one property
    specs = [
        (m1, FEATURE_TYPE_SENTENCE, TEXT, "CountVectorsFeaturizer"),
        (m2, FEATURE_TYPE_SENTENCE, TEXT, "CountVectorsFeaturizer"),
        (m1, FEATURE_TYPE_SEQUENCE, TEXT, "CountVectorsFeaturizer"),
        (m1, FEATURE_TYPE_SEQUENCE, TEXT, "RegexFeaturizer"),
        (m1, FEATURE_TYPE_SENTENCE, INTENT, "CountVectorsFeaturizer"),
    ]
    dense_features = [
        Features(matrix, ftype, attribute, origin)
        for matrix, ftype, attribute, origin in specs
    ]
    dense_fingerprints = {f.fingerprint() for f in dense_features}
    assert len(dense_fingerprints) == len(dense_features)
    sparse_features = [
        Features(scipy.sparse.coo_matrix(matrix), ftype, attribute, origin)
        for matrix, ftype, attribute, origin in specs
    ]
    sparse_fingerprints = {f.fingerprint() for f in sparse_features}
    assert len(sparse_fingerprints) == len(sparse_features)
def test_feature_fingerprints_take_into_account_full_array():
    """Tests that fingerprint isn't using summary/abbreviated array info."""
    original = np.random.random((128, 128))
    tweaked = np.copy(original)
    # change a single interior element; a summary-based fingerprint
    # (e.g. repr of a truncated array) would not notice this
    tweaked[64, 64] = 0.0
    dense_a = Features(original, FEATURE_TYPE_SENTENCE, TEXT, "RegexFeaturizer")
    dense_b = Features(tweaked, FEATURE_TYPE_SENTENCE, TEXT, "RegexFeaturizer")
    assert dense_a.fingerprint() != dense_b.fingerprint()
    sparse_a = Features(
        scipy.sparse.coo_matrix(original),
        FEATURE_TYPE_SENTENCE,
        TEXT,
        "RegexFeaturizer",
    )
    sparse_b = Features(
        scipy.sparse.coo_matrix(tweaked),
        FEATURE_TYPE_SENTENCE,
        TEXT,
        "RegexFeaturizer",
    )
    assert sparse_a.fingerprint() != sparse_b.fingerprint()
def _generate_feature_list_and_modifications(
    is_sparse: bool, type: Text, number: int
) -> Tuple[List[Features], List[Dict[Text, Any]]]:
    """Creates a list of features with the required properties and some modifications.

    The modifications are given by a list of kwargs dictionaries that can be used to
    instantiate `Features` that differ from the aforementioned list of features in
    exactly one property (i.e. type, sequence length (if the given `type` is
    sequence type only), attribute, origin)

    Args:
        is_sparse: whether all features should be sparse
        type: the type to be used for all features
        number: the number of features to generate
    Returns:
        a tuple containing a list of features with the requested attributes and
        a list of kwargs dictionaries that can be used to instantiate `Features` that
        differ from the aforementioned list of features in exactly one property
    """
    seq_len = 3
    first_dim = 1 if type == FEATURE_TYPE_SENTENCE else 3
    # create list of features whose properties match - except the shapes and
    # feature values which are chosen in a specific way
    features_list = []
    for idx in range(number):
        # feature idx has idx+1 columns, all filled with the value idx+1
        matrix = np.full(shape=(first_dim, idx + 1), fill_value=idx + 1)
        if is_sparse:
            matrix = scipy.sparse.coo_matrix(matrix)
        config = dict(
            features=matrix,
            attribute="fixed-attribute",
            feature_type=type,
            origin=f"origin-{idx}",
        )
        feat = Features(**config)
        features_list.append(feat)
    # prepare some Features that differ from the features above in certain ways
    # NOTE: everything below deliberately reuses ``config`` and ``idx`` leaked
    # from the loop above, i.e. the modifications are based on the LAST feature
    modifications = []
    # - if we modify one attribute
    modifications.append({**config, **{"attribute": "OTHER"}})
    # - if we modify one feature type (the matrix is adjusted to stay valid)
    other_type = (
        FEATURE_TYPE_SENTENCE
        if type == FEATURE_TYPE_SEQUENCE
        else FEATURE_TYPE_SEQUENCE
    )
    other_seq_len = 1 if other_type == FEATURE_TYPE_SENTENCE else seq_len
    other_matrix = np.full(shape=(other_seq_len, number - 1), fill_value=number)
    if is_sparse:
        other_matrix = scipy.sparse.coo_matrix(other_matrix)
    modifications.append(
        {**config, **{"feature_type": other_type, "features": other_matrix}}
    )
    # - if we modify one origin
    modifications.append({**config, **{"origin": "Other"}})
    # - if we modify one sequence length
    if type == FEATURE_TYPE_SEQUENCE:
        matrix = np.full(shape=(seq_len + 1, idx + 1), fill_value=idx)
        if is_sparse:
            matrix = scipy.sparse.coo_matrix(matrix)
        modifications.append({**config, **{"features": matrix}})
    return features_list, modifications
@pytest.mark.parametrize(
    "is_sparse,type,number,use_expected_origin",
    itertools.product(
        [True, False],
        [FEATURE_TYPE_SENTENCE, FEATURE_TYPE_SEQUENCE],
        [1, 2, 5],
        [True, False],
    ),
)
def test_combine(is_sparse: bool, type: Text, number: int, use_expected_origin: bool):
    """`Features.combine` concatenates features and validates origins."""
    features_list, modifications = _generate_feature_list_and_modifications(
        is_sparse=is_sparse, type=type, number=number
    )
    modified_features = [Features(**config) for config in modifications]
    first_dim = features_list[0].features.shape[0]
    origins = [f"origin-{idx}" for idx in range(len(features_list))]
    if number == 1:
        # in this case the origin will be same str as before, not a list
        origins = origins[0]
    expected_origin = origins if use_expected_origin else None
    # works as expected
    combination = Features.combine(features_list, expected_origins=expected_origin)
    # feature idx contributes idx+1 columns, hence sum = number*(number+1)/2
    assert combination.features.shape[1] == int(number * (number + 1) / 2)
    assert combination.features.shape[0] == first_dim
    assert combination.origin == origins
    assert combination.is_sparse() == is_sparse
    matrix = combination.features
    if is_sparse:
        matrix = combination.features.todense()
    for idx in range(number):
        # feature idx was filled with the value idx+1 (see generator helper)
        offset = int(idx * (idx + 1) / 2)
        assert np.all(matrix[:, offset : (offset + idx + 1)] == idx + 1)
    # fails as expected in these cases
    if use_expected_origin and number > 1:
        for modified_feature in modified_features:
            features_list_copy = features_list.copy()
            features_list_copy[-1] = modified_feature
            with pytest.raises(ValueError):
                Features.combine(features_list_copy, expected_origins=expected_origin)
@pytest.mark.parametrize(
    "is_sparse,type,number",
    itertools.product(
        [True, False], [FEATURE_TYPE_SENTENCE, FEATURE_TYPE_SEQUENCE], [1, 2, 5]
    ),
)
def test_filter(is_sparse: bool, type: Text, number: int):
    """`Features.filter` keeps exactly the features matching the config."""
    features_list, modifications = _generate_feature_list_and_modifications(
        is_sparse=is_sparse, type=type, number=number
    )
    # fix the filter configuration first (note: we ignore origin on purpose for now)
    filter_config = dict(attributes=["fixed-attribute"], type=type, is_sparse=is_sparse)
    # we get all features back if all features map...
    result = Features.filter(features_list, **filter_config)
    assert len(result) == number
    # ... and less matches if we change the (relevant) properties of some features
    # (only keep modifications that touch a property the filter looks at)
    modified_features = [
        Features(**config)
        for config in modifications
        if set(config.keys()).intersection(filter_config.keys())
    ]
    if number > 1:
        # replace the last feature with one mismatching feature
        for modified_feature in modified_features:
            features_list_copy = features_list.copy()
            features_list_copy[-1] = modified_feature
            result = Features.filter(features_list_copy, **filter_config)
            assert len(result) == number - 1
    if number > 2:
        # replace the last two features with two mismatching features
        for feat_a, feat_b in itertools.combinations(modified_features, 2):
            features_list_copy = features_list.copy()
            features_list_copy[-1] = feat_a
            features_list_copy[-2] = feat_b
            result = Features.filter(features_list_copy, **filter_config)
            assert len(result) == number - 2
    # don't forget to check the origin
    filter_config = dict(
        attributes=["fixed-attribute"],
        type=type,
        origin=["origin-0"],
        is_sparse=is_sparse,
    )
    result = Features.filter(features_list, **filter_config)
    assert len(result) == 1
@pytest.mark.parametrize(
    "num_features_per_attribute,specified_attributes",
    itertools.product(
        [{"a": 3, "b": 1, "c": 0}],
        [None, ["a", "b", "c", "doesnt-appear"], ["doesnt-appear"]],
    ),
)
def test_groupby(
    num_features_per_attribute: Dict[Text, int],
    specified_attributes: Optional[List[Text]],
):
    """Grouping by attribute collects exactly each attribute's features."""
    features_list = [
        Features(
            features=np.full(shape=(1, idx + 1), fill_value=idx + 1),
            attribute=attribute,
            feature_type=FEATURE_TYPE_SEQUENCE,  # doesn't matter
            origin=f"origin-{idx}",  # doesn't matter
        )
        for attribute, number in num_features_per_attribute.items()
        for idx in range(number)
    ]
    result = Features.groupby_attribute(features_list, attributes=specified_attributes)
    if specified_attributes is None:
        # only attributes that actually have features appear as keys
        for attribute, number in num_features_per_attribute.items():
            if number > 0:
                assert attribute in result
                assert len(result[attribute]) == number
            else:
                assert attribute not in result
    else:
        # every requested attribute appears, possibly with an empty list
        assert set(result.keys()) == set(specified_attributes)
        for attribute in specified_attributes:
            assert attribute in result
            number = num_features_per_attribute.get(attribute, 0)
            assert len(result[attribute]) == number
@pytest.mark.parametrize(
    "shuffle_mode,num_features_per_combination",
    itertools.product(
        ["reversed", "random"], [[1, 0, 0, 0], [1, 1, 1, 1], [2, 3, 4, 5], [0, 1, 2, 2]]
    ),
)
def test_reduce(
    shuffle_mode: Text, num_features_per_combination: Tuple[int, int, int, int]
):
    """`Features.reduce` merges per-(type, sparseness) groups in canonical order."""
    # all combinations - in the expected order
    # (i.e. all sparse before all dense and sequence before sentence)
    all_combinations = [
        (FEATURE_TYPE_SEQUENCE, True),
        (FEATURE_TYPE_SENTENCE, True),
        (FEATURE_TYPE_SEQUENCE, False),
        (FEATURE_TYPE_SENTENCE, False),
    ]
    # multiply accordingly and mess up the order
    chosen_combinations = [
        spec
        for spec, num in zip(all_combinations, num_features_per_combination)
        for _ in range(num)
    ]
    if shuffle_mode == "reversed":
        messed_up_order = reversed(chosen_combinations)
    else:
        # Note: rng.permutation would mess up the types
        rng = np.random.default_rng(23452345)
        permutation = rng.permutation(len(chosen_combinations))
        messed_up_order = [chosen_combinations[idx] for idx in permutation]
    # create features accordingly
    features_list = []
    for idx, (type, is_sparse) in enumerate(messed_up_order):
        first_dim = 1 if type == FEATURE_TYPE_SEQUENCE else 3
        matrix = np.full(shape=(first_dim, 1), fill_value=1)
        if is_sparse:
            matrix = scipy.sparse.coo_matrix(matrix)
        config = dict(
            features=matrix,
            attribute="fixed-attribute",  # must be the same
            feature_type=type,
            origin="origin-does-matter-here",  # must be the same
        )
        feat = Features(**config)
        features_list.append(feat)
    # reduce!
    reduced_list = Features.reduce(features_list)
    # one reduced feature per non-empty combination
    assert len(reduced_list) == sum(num > 0 for num in num_features_per_combination)
    idx = 0
    for num, (type, is_sparse) in zip(num_features_per_combination, all_combinations):
        if num == 0:
            # nothing to check here - because we already checked the length above
            # and check the types and shape of all existing features in this loop
            pass
        else:
            feature = reduced_list[idx]
            assert feature.is_sparse() == is_sparse
            assert feature.type == type
            # each input contributed one column of width 1
            assert feature.features.shape[-1] == num
            idx += 1
@pytest.mark.parametrize("differ", ["attribute", "origin"])
def test_reduce_raises_if_combining_different_origins_or_attributes(differ: Text):
    """Reduce must refuse to merge features whose attribute or origin differ."""
    fixed_type = FEATURE_TYPE_SENTENCE
    features_list = []
    for idx in range(2):
        matrix = np.full(shape=(1, 1), fill_value=1)
        features_list.append(
            Features(
                features=matrix,
                attribute="fixed-attribute" if differ != "attribute" else f"attr-{idx}",
                feature_type=fixed_type,
                origin="fixed-origin" if differ != "origin" else f"origin-{idx}",
            )
        )
    # reduce!
    if differ == "attribute":
        message = "Expected all Features to describe the same attribute"
        expected_origin = ["origin"]
    else:
        message = "Expected 'origin-1' to be the origin of the 0-th"
        expected_origin = ["origin-1"]
    with pytest.raises(ValueError, match=message):
        Features.reduce(features_list, expected_origins=expected_origin)
| |
#!/usr/bin/python
import os, sys, time
from collections import defaultdict
from optparse import OptionParser
from trigrams import Trigram, debug, text_to_trigrams, MODES, open_maybe_gzip
from trigrams import TRIGRAM_OFFSET_FACTOR, ANTI_TRIGRAM_OFFSET_FACTOR
def find_git_root():
    """Return the path of the enclosing git repository's work-tree root.

    Exits with an explanatory message when run outside a git repository.
    """
    # Bug fix: Popen/PIPE were used without ever being imported, which
    # made this function raise NameError.  Import locally to keep the
    # fix self-contained.
    from subprocess import Popen, PIPE
    p = Popen(["git", "rev-parse", "--show-toplevel"], stdout=PIPE)
    root = p.communicate()[0].strip()
    if p.returncode:
        raise SystemExit("This does not seem to be a git repository (git returned %s)" %
                         p.returncode)
    return root
# Repository layout: all corpus/model paths are anchored at the git root.
BASE_DIR = find_git_root()
RAW_CORPI_DIR = os.path.join(BASE_DIR, "corpora/raw/")
COOKED_CORPI_DIR = os.path.join(BASE_DIR, "corpora/trigram/")
TRIGRAM_MODEL_DIR = os.path.join(BASE_DIR, "corpora/compiled/")
#this might not be so on other machines
STASH_DIR = os.path.join(BASE_DIR, "stash")
#TEST_FILE_1 = os.path.join(STASH_DIR, "drink-the-hose-2011051203.txt.gz")
TEST_FILE_1 = os.path.join(STASH_DIR, "drink-the-hose-2011051103.txt.gz")
#TEST_FILE_1 = os.path.join(STASH_DIR, "drink-the-hose-2011050811.txt.gz")
DEFAULT_MODE = 'word_aware_lc'
#DEFAULT_THRESHOLD = 0.5
# A string threshold is turned into a number in main() via
# trigram.probable_similarity(), i.e. the cutoff becomes
# "how English-like is the text 'LOL'".
DEFAULT_THRESHOLD = "LOL"
CORPI = (#name, gzipped, pre-trigrammised
    ("presidents", False, True),
    ("carroll-alice", False, True),
    ("dasher_training_english_GB", False, True),
    ("english-web", False, True),
    ("lulz", False, True),
    ("enron-sent", True, True),
    ("wikipedia", False, True),
    ("irc", True, True),
    ("bash-org", False, True),
    ("barely-english", False, False),
)
# Corpora of non-English (or near-English) text used for the anti-model.
ANTI_CORPI = (
    ("anti-english", False, True),
    ("near-english", False, False),
)
def raw_corpi_path(base, gz):
    """Path of the raw corpus *base* (gzipped when *gz* is true)."""
    suffix = '.txt.gz' if gz else '.txt'
    return os.path.join(RAW_CORPI_DIR, base + suffix)
def cooked_corpi_path(base, mode, gz=False):
    """Path of the trigrammised corpus *base* for the given *mode*."""
    ext = 'txt.gz' if gz else 'txt'
    return os.path.join(COOKED_CORPI_DIR, "%s-%s.%s" % (base, mode, ext))
def pre_cook(modes=MODES, corpi=CORPI):
    """Convert each raw corpus into a trigram file, once per mode.

    Args:
        modes: iterable of mode names; a single mode string is also accepted.
        corpi: tuples of (basename, gzipped?, pre-trigrammised?).
    """
    if isinstance(modes, str):
        # Bug fix: this previously read ``modes = [mode]``, but ``mode``
        # is undefined at this point (NameError for string input).
        modes = [modes]
    for mode in modes:
        for base, gz, precooked in corpi:
            dest = cooked_corpi_path(base, mode, gz)
            src = raw_corpi_path(base, gz)
            text_to_trigrams(src, dest, mode)
def pre_cook_full(modes=MODES, corpi=CORPI):
    # Build (and thereby save to disk) a full trigram model with
    # anti-model for every mode.  Python 2 print statement.
    for mode in modes:
        print mode
        # NOTE(review): the returned model is unused; this call is made
        # purely for its side effect of writing the compiled model files.
        tg = get_trigram_with_antimodel(mode, use_raw=True, corpi=corpi)
def load_corpi(mode, corpi=CORPI):
    """Build a Trigram from the given corpora.

    *mode* may be an existing Trigram instance (extended in place) or a
    mode name (a fresh Trigram is created).
    """
    if isinstance(mode, Trigram):
        tg = mode
        mode = tg.mode
    else:
        tg = Trigram(mode=mode)
    for base, gz, precooked in corpi:
        if precooked:
            # corpus is already trigrammised on disk
            tg.load_trigrams(cooked_corpi_path(base, mode, gz))
        else:
            tg.import_text(raw_corpi_path(base, gz))
    return tg
def bisect_the_hose(trigram, infile, goodfile, rejectfile, threshold):
    """Split tweets from *infile* into *goodfile*/*rejectfile* by score.

    *threshold* may be a float, or a string whose own similarity score
    becomes the numeric threshold.  A None file argument discards that half.
    """
    f = open_maybe_gzip(infile)
    if goodfile is None:
        goodfile = os.devnull
    if rejectfile is None:
        rejectfile = os.devnull
    fgood = open_maybe_gzip(goodfile, 'w')
    frej = open_maybe_gzip(rejectfile, 'w')
    if isinstance(threshold, str):
        # derive the numeric threshold from the example string's score
        threshold = trigram.probable_similarity(threshold)
        debug("threshold is", threshold)
    hose_filter = trigram.hose_filter(f)
    for d in hose_filter:
        if d['score'] >= threshold:
            fgood.write("%(score)5f %(text)s\n" % d)
        else:
            frej.write("%(score)5f %(text)s\n" % d)
    f.close()
    fgood.close()
    frej.close()
def order_the_hose(trigram, infile, outfile):
    """Write every tweet from *infile* to *outfile*, sorted by score."""
    fin = open_maybe_gzip(infile)
    fout = open_maybe_gzip(outfile, 'w')
    scored = [(d['score'], d['text']) for d in trigram.hose_filter(fin)]
    for pair in sorted(scored):
        fout.write("%5f %s\n" % pair)
    fin.close()
    fout.close()
def group_by_user(trigram, infile, users=None):
    """Collect tweet scores per screen name.

    Args:
        trigram: object providing ``hose_filter`` yielding score dicts.
        infile: file (or filename) of tweets to score.
        users: optional existing {screen_name: [scores]} mapping to extend.

    Returns:
        the ``users`` mapping.  Bug fix: this previously returned ``d``
        (the last scored item) and raised NameError on empty input.
    """
    if users is None:
        users = {}
    for d in trigram.hose_filter(infile):
        users.setdefault(d['screen_name'], []).append(d['score'])
    return users
def get_trigram(mode, use_raw=False, corpi=CORPI, name_stem=''):
    """Load a trigram model: from raw corpora (and save it) or from the
    previously compiled file."""
    model_path = os.path.join(TRIGRAM_MODEL_DIR, '%s%s.txt' % (name_stem, mode))
    if use_raw:
        tg = load_corpi(mode, corpi)
        tg.save_trigrams(model_path)
    else:
        tg = Trigram(mode=mode)
        tg.load_trigrams(model_path)
    return tg
def get_anti_trigram(mode, use_raw=False):
    """Load the anti-model (non-English) trigram for *mode*."""
    return get_trigram(mode, use_raw=use_raw,
                       corpi=ANTI_CORPI, name_stem='anti-')
def get_trigram_with_antimodel(mode, use_raw=False, corpi=CORPI,
                               tof=TRIGRAM_OFFSET_FACTOR, atof=ANTI_TRIGRAM_OFFSET_FACTOR):
    """Load the model and its anti-model, then precompute entropy using both."""
    model = get_trigram(mode, use_raw=use_raw, corpi=corpi)
    anti_model = get_anti_trigram(model.mode, use_raw=use_raw)
    model.calculate_entropy(other=anti_model, offset_factor=tof,
                            other_offset_factor=atof)
    return model
def iter_stash(d):
if not os.path.isdir(d):
yield d
return
for fn in os.listdir(d):
if fn.startswith('drink-the-hose'):
yield(os.path.join(d, fn))
def _group_by_user(tg, src=STASH_DIR):
    # Score every dump file under src and merge the per-user score lists.
    # Python 2 print statements.
    users = {}
    for fn in iter_stash(src):
        print "doing %s" % fn
        group_by_user(tg, fn, users)
    print "%s users" % len(users)
    return users
def users_report(users):
    # Print a histogram: how many users have exactly k scored tweets.
    counts = defaultdict(int)
    for v in users.itervalues():
        counts[len(v)] += 1
    # '.' marks empty buckets
    for k in range(1, max(counts.keys()) + 1):
        print "%3d %s" % (k, counts.get(k, '.'))
def partition_users(users, outfile, rejfile, threshold):
    # Split users into outfile/rejfile by their mean tweet score.
    # A None file argument discards that half.
    if outfile is None:
        outfile = os.devnull
    if rejfile is None:
        rejfile = os.devnull
    fout = open_maybe_gzip(outfile, 'w')
    frej = open_maybe_gzip(rejfile, 'w')
    for k, v in users.iteritems():
        if len(v) == 1:
            # avoid the division for the common single-tweet case
            mean = v[0]
        else:
            mean = sum(v) / len(v)
        f = (fout if mean >= threshold else frej)
        f.write("%4f %s\n" % (mean, k))
    fout.close()
    frej.close()
def dump_users(users, outfile):
    """Write all the users to a file with their scores."""
    fout = open_maybe_gzip(outfile, 'w')
    for k, v in users.iteritems():
        # mean score followed by the screen name
        mean = sum(v) / len(v)
        fout.write("%4f %s\n" % (mean, k))
    fout.close()
def queue_from_file(fn):
    # Queue each user listed in fn (lines of "<score> <name>") for download
    # via the twextract helper script.
    from subprocess import check_call
    debug("Queueing users in %r for download" % (fn,))
    os.chdir(BASE_DIR) #because twextract has git-root relative imports
    f = open(fn)
    for line in f:
        score, user = line.strip().split(None, 1)
        # NOTE(review): missing comma between '-c' and 'config/ceres' means
        # they are concatenated into the single argument '-cconfig/ceres'
        # (optparse still parses that as -c with a value) -- confirm intent.
        check_call(['python', 'twextract/request_queue.py', user, '-c' 'config/ceres'])
        time.sleep(0.001)
    f.close()
def main():
    """Command-line entry point: compile models, score tweets, and/or
    partition users according to the selected options."""
    parser = OptionParser()
    parser.add_option("-m", "--trigram-mode", help="how to trigrammise [%s]" % DEFAULT_MODE,
                      default=DEFAULT_MODE)
    parser.add_option("-c", "--recompile", help="Derive trigrams from corpora", action="store_true")
    parser.add_option("-C", "--recompile-all", help="Derive trigrams for all modes and exit",
                      action="store_true")
    parser.add_option("-T", "--trial", help="show scores of tweets, not users", action="store_true")
    parser.add_option("-t", "--threshold", help="use this as threshold",
                      default=str(DEFAULT_THRESHOLD), metavar="(STRING|FLOAT)")
    parser.add_option("-i", "--input", help="input file or directory", metavar="PATH")
    parser.add_option("-b", "--bad-file", help="write rejects here", metavar="FILE")
    parser.add_option("-g", "--good-file", help="write good ones here", metavar="FILE")
    parser.add_option("-d", "--dump-file", help="write them all here, perhaps in order", metavar="FILE")
    parser.add_option("-q", "--queue", help="queue the good users for download", action="store_true")
    parser.add_option("-Q", "--queue-from-file",
                      help="queue from a pre-existing list (no evaluation)", metavar="FILE")
    parser.add_option("-r", "--report", help="get statistical data on stderr", action="store_true")
    parser.add_option("-f", "--offset-factor", help="English unseen trigram probablility factor",
                      type="float", default=TRIGRAM_OFFSET_FACTOR, metavar="FLOAT")
    parser.add_option("-a", "--anti-offset-factor", help="non-English unseen trigram probablility factor",
                      type="float", default=ANTI_TRIGRAM_OFFSET_FACTOR, metavar="FLOAT")
    (options, args) = parser.parse_args()
    # no arguments at all: show help instead of silently doing nothing
    if len(sys.argv) < 2:
        parser.print_help()
        sys.exit()
    # early-exit maintenance modes
    if options.recompile_all:
        pre_cook(modes=MODES, corpi=CORPI + ANTI_CORPI)
        pre_cook_full(modes=MODES, corpi=CORPI + ANTI_CORPI)
        sys.exit()
    if options.queue_from_file:
        queue_from_file(options.queue_from_file)
        sys.exit()
    src = options.input
    good = options.good_file
    bad = options.bad_file
    dump = options.dump_file
    mode = options.trigram_mode
    if options.recompile:
        pre_cook_full(modes=[mode], corpi=CORPI + ANTI_CORPI)
    tg = get_trigram_with_antimodel(mode, tof=options.offset_factor,
                                    atof=options.anti_offset_factor)
    # a non-numeric threshold string is scored itself to derive the cutoff
    try:
        threshold = float(options.threshold)
    except ValueError:
        threshold = tg.probable_similarity(options.threshold)
        debug("Threshold from %r is %s" %(options.threshold, threshold))
    if options.trial:
        # per-tweet mode: split and/or dump individual tweet scores
        if src is None:
            src = TEST_FILE_1
        if good or bad:
            bisect_the_hose(tg, src, good, bad, threshold=threshold)
        if dump:
            order_the_hose(tg, src, dump)
    elif good or bad or dump:
        # per-user mode: aggregate scores by screen name first
        if src is None:
            src = STASH_DIR
        users = _group_by_user(tg, src)
        if options.report:
            users_report(users)
        if good or bad:
            partition_users(users, good, bad, threshold)
        if dump is not None:
            dump_users(users, dump)
        if good and options.queue:
            queue_from_file(good)
    else:
        debug("nothing much to do!")
| |
#a simple web server to replace the buggy one in python's standard library
import os, os.path, sys, time, socket, traceback, stat
from math import *
# Page served for "/" (or empty) request paths.
INDEXFILE = "/bluenoise6.html"
PORT = 8080
# Minimal extension -> MIME type map; get_mime() falls back to
# application/x-octet-stream for anything not listed here.
mimetypes = {
    ".js" : "application/x-javascript",
    ".html" : "text/html",
    ".json" : "application/x-javascript",
    ".glsl" : "text/plain",
    ".png" : "image/png",
    ".jpg" : "image/jpeg",
    ".obj" : "text/plain"
};
def get_mime(path):
    """Return the MIME type for *path* based on its file extension."""
    lowered = path.strip().lower()
    for extension, mime in mimetypes.items():
        if lowered.endswith(extension):
            return mime
    return "application/x-octet-stream"
class SocketFile:
    """File-like wrapper around a non-blocking socket.

    Outgoing bytes accumulate in ``writebuf`` and incoming bytes in
    ``readbuf``; calling ``__next__`` pumps both directions once.
    """

    def __init__(self, con):
        self.sock = con
        self.writebuf = b""
        self.readbuf = b""
        con.setblocking(False)

    def __next__(self):
        # Pump once: receive whatever is available, then flush up to
        # 8 KiB of the write buffer.  BlockingIOError simply means
        # "nothing to do right now".
        try:
            self.readbuf += self.sock.recv(2048)
        except BlockingIOError:
            pass
        try:
            pending = self.writebuf[:1024 * 8]
            self.sock.send(pending)
            self.writebuf = self.writebuf[len(pending):]
        except BlockingIOError:
            pass

    def write(self, buf):
        self.writebuf += buf

    def read(self, max=2048):
        # Pop and return up to *max* buffered bytes.
        chunk = self.readbuf[:max]
        self.readbuf = self.readbuf[len(chunk):]
        return chunk
def Connection(con, addr, cls):
    """Generator-based coroutine handling one client socket.

    Driven by Server via __next__(); each outer iteration reads one HTTP
    request head from the socket and dispatches it to a fresh *cls*
    handler instance.
    """
    con.setblocking(False)
    file = SocketFile(con)
    while 1:
        sbuf = b""
        yield
        # accumulate bytes until the blank line terminating the head
        while 1:
            file.__next__()
            buf = file.read()
            yield 1
            if (len(buf) == 0): continue;
            sbuf += buf
            if b"\r\n\r\n" in sbuf:
                break;
        lines = [l.strip() for l in sbuf.split(b"\r\n")]
        method = lines[0];
        headers = {}
        # request line looks like b"GET /path HTTP/1.1"
        path = method[method.find(b" ")+1:method.find(b" HTTP")].strip()
        method = method[:method.find(b" ")]
        print(str(method + b" " + path, "latin-1"))
        # parse "Key: value" header lines into a str->str dict
        for l in lines[1:]:
            key = l[:l.find(b":")].strip()
            val = l[l.find(b":")+1:].strip()
            #print(key, val)
            headers[str(key, "latin-1")] = str(val, "latin-1")
        # build the handler and dispatch by method name (do_GET, ...)
        h = cls()
        h.path = str(path, "latin-1")
        h.method = str(method, "latin-1")
        h.headers = headers
        h.rfile = file
        h.wfile = file
        getattr(h, "do_"+str(method, "latin-1").strip())()
        #print("\n")
        yield
class Server:
    """Single-threaded polling server: accepts connections and steps each
    Connection coroutine in a busy loop."""

    def __init__(self, addr, cls):
        self.connections = []
        self.addr = addr
        self.cls = cls  # handler class instantiated per request
        self.socket = socket.socket()

    def start(self):
        # Blocking call: runs the accept/step loop forever.
        self.socket.bind(self.addr)
        self.socket.listen(10)
        sock = self.socket
        sock.setblocking(False)
        while 1:
            dellist = []
            # accept at most one new client per loop iteration
            try:
                ret = sock.accept()
                #print(ret[1])
                con = Connection(ret[0], ret[1], self.cls)
                self.connections.append(con)
            except BlockingIOError:
                pass
            # give every live connection a slice of 45 steps
            for con in self.connections:
                try:
                    for i in range(45):
                        con.__next__()
                except StopIteration:
                    print(" connection closed")
                    dellist.append(con)
                except:
                    # any other failure kills just this connection
                    traceback.print_exc()
                    dellist.append(con);
            for con in dellist:
                self.connections.remove(con)
            # throttle the busy loop (~420 iterations per second)
            time.sleep(1.0/420.0)
        pass
def bs(s):
    """Coerce *s* to bytes: bytes pass through unchanged, anything else
    is str()-ed and encoded as latin-1."""
    # isinstance instead of ``type(s) == bytes`` so that bytes
    # subclasses also pass through unchanged instead of being repr'd.
    if isinstance(s, bytes):
        return s
    return bytes(str(s), "latin-1")
class Handler:
    """Per-request HTTP handler; Connection creates one per parsed request
    and fills in path/method/headers/rfile/wfile before dispatch."""

    def __init__(self):
        self.path = ""
        self.headers = {}
        self.wfile = None
        self.rfile = None
        self.send_headers = []  # [key, value] pairs queued via add_header
        self.body = b""
        self.code = 200

    def send_response(self, code):
        """Serialize the status line, headers and body into wfile."""
        self.body = bs(self.body)
        buf = b"HTTP/1.1 " + bs(code) + b" None\r\n"
        had_content = False
        headers = [
            [b"Connection", b"keep-alive"]
        ] + self.send_headers
        for h in headers:
            if h[0] == b"Content-length":
                had_content = True
            buf += bs(h[0]) + b":" + b" " + bs(h[1]) + b"\r\n"
        if not had_content:
            # default Content-length derived from the body
            buf += b"Content-length: " + bs(len(self.body)) + b"\r\n"
        buf += b"\r\n"
        buf += self.body
        self.wfile.write(buf)

    def add_header(self, key, val):
        """Queue a response header (coerced to bytes)."""
        self.send_headers.append([bs(key), bs(val)])

    def set_body(self, body):
        self.body = body

    def send_error(self, error):
        """Send a minimal plain-text error response with status *error*."""
        body = b"Error: " + bs(error)
        # NOTE(review): "MimeType" is a non-standard header name
        # (presumably the client-side JS expects it) -- confirm.
        self.add_header("MimeType", "text/plain")
        self.set_body(body)
        self.send_response(error)

    def do_GET(self):
        """Serve a file from the current working directory."""
        path = self.path.strip()
        dir = os.getcwd()
        if path == "/" or path == "":
            path = INDEXFILE
        abspath = os.path.abspath(os.path.normpath(dir+os.path.sep+path))
        # Security fix: a bare startswith(dir) also matched sibling
        # directories such as "<dir>-secret"; require the path to be
        # dir itself or strictly below it.
        if not (abspath == dir or abspath.startswith(dir + os.path.sep)):
            self.send_error(404)
            return
        if not os.path.exists(abspath):
            self.send_error(404)
            return
        st = os.stat(abspath)
        if stat.S_ISDIR(st.st_mode):
            # directory listings are not supported
            self.send_error(405)
            return
        file = open(abspath, "rb")
        buf = file.read()
        file.close()
        self.set_body(buf)
        self.add_header("MimeType", get_mime(path))
        self.send_response(200)
# Listen on all interfaces; start() runs the serve loop and never returns.
server = Server(("", PORT), Handler)
print("serving at port", PORT)
server.start()
| |
# -*- coding: utf-8 -*-
from __future__ import print_function
import cStringIO
import json
import sys
import time
from logging import StreamHandler, Formatter
import gevent
import IPython
from devp2p.service import BaseService
from ethereum.slogging import getLogger
from ethereum._solidity import compile_file
from ethereum.utils import denoms
from gevent.event import Event
from IPython.lib.inputhook import inputhook_manager
from pyethapp.utils import bcolors as bc
from pyethapp.jsonrpc import address_encoder, default_gasprice
from pyethapp.console_service import GeventInputHook, SigINTHandler
from raiden.utils import events, get_contract_path
# ipython needs to accept "--gui gevent" option
IPython.core.shellapp.InteractiveShellApp.gui.values += ('gevent',)
inputhook_manager.register('gevent')(GeventInputHook)
def print_usage():
    """Print the colorized help banner shown when the console starts."""
    print("\t{}use `{}raiden{}` to interact with the raiden service.".format(
        bc.OKBLUE, bc.HEADER, bc.OKBLUE))
    print("\tuse `{}chain{}` to interact with the blockchain.".format(bc.HEADER, bc.OKBLUE))
    print("\tuse `{}discovery{}` to find raiden nodes.".format(bc.HEADER, bc.OKBLUE))
    print("\tuse `{}tools{}` for convenience with tokens, channels, funding, ...".format(
        bc.HEADER, bc.OKBLUE))
    print("\tuse `{}denoms{}` for ether calculations".format(bc.HEADER, bc.OKBLUE))
    print("\tuse `{}lastlog(n){}` to see n lines of log-output. [default 10] ".format(
        bc.HEADER, bc.OKBLUE))
    print("\tuse `{}lasterr(n){}` to see n lines of stderr. [default 1]".format(
        bc.HEADER, bc.OKBLUE))
    print("\tuse `{}help(<topic>){}` for help on a specific topic.".format(bc.HEADER, bc.OKBLUE))
    print("\ttype `{}usage(){}` to see this help again.".format(bc.HEADER, bc.OKBLUE))
    # reset terminal colors
    print("\n" + bc.ENDC)
class Console(BaseService):
    """A service starting an interactive ipython session when receiving the
    SIGSTP signal (e.g. via keyboard shortcut CTRL-Z).
    """
    name = 'console'

    def __init__(self, app):
        # FIX: removed leftover debug statements print('1') / print('2').
        super(Console, self).__init__(app)
        self.interrupt = Event()
        self.console_locals = {}
        if app.start_console:
            # Console requested on startup: open it right away.
            self.start()
            self.interrupt.set()
        else:
            # Otherwise arm the SIGINT handler that sets `self.interrupt`.
            SigINTHandler(self.interrupt)

    def _stop_app(self):
        # Best-effort app shutdown; a GreenletExit during teardown is expected.
        try:
            self.app.stop()
        except gevent.GreenletExit:
            pass

    def start(self):
        # start console service
        super(Console, self).start()

        class Raiden(object):
            # Thin holder exposed as `_raiden` for introspection of the app.
            def __init__(self, app):
                self.app = app

        # Namespace handed to the IPython shell in _run().
        self.console_locals = dict(
            _raiden=Raiden(self.app),
            raiden=self.app.raiden,
            chain=self.app.raiden.chain,
            discovery=self.app.discovery,
            tools=ConsoleTools(
                self.app.raiden,
                self.app.discovery,
                self.app.config['settle_timeout'],
                self.app.config['reveal_timeout'],
            ),
            denoms=denoms,
            true=True,
            false=False,
            usage=print_usage,
        )

    def _run(self):
        # Block until either startup (start_console) or SIGINT sets the event.
        self.interrupt.wait()
        print('\n' * 2)
        print("Entering Console" + bc.OKGREEN)
        print("Tip:" + bc.OKBLUE)
        print_usage()

        # Remove handlers that log to stderr
        root = getLogger()
        for handler in root.handlers[:]:
            if isinstance(handler, StreamHandler) and handler.stream == sys.stderr:
                root.removeHandler(handler)

        # Capture subsequent log records in memory so lastlog() can replay them.
        stream = cStringIO.StringIO()
        handler = StreamHandler(stream=stream)
        handler.formatter = Formatter("%(levelname)s:%(name)s %(message)s")
        root.addHandler(handler)

        def lastlog(n=10, prefix=None, level=None):
            """Print the last `n` log lines to stdout.
            Use `prefix='p2p'` to filter for a specific logger.
            Use `level=INFO` to filter for a specific level.
            Level- and prefix-filtering are applied before tailing the log.
            """
            lines = (stream.getvalue().strip().split('\n') or [])
            if prefix:
                lines = filter(lambda line: line.split(':')[1].startswith(prefix), lines)
            if level:
                lines = filter(lambda line: line.split(':')[0] == level, lines)
            for line in lines[-n:]:
                print(line)

        self.console_locals['lastlog'] = lastlog

        # Redirect stderr into a buffer so lasterr() can replay it.
        err = cStringIO.StringIO()
        sys.stderr = err

        def lasterr(n=1):
            """Print the last `n` entries of stderr to stdout.
            """
            for line in (err.getvalue().strip().split('\n') or [])[-n:]:
                print(line)

        self.console_locals['lasterr'] = lasterr

        IPython.start_ipython(argv=['--gui', 'gevent'], user_ns=self.console_locals)
        self.interrupt.clear()

        sys.exit(0)
class ConsoleTools(object):
    """Convenience helpers exposed in the console namespace as `tools`."""

    def __init__(self, raiden_service, discovery, settle_timeout, reveal_timeout):
        self._chain = raiden_service.chain
        self._raiden = raiden_service
        self._discovery = discovery
        self.settle_timeout = settle_timeout
        self.reveal_timeout = reveal_timeout
        # Expose the deposit API directly as `tools.deposit`.
        self.deposit = self._raiden.api.deposit

    def create_token(
            self,
            initial_alloc=10 ** 6,
            name='raidentester',
            symbol='RDT',
            decimals=2,
            timeout=60,
            gasprice=default_gasprice,
            auto_register=True):
        """Create a proxy for a new HumanStandardToken (ERC20), that is
        initialized with Args(below).
        Per default it will be registered with 'raiden'.
        Args:
            initial_alloc (int): amount of initial tokens.
            name (str): human readable token name.
            symbol (str): token shorthand symbol.
            decimals (int): decimal places.
            timeout (int): timeout in seconds for creation.
            gasprice (int): gasprice for the creation transaction.
            auto_register (boolean): if True(default), automatically register
                the asset with raiden.
        Returns:
            token_address: the hex encoded address of the new token/asset.
        """
        contract_path = get_contract_path('HumanStandardToken.sol')
        # Deploy a new ERC20 token
        token_proxy = self._chain.client.deploy_solidity_contract(
            self._raiden.address, 'HumanStandardToken',
            compile_file(contract_path),
            dict(),
            (initial_alloc, name, decimals, symbol),
            contract_path=contract_path,
            gasprice=gasprice,
            timeout=timeout)
        token_address = token_proxy.address.encode('hex')
        if auto_register:
            self.register_asset(token_address)
        print("Successfully created {}the token '{}'.".format(
            'and registered ' if auto_register else ' ',
            name
        ))
        return token_address

    def register_asset(self, token_address):
        """Register a token with the raiden asset manager.
        Args:
            token_address (string): a hex encoded token address.
        Returns:
            channel_manager: the channel_manager contract_proxy.
        """
        # Add the ERC20 token to the raiden registry
        self._chain.default_registry.add_asset(token_address)
        # Obtain the channel manager for the token
        channel_manager = self._chain.manager_by_asset(token_address.decode('hex'))
        # Register the channel manager with the raiden registry
        self._raiden.register_channel_manager(channel_manager)
        return channel_manager

    def ping(self, peer, timeout=0):
        """
        See, if a peer is discoverable and up.
        Args:
            peer (string): the hex-encoded (ethereum) address of the peer.
            timeout (int): The number of seconds to wait for the peer to
                acknowledge our ping
        Returns:
            success (boolean): True if ping succeeded, False otherwise.
        """
        # Check, if peer is discoverable
        try:
            self._discovery.get(peer.decode('hex'))
        except KeyError:
            print("Error: peer {} not found in discovery".format(peer))
            return False

        async_result = self._raiden.protocol.send_ping(peer.decode('hex'))
        return async_result.wait(timeout) is not None

    def open_channel_with_funding(self, token_address, peer, amount,
                                  settle_timeout=None,
                                  reveal_timeout=None):
        """Convenience method to open a channel.
        Args:
            token_address (str): hex encoded address of the token for the channel.
            peer (str): hex encoded address of the channel peer.
            amount (int): amount of initial funding of the channel.
            settle_timeout (int): amount of blocks for the settle time (if None use app defaults).
            reveal_timeout (int): amount of blocks for the reveal time (if None use app defaults).
        Return:
            netting_channel: the (newly opened) netting channel object.
        """
        # Check, if peer is discoverable
        try:
            self._discovery.get(peer.decode('hex'))
        except KeyError:
            print("Error: peer {} not found in discovery".format(peer))
            return

        self._raiden.api.open(
            token_address,
            peer,
            settle_timeout=settle_timeout,
            reveal_timeout=reveal_timeout,
        )
        return self._raiden.api.deposit(token_address, peer, amount)

    def channel_stats_for(self, token_address, peer, pretty=False):
        """Collect information about sent and received transfers
        between yourself and your peer for the given asset.
        Args:
            token_address (string): hex encoded address of the token
            peer (string): hex encoded address of the peer
            pretty (boolean): if True, print a json representation instead of returning a dict
        Returns:
            stats (dict): collected stats for the channel or None if pretty
        """
        # Get the asset
        asset = self._chain.asset(token_address.decode('hex'))
        # Obtain the asset manager
        asset_manager = self._raiden.managers_by_asset_address[token_address.decode('hex')]
        assert asset_manager
        # Get the channel
        channel = asset_manager.get_channel_by_partner_address(peer.decode('hex'))
        assert channel
        # Collect data
        stats = dict(
            transfers=dict(
                received=[t.transferred_amount for t in channel.received_transfers],
                sent=[t.transferred_amount for t in channel.sent_transfers],
            ),
            channel=(channel
                     if not pretty
                     else channel.external_state.netting_channel.address.encode('hex')),
            lifecycle=dict(
                opened_at=channel.external_state.opened_block or 'not yet',
                open=channel.isopen,
                closed_at=channel.external_state.closed_block or 'not yet',
                settled_at=channel.external_state.settled_block or 'not yet',
            ),
            funding=channel.external_state.netting_channel.detail(self._raiden.address),
            asset=dict(
                our_balance=asset.balance_of(self._raiden.address),
                partner_balance=asset.balance_of(peer.decode('hex')),
                name=asset.proxy.name(),
                symbol=asset.proxy.symbol(),
            ),
        )
        # Hex-encode the raw addresses so the dict is printable/serializable.
        stats['funding']['our_address'] = stats['funding']['our_address'].encode('hex')
        stats['funding']['partner_address'] = stats['funding']['partner_address'].encode('hex')
        if not pretty:
            return stats
        else:
            print(json.dumps(stats, indent=2, sort_keys=True))

    def show_events_for(self, token_address, peer):
        """Find all EVM-EventLogs for a channel.
        Args:
            token_address (string): hex encoded address of the token
            peer (string): hex encoded address of the peer
        Returns:
            events (list)
        """
        # Obtain the asset manager
        asset_manager = self._raiden.get_manager_by_asset_address(token_address.decode('hex'))
        assert asset_manager
        # Get the address for the netting contract
        netcontract_address = asset_manager.get_channel_by_partner_address(
            peer.decode('hex')).external_state.netting_channel.address
        assert len(netcontract_address)
        # Get the netting_channel instance
        netting_channel = self._chain.netting_channel(netcontract_address)
        return events.netting_channel_events(self._chain.client, netting_channel)

    def wait_for_contract(self, contract_address, timeout=None):
        """Poll the chain until code is deployed at `contract_address`.
        Args:
            contract_address: the (binary) address to poll for code.
            timeout (int): give up after this many seconds (None: wait forever).
        Returns:
            success (boolean): True once code is present, False on timeout.
        """
        start_time = time.time()
        result = self._raiden.chain.client.call(
            'eth_getCode',
            address_encoder(contract_address),
            'latest',
        )

        current_time = time.time()
        while result == '0x':
            # BUG FIX: the comparison was inverted (`start_time + timeout >
            # current_time`), which made every bounded wait return False on
            # the very first iteration, while the deadline was still ahead.
            if timeout and current_time > start_time + timeout:
                return False

            result = self._raiden.chain.client.call(
                'eth_getCode',
                address_encoder(contract_address),
                'latest',
            )
            gevent.sleep(0.5)
            current_time = time.time()

        return result != '0x'
| |
import sys, os, unittest
import numpy as np
import numpy.testing as nptest
import fcdiff
class UnsharedRegionModelTest(unittest.TestCase):
    """Unit tests for the sampling routines of fcdiff.UnsharedRegionModel."""

    def setUp(self):
        # One fresh model per test, exactly as each test used to construct.
        self.model = fcdiff.UnsharedRegionModel()

    # ---- helpers ---------------------------------------------------------

    def _f_tilde_estimate(self, f_row, t_val):
        """Sample F-tilde for connection state `f_row` under anomaly flag
        `t_val` over 10000 draws; return empirical category frequencies."""
        f = np.array([f_row], dtype='bool')
        t = np.tile(np.array([t_val], dtype='bool'), (1, 10000))
        f_tilde = self.model.sample_F_tilde(f, t)
        return np.squeeze(np.mean(f_tilde, axis=1))

    def _b_moments(self, f_row):
        """Sample B for connection state `f_row`; return (mean, std)."""
        f = np.array([f_row], dtype='bool')
        b = self.model.sample_B(f, 10000)
        return np.mean(b, axis=1), np.std(b, axis=1)

    def _b_tilde_moments(self, f_row):
        """Sample b_tilde for connection state `f_row`; return (mean, std)."""
        f_tilde = np.tile(np.array([[f_row]], dtype='bool'), (1, 10000, 1))
        b_tilde = self.model.sample_B_tilde(f_tilde)
        return np.mean(b_tilde, axis=1), np.std(b_tilde, axis=1)

    # ---- tests -----------------------------------------------------------

    def test_str(self):
        """ Tests informal string representation.
        """
        text = self.model.__str__()
        assert(type(text) == str)

    def test_sample(self):
        """ Tests sampling all random variables.
        """
        N, H, U = 10, 5, 4
        C = 45  # pairwise connections for N = 10 regions
        (R, T, F, f_tilde, B, b_tilde) = self.model.sample(N, H, U)
        expectations = [
            (R, (N, U), 'bool'),
            (T, (C, U), 'bool'),
            (F, (C, 3), 'bool'),
            (f_tilde, (C, U, 3), 'bool'),
            (B, (C, H), 'float64'),
            (b_tilde, (C, U), 'float64'),
        ]
        for arr, shape, dtype in expectations:
            nptest.assert_equal(arr.shape, shape)
            nptest.assert_equal(arr.dtype, np.dtype(dtype))

    def test_sample_R(self):
        """ Tests sampling R and checks estimated pi.
        """
        r = self.model.sample_R(3, 10000)
        nptest.assert_allclose(np.mean(r, axis=1), self.model.pi, atol=0.02)

    def test_sample_T_partially_connected(self):
        """ Tests sampling T and given R that implies partial connectivity.
        """
        r = np.tile(np.array([[1], [0], [0]], dtype='bool'), (1, 10000))
        t = self.model.sample_T(r)
        nptest.assert_equal(t[2, :], 0)
        nptest.assert_allclose(np.mean(t[0:2, :], axis=1), self.model.eta,
                               atol=0.05)

    def test_sample_T_fully_connected(self):
        """ Tests sampling T given R that implies full connectivity.
        """
        r = np.ones((3, 1), dtype='bool')
        nptest.assert_array_equal(self.model.sample_T(r),
                                  np.ones((3, 1), dtype='bool'))

    def test_sample_T_fully_disconnected(self):
        """ Tests sampling T given R that implies full disconnectivity.
        """
        r = np.zeros((3, 1), dtype='bool')
        nptest.assert_array_equal(self.model.sample_T(r),
                                  np.zeros((3, 1), dtype='bool'))

    def test_sample_F_valid(self):
        """ Tests sampling F with valid arguments.
        """
        f = self.model.sample_F(100)
        nptest.assert_allclose(np.mean(f, axis=0), self.model.gamma, atol=0.05)

    def test_sample_F_tilde_typical_negative(self):
        """ Tests sampling F-tilde with a typical negative connection.
        """
        e = self.model.epsilon
        nptest.assert_allclose(self._f_tilde_estimate([1, 0, 0], 0),
                               np.array([1 - e, e / 2, e / 2]), atol=0.05)

    def test_sample_F_tilde_typical_neutral(self):
        """ Tests sampling F-tilde with a typical neutral connection.
        """
        e = self.model.epsilon
        nptest.assert_allclose(self._f_tilde_estimate([0, 1, 0], 0),
                               np.array([e / 2, 1 - e, e / 2]), atol=0.05)

    def test_sample_F_tilde_typical_positive(self):
        """ Tests sampling F-tilde with a typical positive connection.
        """
        e = self.model.epsilon
        nptest.assert_allclose(self._f_tilde_estimate([0, 0, 1], 0),
                               np.array([e / 2, e / 2, 1 - e]), atol=0.05)

    def test_sample_F_tilde_anomalous_negative(self):
        """ Tests sampling F-tilde with an anomalous negative connection.
        """
        e = self.model.epsilon
        nptest.assert_allclose(self._f_tilde_estimate([1, 0, 0], 1),
                               np.array([e, (1 - e) / 2, (1 - e) / 2]),
                               atol=0.05)

    def test_sample_F_tilde_anomalous_neutral(self):
        """ Tests sampling F-tilde with an anomalous neutral connection.
        """
        e = self.model.epsilon
        nptest.assert_allclose(self._f_tilde_estimate([0, 1, 0], 1),
                               np.array([(1 - e) / 2, e, (1 - e) / 2]),
                               atol=0.05)

    def test_sample_F_tilde_anomalous_positive(self):
        """ Tests sampling F-tilde with an anomalous positive connection.
        """
        e = self.model.epsilon
        nptest.assert_allclose(self._f_tilde_estimate([0, 0, 1], 1),
                               np.array([(1 - e) / 2, (1 - e) / 2, e]),
                               atol=0.05)

    def test_sample_B_range(self):
        """ Tests that sampling B creates values in [-1, 1].
        """
        f = np.array([
            [1, 0, 0],
            [0, 1, 0],
            [0, 0, 1],
        ], dtype='bool')
        b = self.model.sample_B(f, 10000)
        nptest.assert_array_less(b, 1.00001)
        nptest.assert_array_less(-1.00001, b)

    def test_sample_B_negative(self):
        """ Tests sampling B from a negative connection.
        """
        mu_est, sigma_est = self._b_moments([1, 0, 0])
        nptest.assert_allclose(mu_est, self.model.mu[0], atol=0.05)
        nptest.assert_allclose(sigma_est, self.model.sigma[0], atol=0.05)

    def test_sample_B_neutral(self):
        """ Tests sampling B from a neutral connection.
        """
        mu_est, sigma_est = self._b_moments([0, 1, 0])
        nptest.assert_allclose(mu_est, self.model.mu[1], atol=0.05)
        nptest.assert_allclose(sigma_est, self.model.sigma[1], atol=0.05)

    def test_sample_B_positive(self):
        """ Tests sampling B from a positive connection.
        """
        mu_est, sigma_est = self._b_moments([0, 0, 1])
        nptest.assert_allclose(mu_est, self.model.mu[2], atol=0.05)
        nptest.assert_allclose(sigma_est, self.model.sigma[2], atol=0.05)

    def test_sample_B_tilde_range(self):
        """ Tests that sampling b_tilde creates values in [-1, 1].
        """
        f_tilde = np.tile(
            np.array([
                [[1, 0, 0]],
                [[0, 1, 0]],
                [[0, 0, 1]],
            ], dtype='bool'),
            (1, 10000, 1))
        b_tilde = self.model.sample_B_tilde(f_tilde)
        nptest.assert_array_less(b_tilde, 1 + np.finfo(float).eps)
        nptest.assert_array_less(-1 - np.finfo(float).eps, b_tilde)

    def test_sample_B_tilde_negative(self):
        """ Tests sampling b_tilde from a negative connection.
        """
        mu_est, sigma_est = self._b_tilde_moments([1, 0, 0])
        nptest.assert_allclose(mu_est, self.model.mu[0], atol=0.05)
        nptest.assert_allclose(sigma_est, self.model.sigma[0], atol=0.02)

    def test_sample_B_tilde_neutral(self):
        """ Tests sampling b_tilde from a neutral connection.
        """
        mu_est, sigma_est = self._b_tilde_moments([0, 1, 0])
        nptest.assert_allclose(mu_est, self.model.mu[1], atol=0.05)
        nptest.assert_allclose(sigma_est, self.model.sigma[1], atol=0.02)

    def test_sample_B_tilde_positive(self):
        """ Tests sampling b_tilde from a positive connection.
        """
        mu_est, sigma_est = self._b_tilde_moments([0, 0, 1])
        nptest.assert_allclose(mu_est, self.model.mu[2], atol=0.05)
        nptest.assert_allclose(sigma_est, self.model.sigma[2], atol=0.02)
| |
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import os
import shutil
import tempfile
import uuid
from keystoneclient.v2_0 import client as keystoneclient
from muranoclient.common import exceptions as muranoclient_exc
from muranoclient.v1 import client as muranoclient
import six
from murano.common import config
from murano.dsl import exceptions
from murano.engine import yaql_yaml_loader
from murano.openstack.common import log as logging
from murano.packages import exceptions as pkg_exc
from murano.packages import load_utils
LOG = logging.getLogger(__name__)
class PackageLoader(six.with_metaclass(abc.ABCMeta)):
    """Abstract interface for looking up murano packages."""

    @abc.abstractmethod
    def get_package_by_class(self, name):
        """Return the package that defines class `name`."""

    @abc.abstractmethod
    def get_package(self, name):
        """Return the package with fully qualified name `name`."""
class ApiPackageLoader(PackageLoader):
    """Loads murano packages from the murano API, caching downloaded
    packages under a per-instance temporary cache directory.

    Usable as a context manager: the cache is removed on exit.
    """
    def __init__(self, token_id, tenant_id):
        # token_id / tenant_id: keystone credentials used to authenticate
        # and to resolve the murano endpoint when it is not configured.
        self._cache_directory = self._get_cache_directory()
        self._client = self._get_murano_client(token_id, tenant_id)
    def get_package_by_class(self, name):
        """Return the package defining class `name`.

        Raises NoPackageForClassFound if the lookup or the load fails.
        """
        filter_opts = {'class_name': name, 'limit': 1}
        try:
            package_definition = self._get_definition(filter_opts)
            return self._get_package_by_definition(package_definition)
        except(LookupError, pkg_exc.PackageLoadError):
            raise exceptions.NoPackageForClassFound(name)
    def get_package(self, name):
        """Return the package with fully qualified name `name`.

        Raises NoPackageFound if the lookup or the load fails.
        """
        filter_opts = {'fqn': name, 'limit': 1}
        try:
            package_definition = self._get_definition(filter_opts)
            return self._get_package_by_definition(package_definition)
        except(LookupError, pkg_exc.PackageLoadError):
            raise exceptions.NoPackageFound(name)
    @staticmethod
    def _get_cache_directory():
        # A unique (uuid4) subdirectory of the configured cache root, so
        # separate loader instances never share or clobber cached packages.
        directory = os.path.join(config.CONF.packages_opts.packages_cache,
                                 str(uuid.uuid4()))
        directory = os.path.abspath(directory)
        os.makedirs(directory)
        LOG.debug('Cache for package loader is located at: '
                  '{0}'.format(directory))
        return directory
    @staticmethod
    def _get_murano_client(token_id, tenant_id):
        # Build a murano client; when no murano URL is configured, resolve
        # the endpoint through keystone's service catalog.
        murano_settings = config.CONF.murano
        endpoint_url = murano_settings.url
        if endpoint_url is None:
            keystone_settings = config.CONF.keystone
            keystone_client = keystoneclient.Client(
                endpoint=keystone_settings.auth_url,
                cacert=keystone_settings.ca_file or None,
                cert=keystone_settings.cert_file or None,
                key=keystone_settings.key_file or None,
                insecure=keystone_settings.insecure
            )
            if not keystone_client.authenticate(
                    auth_url=keystone_settings.auth_url,
                    tenant_id=tenant_id,
                    token=token_id):
                raise muranoclient_exc.HTTPUnauthorized()
            endpoint_url = keystone_client.service_catalog.url_for(
                service_type='application_catalog',
                endpoint_type=murano_settings.endpoint_type
            )
        return muranoclient.Client(
            endpoint=endpoint_url,
            key_file=murano_settings.key_file or None,
            cacert=murano_settings.cacert or None,
            cert_file=murano_settings.cert_file or None,
            insecure=murano_settings.insecure,
            token=token_id
        )
    def _get_definition(self, filter_opts):
        # Return the first package definition matching filter_opts, or raise
        # LookupError when nothing matches / the repository call fails.
        try:
            packages = self._client.packages.filter(**filter_opts)
            try:
                return packages.next()
            except StopIteration:
                LOG.debug('There are no packages matching filter '
                          '{0}'.format(filter_opts))
                # TODO(smelikyan): This exception should be replaced with one
                # defined in python-muranoclient
                raise LookupError()
        except muranoclient_exc.HTTPException:
            LOG.debug('Failed to get package definition from repository')
            raise LookupError()
    def _get_package_by_definition(self, package_def):
        # Load the package from the local cache, downloading (and caching)
        # it first when it is missing or unloadable.
        package_id = package_def.id
        package_name = package_def.fully_qualified_name
        package_directory = os.path.join(self._cache_directory, package_name)
        if os.path.exists(package_directory):
            try:
                return load_utils.load_from_dir(
                    package_directory, preload=True,
                    loader=yaql_yaml_loader.YaqlYamlLoader)
            except pkg_exc.PackageLoadError:
                # Cached copy is broken; drop it and fall through to download.
                LOG.exception('Unable to load package from cache. Clean-up...')
                shutil.rmtree(package_directory, ignore_errors=True)
        try:
            package_data = self._client.packages.download(package_id)
        except muranoclient_exc.HTTPException:
            LOG.exception('Unable to download '
                          'package with id {0}'.format(package_id))
            raise pkg_exc.PackageLoadError()
        package_file = None
        try:
            with tempfile.NamedTemporaryFile(delete=False) as package_file:
                package_file.write(package_data)
            return load_utils.load_from_file(
                package_file.name,
                target_dir=package_directory,
                drop_dir=False,
                loader=yaql_yaml_loader.YaqlYamlLoader
            )
        except IOError:
            LOG.exception('Unable to write package file')
            raise pkg_exc.PackageLoadError()
        finally:
            # Always remove the temp archive; the package has been extracted
            # into package_directory by load_from_file.
            try:
                if package_file:
                    os.remove(package_file.name)
            except OSError:
                pass
    def cleanup(self):
        # Delete this loader's entire package cache.
        shutil.rmtree(self._cache_directory, ignore_errors=True)
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Clean the cache on exit; never suppress the original exception.
        self.cleanup()
        return False
class DirectoryPackageLoader(PackageLoader):
    """Loads murano packages from the subdirectories of a base path,
    indexing them once (by class name and by package name) at creation."""

    def __init__(self, base_path):
        self._base_path = base_path
        self._processed_entries = set()
        self._packages_by_class = {}
        self._packages_by_name = {}
        self._build_index()

    def get_package(self, name):
        """Return the package with full name `name`, or None."""
        return self._packages_by_name.get(name)

    def get_package_by_class(self, name):
        """Return the package defining class `name`, or None."""
        return self._packages_by_class.get(name)

    def _build_index(self):
        """Scan the base path once and index every loadable package."""
        for entry in os.listdir(self._base_path):
            folder = os.path.join(self._base_path, entry)
            if not os.path.isdir(folder):
                continue
            if entry in self._processed_entries:
                continue
            try:
                package = load_utils.load_from_dir(
                    folder, preload=True,
                    loader=yaql_yaml_loader.YaqlYamlLoader)
            except pkg_exc.PackageLoadError:
                # Skip broken packages but keep indexing the rest.
                LOG.exception('Unable to load package from path: '
                              '{0}'.format(entry))
                continue
            self._packages_by_class.update(
                (class_name, package) for class_name in package.classes)
            self._packages_by_name[package.full_name] = package
            self._processed_entries.add(entry)
| |
import os
import sys
import tempfile
from dodo_commands import CommandError, Dodo
from dodo_commands.framework.global_config import load_global_config_parser
from dodo_commands.framework.paths import Paths
from dodo_commands.framework.util import bordered, is_using_system_dodo, symlink
def _args():
    """Configure the argument parser for `dodo install-commands` and parse."""
    parser = Dodo.parser
    parser.description = (
        "Install command packages into the global "
        + "commands directory. "
        + _packages_in_extra_dir()
    )

    add = parser.add_argument
    add("paths", nargs="*", help="Create symlinks to these command directories")
    add(
        "--as",
        dest="as_directory",
        help="Use this name for the target commands directory",
    )
    add("--pip", nargs="*", help="Pip install the commands in these packages")
    add("--git", nargs="*", help="Clone a git repo into the commands directory")
    add(
        "--remove",
        action="store_true",
        help="Remove commands from the commands directory",
    )
    add(
        "--to-defaults",
        action="store_true",
        help="Install into the default commands directory",
    )

    # --make-default and --remove-default are mutually exclusive.
    group = parser.add_mutually_exclusive_group()
    group.add_argument(
        "--make-default",
        help="Create a symlink to a global commands package in the default commands directory",
    )
    group.add_argument(
        "--remove-default",
        help="Remove a symlink to a global commands package from the default commands directory",
    )

    return Dodo.parse_args()
def check_setuptools():
    """Exit with an error when the configured python lacks setuptools."""
    if Dodo.run([_python_path(), "-c", "import setuptools"]):
        return
    message = bordered(
        "Error: your python version does not have setuptools installed.\n"
        + "Check the settings.python option in %s"
        % Paths().global_config_filename()
    )
    _report_error("\n" + message)
    sys.exit(1)
def _packages_in_extra_dir():
    """Describe the command packages available in dodo_commands.extra.

    Returns an empty string when none are found; otherwise a sentence
    naming them, suitable for the parser description.
    """
    extra_dir = Paths().extra_dir()
    packages = []
    for entry in os.listdir(extra_dir):
        # Skip dunder entries like __init__.py / __pycache__.
        if os.path.isdir(os.path.join(extra_dir, entry)) and not entry.startswith("__"):
            packages.append(entry)

    if not packages:
        return ""

    if len(packages) == 1:
        msg = " The %s package is found automagically " % packages[0]
    else:
        packages[-1] = "and " + packages[-1]
        msg = " The %s packages are found automagically " % ", ".join(packages)

    return (
        msg
        + " in the dodo_commands.extra package"
        + ", e.g. the following works: dodo install-commands %s." % packages[0]
    )
def _report_error(msg):
    """Write *msg* plus a platform line separator to stderr."""
    sys.stderr.write("%s%s" % (msg, os.linesep))
def _remove_package(package, only_from_defaults=False):
    """Remove an installed commands package.

    Deletes the package from the global commands directory (unless
    ``only_from_defaults`` is True) and removes its symlink from the
    default commands directory when one exists.

    Raises:
        CommandError: if the package is not installed globally.
    """
    if not only_from_defaults:
        dest_dir = os.path.join(Paths().global_commands_dir(), package)
        if not os.path.exists(dest_dir):
            raise CommandError("Not installed: %s" % dest_dir)
        Dodo.run(["rm", "-rf", dest_dir])
    # Also drop the symlink in the defaults directory, if any.
    defaults_dest_dir = os.path.join(Paths().default_commands_dir(), package)
    if os.path.exists(defaults_dest_dir):
        Dodo.run(["rm", defaults_dest_dir])
def _install_package(package, as_directory, install_commands_function, add_to_defaults):
    """Install a commands package into the global commands directory.

    `install_commands_function(target_dir)` performs the actual install;
    when `add_to_defaults` is True a symlink is also created in the
    default commands directory. Returns True on success, False otherwise.
    """
    dir_name = as_directory or package
    target_dir = os.path.join(Paths().global_commands_dir(), dir_name)
    default_link = os.path.join(Paths().default_commands_dir(), dir_name)

    if add_to_defaults and os.path.exists(default_link):
        _report_error("Error: already installed: %s" % default_link)
        return False

    if not install_commands_function(target_dir):
        return False

    if not add_to_defaults:
        return True

    if not os.path.exists(target_dir):
        _report_error("Error: not found: %s" % target_dir)
        return False
    Dodo.run(["ln", "-s", target_dir, default_link])
    return True
def _install_commands_from_path(path, dest_dir, mv=False):
    """Install a commands directory into `dest_dir`.

    The directory is moved when `mv` is True, symlinked otherwise. When
    `path` does not exist it is also looked up inside the
    dodo_commands.extra directory. Returns True on success, False otherwise.
    """
    if not os.path.exists(path):
        alt_path = os.path.join(Paths().extra_dir(), path)
        if os.path.exists(alt_path):
            path = alt_path
        else:
            _report_error("Error: path not found: %s" % path)
            return False

    if os.path.exists(dest_dir):
        _report_error("Error: already installed: %s" % dest_dir)
        return False

    if mv:
        Dodo.run(["mv", path, dest_dir])
    else:
        try:
            Dodo.run(["ln", "-s", os.path.abspath(path), dest_dir])
        except Exception:
            _report_error("Error: could not create a symlink in %s." % dest_dir)
            # BUG FIX: previously fell through to `return True`, so a failed
            # symlink was reported as a successful install to the caller.
            return False

    return True
def _python_path():
    """Return the python interpreter configured in the global dodo config."""
    return load_global_config_parser().get("settings", "python_interpreter")
def _install_commands_from_package(package):
    """Pip-install `package` into the global commands directory."""
    pip_install = [
        _python_path(),
        "-m",
        "pip",
        "install",
        "--upgrade",
        "--target",
        Paths().global_commands_dir(),
        package,
    ]
    Dodo.run(pip_install)
    return True
def _clone_git_repo(repo_path):
    """Clone *repo_path* into a fresh temporary directory.

    Returns a (clone_dir, package_name) tuple; package_name is the single
    directory `git clone` created inside the empty temp dir.
    """
    clone_dir = tempfile.mkdtemp()
    Dodo.run(["git", "clone", repo_path], cwd=clone_dir)
    return clone_dir, os.listdir(clone_dir)[0]
if Dodo.is_main(__name__):
    args = _args()

    # Pip installs write into the system dodo environment.
    if args.pip and not is_using_system_dodo():
        raise CommandError(
            "Please activate the default environment first by running 'dodo env default'."
        )

    if args.make_default:
        # Only create the defaults-dir symlink; installing is a no-op here.
        # BUG FIX: the no-op callback must accept the dest_dir argument —
        # it used to be `lambda: True`, which raised a TypeError when
        # _install_package invoked install_commands_function(dest_dir).
        _install_package(args.make_default, None, lambda dest_dir: True, True)
        sys.exit(0)

    if args.remove_default:
        _remove_package(args.remove_default, only_from_defaults=True)
        sys.exit(0)

    if args.paths:
        for path in args.paths:
            package = os.path.basename(path)
            if args.remove:
                _remove_package(package)
            else:
                _install_package(
                    package,
                    args.as_directory,
                    lambda dest_dir: _install_commands_from_path(path, dest_dir),
                    args.to_defaults,
                )

    if args.pip:
        check_setuptools()
        for package in args.pip:
            if args.remove:
                _remove_package(package)
            else:
                if args.as_directory:
                    raise CommandError(
                        "Sorry, the --as option is not supported when --pip is used."
                    )
                _install_package(
                    package,
                    args.as_directory,
                    lambda dest_dir: _install_commands_from_package(package),
                    args.to_defaults,
                )

    if args.git:
        for repo_path in args.git:
            if args.remove:
                raise CommandError(
                    "The --git option is not supported when removing a package."
                    + " Please use dodo install-commands --remove <package>."
                )
            tmp_dir, package = _clone_git_repo(repo_path)
            _install_package(
                package,
                args.as_directory,
                lambda dest_dir: _install_commands_from_path(
                    os.path.join(tmp_dir, package), dest_dir, mv=True
                ),
                args.to_defaults,
            )
            # The clone was moved into place; drop the temp dir.
            Dodo.run(["rm", "-rf", tmp_dir])
| |
from datetime import datetime
from pandas import DatetimeIndex, Series
import numpy as np
import dateutil.tz
import pytz
import pytest
import pandas.util.testing as tm
import pandas as pd
def test_to_native_types():
    """Exercise DatetimeIndex.to_native_types: defaults, na_rep, slicing,
    date_format, and NaT handling."""
    index = DatetimeIndex(freq='1D', periods=3, start='2017-01-01')

    # First, with no arguments.
    expected = np.array(['2017-01-01', '2017-01-02', '2017-01-03'],
                        dtype=object)
    tm.assert_numpy_array_equal(index.to_native_types(), expected)

    # No NaN values, so na_rep has no effect
    tm.assert_numpy_array_equal(index.to_native_types(na_rep='pandas'),
                                expected)

    # Make sure slicing works
    sliced = np.array(['2017-01-01', '2017-01-03'], dtype=object)
    tm.assert_numpy_array_equal(index.to_native_types([0, 2]), sliced)

    # Make sure date formatting works
    formatted = np.array(['01-2017-01', '01-2017-02', '01-2017-03'],
                         dtype=object)
    tm.assert_numpy_array_equal(index.to_native_types(date_format='%m-%Y-%d'),
                                formatted)

    # NULL object handling should work
    index = DatetimeIndex(['2017-01-01', pd.NaT, '2017-01-03'])
    with_nat = np.array(['2017-01-01', 'NaT', '2017-01-03'], dtype=object)
    tm.assert_numpy_array_equal(index.to_native_types(), with_nat)

    with_rep = np.array(['2017-01-01', 'pandas', '2017-01-03'], dtype=object)
    tm.assert_numpy_array_equal(index.to_native_types(na_rep='pandas'),
                                with_rep)
class TestDatetimeIndexRendering(object):
def test_dti_repr_short(self):
dr = pd.date_range(start='1/1/2012', periods=1)
repr(dr)
dr = pd.date_range(start='1/1/2012', periods=2)
repr(dr)
dr = pd.date_range(start='1/1/2012', periods=3)
repr(dr)
    @pytest.mark.parametrize('method', ['__repr__', '__unicode__', '__str__'])
    def test_dti_representation(self, method):
        """Check the textual representation (repr/unicode/str) of
        DatetimeIndexes: empty, naive with freq, tz-aware, and with NaT."""
        idxs = []
        idxs.append(DatetimeIndex([], freq='D'))
        idxs.append(DatetimeIndex(['2011-01-01'], freq='D'))
        idxs.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
        idxs.append(DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
                                  freq='D'))
        idxs.append(DatetimeIndex(
            ['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
             ], freq='H', tz='Asia/Tokyo'))
        idxs.append(DatetimeIndex(
            ['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
        idxs.append(DatetimeIndex(
            ['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))

        # Expected strings, index-aligned with `idxs` above.
        exp = []
        exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
        exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
                   "freq='D')")
        exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
                   "dtype='datetime64[ns]', freq='D')")
        exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
                   "dtype='datetime64[ns]', freq='D')")
        exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
                   "'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
                   ", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
        exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
                   "'2011-01-01 10:00:00-05:00', 'NaT'], "
                   "dtype='datetime64[ns, US/Eastern]', freq=None)")
        exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
                   "'2011-01-01 10:00:00+00:00', 'NaT'], "
                   "dtype='datetime64[ns, UTC]', freq=None)""")

        # Widen the display so the representation is not wrapped/truncated.
        with pd.option_context('display.width', 300):
            for indx, expected in zip(idxs, exp):
                result = getattr(indx, method)()
                assert result == expected
def test_dti_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = ("0 2011-01-01\n"
"dtype: datetime64[ns]")
exp3 = ("0 2011-01-01\n"
"1 2011-01-02\n"
"dtype: datetime64[ns]")
exp4 = ("0 2011-01-01\n"
"1 2011-01-02\n"
"2 2011-01-03\n"
"dtype: datetime64[ns]")
exp5 = ("0 2011-01-01 09:00:00+09:00\n"
"1 2011-01-01 10:00:00+09:00\n"
"2 2011-01-01 11:00:00+09:00\n"
"dtype: datetime64[ns, Asia/Tokyo]")
exp6 = ("0 2011-01-01 09:00:00-05:00\n"
"1 2011-01-01 10:00:00-05:00\n"
"2 NaT\n"
"dtype: datetime64[ns, US/Eastern]")
exp7 = ("0 2011-01-01 09:00:00\n"
"1 2011-01-02 10:15:00\n"
"dtype: datetime64[ns]")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
assert result == expected
def test_dti_summary(self):
# GH#9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = ("DatetimeIndex: 0 entries\n"
"Freq: D")
exp2 = ("DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01\n"
"Freq: D")
exp3 = ("DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02\n"
"Freq: D")
exp4 = ("DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03\n"
"Freq: D")
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx._summary()
assert result == expected
def test_dti_business_repr(self):
# only really care that it works
repr(pd.bdate_range(datetime(2009, 1, 1), datetime(2010, 1, 1)))
def test_dti_business_summary(self):
rng = pd.bdate_range(datetime(2009, 1, 1), datetime(2010, 1, 1))
rng._summary()
rng[2:2]._summary()
def test_dti_business_summary_pytz(self):
pd.bdate_range('1/1/2005', '1/1/2009', tz=pytz.utc)._summary()
def test_dti_business_summary_dateutil(self):
pd.bdate_range('1/1/2005', '1/1/2009',
tz=dateutil.tz.tzutc())._summary()
def test_dti_custom_business_repr(self):
# only really care that it works
repr(pd.bdate_range(datetime(2009, 1, 1), datetime(2010, 1, 1),
freq='C'))
def test_dti_custom_business_summary(self):
rng = pd.bdate_range(datetime(2009, 1, 1), datetime(2010, 1, 1),
freq='C')
rng._summary()
rng[2:2]._summary()
def test_dti_custom_business_summary_pytz(self):
pd.bdate_range('1/1/2005', '1/1/2009', freq='C',
tz=pytz.utc)._summary()
def test_dti_custom_business_summary_dateutil(self):
pd.bdate_range('1/1/2005', '1/1/2009', freq='C',
tz=dateutil.tz.tzutc())._summary()
| |
# -*- coding:utf-8 -*-
# Copyright 2015 NEC Corporation. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
import logging
import copy
from urlparse import urlparse
from org.o3project.odenos.remoteobject.remote_object import RemoteObject
from org.o3project.odenos.remoteobject.object_property import ObjectProperty
from org.o3project.odenos.remoteobject.message.request import Request
from org.o3project.odenos.remoteobject.message.response import Response
from org.o3project.odenos.remoteobject.manager.component.event.component_changed import(
ComponentChanged
)
from org.o3project.odenos.remoteobject.manager.component.component_type\
import ComponentType
from org.o3project.odenos.remoteobject.manager.system.event.component_manager_changed\
import ComponentManagerChanged
from org.o3project.odenos.core.util.request_parser import RequestParser
class ComponentManager(RemoteObject):
    """Remote object that hosts component instances and answers REST-style
    requests (list/create/get/delete components, list component types).

    NOTE: Python 2 code (``except Exception, e`` syntax, ``urlparse`` import).
    """
    # FIXME
    DESCRIPTION = "ComponentManager for python"
    def __init__(self, object_id, dispatcher):
        """Initialize routing rules and mark this object RUNNING."""
        RemoteObject.__init__(self, object_id, dispatcher)
        # Registered component classes, keyed by class name.
        self.component_classes = {}
        # Live component instances, keyed by object id.
        self.components = {}
        self.dispatcher = dispatcher
        self.__parser = RequestParser()
        self.__add_rules()
        self._object_property.set_state(ObjectProperty.State.RUNNING)
    def register_to_system_manager(self):
        """Register this manager with the SystemManager, then hook up the
        EventManager and subscribe to ComponentManagerChanged events.

        On any failure the object state is set to ERROR and registration
        stops early.
        """
        logging.debug("object_property of ComponentManager %s is %s",
                      self.object_id,
                      self._object_property.packed_object())
        self.__register_component_managers()
        path = "component_managers/%s" % self.object_id
        resp = self._request(self.dispatcher.system_manager_id,
                             Request.Method.PUT,
                             path,
                             self._object_property)
        if resp.is_error(Request.Method.PUT):
            logging.error("Failed registration to SystemManager.")
            self._object_property.set_state(ObjectProperty.State.ERROR)
            return
        self.__register_event_manager()
        self.__subscribe_event()
        logging.info("Complete registration to SystemManager.")
    def __register_component_managers(self):
        """Fetch the list of peer component managers from the SystemManager
        and register a remote client for each of them."""
        resp = self._request(self.dispatcher.system_manager_id,
                             Request.Method.GET,
                             "component_managers",
                             None)
        if resp.is_error(Request.Method.GET):
            logging.error("Failed get component_managers from SystemManager.")
            self._object_property.set_state(ObjectProperty.State.ERROR)
            return
        for component_manager in resp.body:
            self.__register_other_component_manager(component_manager)
    def __register_other_component_manager(self, component_manager):
        """Register a dispatcher client for one peer manager (skips self)."""
        object_id = component_manager[ObjectProperty.OBJECT_ID]
        if object_id == self.object_id:
            return
        logging.info("Register Other Component Manager ID:%s", object_id)
        self.dispatcher.add_remote_client(object_id)
    def __unregister_component_manager(self, object_id):
        """Drop the dispatcher client for a departed peer manager."""
        self.dispatcher.remove_remote_client(object_id)
    def __register_event_manager(self):
        """Look up the EventManager via the SystemManager and register a
        remote client for it; sets ERROR state on lookup failure."""
        resp = self._request(self.dispatcher.system_manager_id,
                             Request.Method.GET,
                             "objects/%s" % self.dispatcher.event_manager_id,
                             None)
        if resp.is_error(Request.Method.GET):
            self._object_property.set_state(ObjectProperty.State.ERROR)
            return
        self.dispatcher.add_remote_client(self.dispatcher.event_manager_id)
    def __subscribe_event(self):
        """Subscribe to ComponentManagerChanged events from the SystemManager."""
        self._event_subscription.add_filter(
            self.dispatcher.system_manager_id,
            ComponentManagerChanged.TYPE)
        self._apply_event_subscription()
    def register_component_type(self, component):
        """Register a component class so instances of it can be created via
        PUT components/<id>; also appends the class name to the
        comma-separated COMPONENT_TYPES property."""
        component_name = component.__name__
        logging.info("Register Component Type:%s", component_name)
        self.component_classes[component_name] = component
        component_types = \
            self._object_property.get_property(ObjectProperty.COMPONENT_TYPES)
        if component_types:
            component_types += (",%s" % component_name)
        else:
            component_types = "%s" % component_name
        self._object_property.set_property(ObjectProperty.COMPONENT_TYPES,
                                           component_types)
    def __add_rules(self):
        """Install the URL-pattern -> handler routing table used by
        _on_request."""
        rules = []
        rules.append({RequestParser.PATTERN: r"^component_types/?$",
                      RequestParser.METHOD: Request.Method.GET,
                      RequestParser.FUNC: self._do_get_component_types,
                      RequestParser.PARAMS: 0})
        rules.append({RequestParser.PATTERN: r"^components/?$",
                      RequestParser.METHOD: Request.Method.GET,
                      RequestParser.FUNC: self._do_get_components,
                      RequestParser.PARAMS: 0})
        # PARAMS is 2 here because PUT passes the request body plus the
        # captured object id; GET/DELETE only pass the captured id.
        rules.append({RequestParser.PATTERN: r"^components/"
                                             + "([a-zA-Z0-9_-]+)/?$",
                      RequestParser.METHOD: Request.Method.PUT,
                      RequestParser.FUNC: self._do_put_component,
                      RequestParser.PARAMS: 2})
        rules.append({RequestParser.PATTERN: r"^components/"
                                             + "([a-zA-Z0-9_-]+)/?$",
                      RequestParser.METHOD: Request.Method.GET,
                      RequestParser.FUNC: self._do_get_component,
                      RequestParser.PARAMS: 1})
        rules.append({RequestParser.PATTERN: r"^components/"
                                             + "([a-zA-Z0-9_-]+)/?$",
                      RequestParser.METHOD: Request.Method.DELETE,
                      RequestParser.FUNC: self._do_delete_component,
                      RequestParser.PARAMS: 1})
        self.__parser.add_rule(rules)
    def _on_request(self, request):
        """Dispatch an incoming request through the routing table."""
        return self.__parser.action(request)
    def _do_get_component(self, object_id):
        """GET components/<id>: 200 with the component's property, else 404."""
        if object_id in self.components:
            return Response(Response.StatusCode.OK,
                            self.components[object_id].object_property)
        return Response(Response.StatusCode.NOT_FOUND, None)
    def _do_put_component(self, obj_prop, object_id):
        """PUT components/<id>: instantiate a registered component class.

        Returns 400 for an unknown type, 409 if the id already exists,
        otherwise 201 with the new component's property. Publishes a
        ComponentChanged/ADD event.
        """
        component_type = obj_prop[ObjectProperty.OBJECT_TYPE]
        if component_type not in self.component_classes:
            return Response(Response.StatusCode.BAD_REQUEST, None)
        elif object_id in self.components:
            return Response(Response.StatusCode.CONFLICT, None)
        component_class = self.component_classes[component_type]
        self.components[object_id] = component_class(object_id,
                                                     self.dispatcher)
        # Component decides via on_initialize whether it came up cleanly.
        if self.components[object_id].on_initialize(obj_prop):
            self.components[object_id].\
                _object_property.set_state(ObjectProperty.State.RUNNING)
        else:
            self.components[object_id].\
                _object_property.set_state(ObjectProperty.State.ERROR)
        curr = self.components[object_id].object_property.packed_object()
        self._do_component_changed(ComponentChanged.Action.ADD,
                                   None,
                                   curr)
        logging.info("Created Component Type:%s ID:%s",
                     component_type, object_id)
        return Response(Response.StatusCode.CREATED,
                        self.components[object_id].object_property)
    def _do_delete_component(self, object_id):
        """DELETE components/<id>: finalize and drop the component.

        Always returns 200, even if the id was unknown. Publishes a
        ComponentChanged/DELETE event with the pre-deletion property.
        """
        if object_id in self.components:
            component = self.components[object_id]
            # Deep-copy first: on_finalize may mutate the property in place.
            prev = copy.deepcopy(component._object_property).packed_object()
            component.on_finalize()
            del self.components[object_id]
            self._do_component_changed(ComponentChanged.Action.DELETE,
                                       prev,
                                       None)
            logging.info("Deleted Component ID:%s", object_id)
        return Response(Response.StatusCode.OK, None)
    def _do_get_components(self):
        """GET components: map of object id -> packed component property."""
        body = {}
        for object_id in self.components:
            body[object_id] = \
                self.components[object_id].object_property.packed_object()
        return Response(Response.StatusCode.OK, body)
    def _do_get_component_types(self):
        """GET component_types: describe every registered component class.

        Instantiates each class once (dispatcher=None, throwaway id) just to
        read its property defaults. Any exception yields a 500 response.
        """
        comp_types = {}
        try:
            for type_name, clazz in self.component_classes.items():
                comp_id = "%s_%s" % (self.object_id, type_name)
                component = clazz(comp_id, None)
                obj_prop = component.object_property
                # NOTE(review): 'type' shadows the builtin of the same name.
                type = obj_prop.get_property(ObjectProperty.OBJECT_TYPE)
                super_type = obj_prop.get_property(ObjectProperty.OBJECT_SUPER_TYPE)
                # CONNECTION_TYPES is "name1:type1,name2:type2,..."; malformed
                # elements (not exactly one ':') are silently skipped.
                connection_types = {}
                connection_types_str = obj_prop.get_property(
                    ObjectProperty.CONNECTION_TYPES)
                conn_type_list = connection_types_str.split(",")
                for type_elem in conn_type_list:
                    type_elem_list = type_elem.split(":")
                    if len(type_elem_list) == 2:
                        connection_types[type_elem_list[0]] = type_elem_list[1]
                description = obj_prop.get_property(ObjectProperty.DESCRIPTION)
                target = ComponentType(type, super_type,
                                       connection_types, description)
                comp_types[type_name] = target.packed_object()
        except Exception, e:
            return Response(Response.StatusCode.INTERNAL_SERVER_ERROR,
                            str(e))
        return Response(Response.StatusCode.OK, comp_types)
    def _do_component_changed(self, action, prev, curr):
        """Publish a ComponentChanged event (async) with prev/curr snapshots."""
        body = {ComponentChanged.ACTION: action,
                ComponentChanged.PREV: prev,
                ComponentChanged.CURR: curr}
        self._publish_event_async(ComponentChanged.TYPE, body)
    def _do_event_componentmanagerchanged(self, event):
        """React to peer manager ADD/DELETE notifications by (un)registering
        the corresponding dispatcher client; malformed events are logged
        and dropped."""
        msg = None
        try:
            msg = ComponentManagerChanged.create_from_packed(event.body)
        except KeyError, e:
            logging.error("Receive Invalid ComponentManagerChanged Message"
                          + "KeyError: " + str(e))
            return
        if msg.action == ComponentManagerChanged.Action.ADD:
            self.__register_other_component_manager(msg.curr)
        elif msg.action == ComponentManagerChanged.Action.DELETE:
            self.__unregister_component_manager(
                msg.prev[ObjectProperty.OBJECT_ID])
| |
# -*- coding: utf-8 -*-
from collections import defaultdict
from django.template import Template
from cms import api
from cms.models import Placeholder
from cms.test_utils.project.placeholderapp.models import Example1
from cms.test_utils.testcases import TransactionCMSTestCase
from cms.toolbar.toolbar import CMSToolbar
from cms.utils.urlutils import admin_reverse
from sekizai.data import UniqueSequence
from sekizai.helpers import get_varname
class AliasTestCase(TransactionCMSTestCase):
    """Integration tests for the django-cms AliasPlugin: creation endpoint,
    recursion handling, publish/edit-mode visibility, editability and
    clipboard move/delete behaviour."""
    def _get_example_obj(self):
        """Create and return a throwaway Example1 instance whose placeholder
        can host plugins outside of any page."""
        obj = Example1.objects.create(
            char_1='one',
            char_2='two',
            char_3='tree',
            char_4='four'
        )
        return obj
    def test_add_plugin_alias(self):
        # Valid plugin_id / placeholder_id succeed (200); missing or unknown
        # ids are rejected with 400; anonymous requests get 403.
        page_en = api.create_page("PluginOrderPage", "col_two.html", "en")
        ph_en = page_en.placeholders.get(slot="col_left")
        text_plugin_1 = api.add_plugin(ph_en, "TextPlugin", "en", body="I'm the first")
        with self.login_user_context(self.get_superuser()):
            response = self.client.post(admin_reverse('cms_create_alias'), data={'plugin_id': text_plugin_1.pk})
            self.assertEqual(response.status_code, 200)
            response = self.client.post(admin_reverse('cms_create_alias'), data={'placeholder_id': ph_en.pk})
            self.assertEqual(response.status_code, 200)
            response = self.client.post(admin_reverse('cms_create_alias'))
            self.assertEqual(response.status_code, 400)
            response = self.client.post(admin_reverse('cms_create_alias'), data={'plugin_id': 20000})
            self.assertEqual(response.status_code, 400)
            response = self.client.post(admin_reverse('cms_create_alias'), data={'placeholder_id': 20000})
            self.assertEqual(response.status_code, 400)
        # Outside the login context: permission denied for anonymous users.
        response = self.client.post(admin_reverse('cms_create_alias'), data={'plugin_id': text_plugin_1.pk})
        self.assertEqual(response.status_code, 403)
        page_en.publish('en')
        response = self.client.get(page_en.get_absolute_url() + '?edit')
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "I'm the first", html=False)
    def test_alias_recursion(self):
        # Two placeholders aliasing each other must not loop forever;
        # the page still renders its actual content.
        page_en = api.create_page(
            "Alias plugin",
            "col_two.html",
            "en",
            slug="page1",
            published=True,
            in_navigation=True,
        )
        ph_1_en = page_en.placeholders.get(slot="col_left")
        ph_2_en = page_en.placeholders.get(slot="col_sidebar")
        api.add_plugin(ph_1_en, 'StylePlugin', 'en', tag_type='div', class_name='info')
        api.add_plugin(ph_1_en, 'AliasPlugin', 'en', alias_placeholder=ph_2_en)
        api.add_plugin(ph_2_en, 'AliasPlugin', 'en', alias_placeholder=ph_1_en)
        with self.login_user_context(self.get_superuser()):
            response = self.client.get(page_en.get_absolute_url() + '?edit')
            self.assertEqual(response.status_code, 200)
            self.assertContains(response, '<div class="info">', html=True)
    def test_alias_recursion_across_pages(self):
        superuser = self.get_superuser()
        page_1 = api.create_page("page-1", "col_two.html", "en", published=True)
        page_1_pl = page_1.placeholders.get(slot="col_left")
        source_plugin = api.add_plugin(page_1_pl, 'StylePlugin', 'en', tag_type='div', class_name='info')
        # this creates a recursive alias on the same page
        alias_plugin = api.add_plugin(page_1_pl, 'AliasPlugin', 'en', plugin=source_plugin, target=source_plugin)
        self.assertTrue(alias_plugin.is_recursive())
        with self.login_user_context(superuser):
            response = self.client.get(page_1.get_absolute_url() + '?edit')
            self.assertEqual(response.status_code, 200)
            self.assertContains(response, '<div class="info">', html=False)
        page_2 = api.create_page("page-2", "col_two.html", "en")
        page_2_pl = page_2.placeholders.get(slot="col_left")
        # This points to a plugin with a recursive alias
        api.add_plugin(page_2_pl, 'AliasPlugin', 'en', plugin=source_plugin)
        with self.login_user_context(superuser):
            response = self.client.get(page_2.get_absolute_url() + '?edit')
            self.assertEqual(response.status_code, 200)
            self.assertContains(response, '<div class="info">', html=False)
    def test_alias_content_plugin_display(self):
        '''
        In edit mode, content is shown regardless of the source page publish status.
        In published mode, content is shown only if the source page is published.
        '''
        superuser = self.get_superuser()
        source_page = api.create_page(
            "Alias plugin",
            "col_two.html",
            "en",
            published=False,
        )
        source_plugin = api.add_plugin(
            source_page.placeholders.get(slot="col_left"),
            'LinkPlugin',
            language='en',
            name='A Link',
            external_link='https://www.django-cms.org',
        )
        target_page = api.create_page(
            "Alias plugin",
            "col_two.html",
            "en",
            published=False,
        )
        api.add_plugin(
            target_page.placeholders.get(slot="col_left"),
            'AliasPlugin',
            language='en',
            plugin=source_plugin,
        )
        with self.login_user_context(superuser):
            # Not published, not edit mode: hide content
            response = self.client.get(target_page.get_absolute_url())
            self.assertEqual(response.status_code, 200)
            self.assertNotContains(response, '<a href="https://www.django-cms.org" >A Link</a>', html=True)
            # Not published, edit mode: show content
            response = self.client.get(target_page.get_absolute_url() + '?edit')
            self.assertEqual(response.status_code, 200)
            self.assertContains(response, '<a href="https://www.django-cms.org" >A Link</a>', html=True)
        source_page.publish('en')
        with self.login_user_context(superuser):
            # Published, not edit mode: show content
            response = self.client.get(target_page.get_absolute_url())
            self.assertEqual(response.status_code, 200)
            self.assertContains(response, '<a href="https://www.django-cms.org" >A Link</a>', html=True)
            # Published, edit mode: show content
            response = self.client.get(target_page.get_absolute_url() + '?edit')
            self.assertEqual(response.status_code, 200)
            self.assertContains(response, '<a href="https://www.django-cms.org" >A Link</a>', html=True)
    def test_alias_content_placeholder_display(self):
        '''
        In edit mode, content is shown regardless of the source page publish status.
        In published mode, content is shown only if the source page is published.
        '''
        superuser = self.get_superuser()
        source_page = api.create_page(
            "Alias plugin",
            "col_two.html",
            "en",
            published=False,
        )
        source_placeholder = source_page.placeholders.get(slot="col_left")
        api.add_plugin(
            source_placeholder,
            'LinkPlugin',
            language='en',
            name='A Link',
            external_link='https://www.django-cms.org',
        )
        target_page = api.create_page(
            "Alias plugin",
            "col_two.html",
            "en",
            published=False,
        )
        api.add_plugin(
            target_page.placeholders.get(slot="col_left"),
            'AliasPlugin',
            language='en',
            alias_placeholder=source_placeholder,
        )
        with self.login_user_context(superuser):
            # Not published, not edit mode: hide content
            response = self.client.get(target_page.get_absolute_url())
            self.assertEqual(response.status_code, 200)
            self.assertNotContains(response, '<a href="https://www.django-cms.org" >A Link</a>', html=True)
            # Not published, edit mode: show content
            response = self.client.get(target_page.get_absolute_url() + '?edit')
            self.assertEqual(response.status_code, 200)
            self.assertContains(response, '<a href="https://www.django-cms.org" >A Link</a>', html=True)
        source_page.publish('en')
        with self.login_user_context(superuser):
            # Published, not edit mode: show content
            response = self.client.get(target_page.get_absolute_url())
            self.assertEqual(response.status_code, 200)
            self.assertContains(response, '<a href="https://www.django-cms.org" >A Link</a>', html=True)
            # Published, edit mode: show content
            response = self.client.get(target_page.get_absolute_url() + '?edit')
            self.assertEqual(response.status_code, 200)
            self.assertContains(response, '<a href="https://www.django-cms.org" >A Link</a>', html=True)
    def test_alias_placeholder_is_not_editable(self):
        """
        When a placeholder is aliased, it shouldn't render as editable
        in the structure mode.
        """
        source_page = api.create_page(
            "Home",
            "col_two.html",
            "en",
            published=True,
            in_navigation=True,
        )
        source_placeholder = source_page.placeholders.get(slot="col_left")
        style = api.add_plugin(
            source_placeholder,
            'StylePlugin',
            'en',
            tag_type='div',
            class_name='info',
        )
        target_page = api.create_page(
            "Target",
            "col_two.html",
            "en",
            published=True,
            in_navigation=True,
        )
        target_placeholder = target_page.placeholders.get(slot="col_left")
        alias = api.add_plugin(
            target_placeholder,
            'AliasPlugin',
            'en',
            alias_placeholder=source_placeholder,
        )
        with self.login_user_context(self.get_superuser()):
            context = self.get_context(path=target_page.get_absolute_url(), page=target_page)
            request = context['request']
            request.session['cms_edit'] = True
            request.toolbar = CMSToolbar(request)
            renderer = request.toolbar.get_content_renderer()
            context[get_varname()] = defaultdict(UniqueSequence)
            output = renderer.render_placeholder(
                target_placeholder,
                context=context,
                language='en',
                page=target_page,
                editable=True
            )
            # Only the alias plugin itself gets editing markup; the aliased
            # source plugin must not.
            tag_format = '<template class="cms-plugin cms-plugin-start cms-plugin-{}">'
            expected_plugins = [alias]
            unexpected_plugins = [style]
            for plugin in expected_plugins:
                start_tag = tag_format.format(plugin.pk)
                self.assertIn(start_tag, output)
            for plugin in unexpected_plugins:
                start_tag = tag_format.format(plugin.pk)
                self.assertNotIn(start_tag, output)
            editable_placeholders = renderer.get_rendered_editable_placeholders()
            self.assertNotIn(source_placeholder,editable_placeholders)
    def test_alias_from_page_change_form_text(self):
        # The change form for a placeholder alias on a page should link
        # back to the source page instead of offering settings.
        superuser = self.get_superuser()
        api.create_page(
            "Home",
            "col_two.html",
            "en",
            published=True,
            in_navigation=True,
        )
        source_page = api.create_page(
            "Source",
            "col_two.html",
            "en",
            published=True,
            in_navigation=True,
        )
        source_placeholder = source_page.placeholders.get(slot="col_left")
        api.add_plugin(
            source_placeholder,
            'StylePlugin',
            'en',
            tag_type='div',
            class_name='info',
        )
        target_page = api.create_page(
            "Target",
            "col_two.html",
            "en",
            published=True,
            in_navigation=True,
        )
        target_placeholder = target_page.placeholders.get(slot="col_left")
        alias = api.add_plugin(
            target_placeholder,
            'AliasPlugin',
            'en',
            alias_placeholder=source_placeholder,
        )
        endpoint = self.get_change_plugin_uri(alias)
        with self.login_user_context(superuser):
            response = self.client.get(endpoint)
            self.assertEqual(response.status_code, 200)
        expected = ('This is an alias reference, you can edit the '
                    'content only on the <a href="/en/source/?edit" '
                    'target="_parent">Source</a> page.')
        self.assertContains(response, expected)
    def test_alias_from_generic_change_form_text(self):
        # Aliases on non-page objects have no source page to link to, so the
        # change form only shows the generic no-settings message.
        superuser = self.get_superuser()
        source_placeholder = self._get_example_obj().placeholder
        target_placeholder = self._get_example_obj().placeholder
        alias = api.add_plugin(
            target_placeholder,
            'AliasPlugin',
            'en',
            alias_placeholder=source_placeholder,
        )
        endpoint = self.get_change_plugin_uri(alias, container=Example1)
        with self.login_user_context(superuser):
            response = self.client.get(endpoint)
            self.assertEqual(response.status_code, 200)
        expected = 'There are no further settings for this plugin. Please press save.'
        self.assertContains(response, expected)
    def test_move_and_delete_plugin_alias(self):
        '''
        Test moving the plugin from the clipboard to a placeholder.
        '''
        page_en = api.create_page("PluginOrderPage", "col_two.html", "en",
                                  slug="page1", published=True, in_navigation=True)
        ph_en = page_en.placeholders.get(slot="col_left")
        text_plugin_1 = api.add_plugin(ph_en, "TextPlugin", "en", body="I'm the first")
        with self.login_user_context(self.get_superuser()):
            #
            # Copies the placeholder to the clipboard...
            #
            self.client.post(admin_reverse('cms_create_alias'), data={'plugin_id': text_plugin_1.pk})
            #
            # Determine the copied plugins's ID. It should be in the special
            # 'clipboard' placeholder.
            #
            try:
                clipboard = Placeholder.objects.get(slot='clipboard')
            except (Placeholder.DoesNotExist, Placeholder.MultipleObjectsReturned):
                # NOTE(review): the 0 fallback makes the assertGreater below
                # raise AttributeError (int has no .pk) instead of failing
                # cleanly -- probably intended as self.fail().
                clipboard = 0
            self.assertGreater(clipboard.pk, 0)
            # The clipboard should only have a single plugin...
            self.assertEqual(len(clipboard.get_plugins_list()), 1)
            alias_plugin = clipboard.get_plugins_list()[0]
            copy_endpoint = self.get_copy_plugin_uri(alias_plugin)
            #
            # Test moving it from the clipboard to the page's placeholder...
            #
            response = self.client.post(copy_endpoint, data={
                'source_placeholder_id': clipboard.pk,
                'source_plugin': alias_plugin.pk,
                'source_language': 'en',
                'target_placeholder_id': ph_en.pk,
                'target_language': 'en',
            })
            self.assertEqual(response.status_code, 200)
            #
            # Now, test deleting the copy still on the clipboard...
            #
            delete_endpoint = self.get_delete_plugin_uri(alias_plugin)
            response = self.client.post(delete_endpoint, data={})
            self.assertEqual(response.status_code, 200)
    def test_context_menus(self):
        # Smoke test: the extra-menu-items template tag renders for an
        # ordinary page placeholder.
        page_en = api.create_page("PluginOrderPage", "col_two.html", "en",
                                  slug="page1", published=True, in_navigation=True)
        ph_en = page_en.placeholders.get(slot="col_left")
        context = self.get_context(page=page_en)
        context['placeholder'] = ph_en
        template = Template('{% load cms_tags %}{% render_extra_menu_items placeholder %}')
        output = template.render(context)
        # NOTE(review): assertTrue(len(output), 200) only checks truthiness;
        # the 200 is silently treated as the msg argument. Probably meant
        # assertEqual or a length comparison -- confirm intent.
        self.assertTrue(len(output), 200)
| |
from django.contrib import admin, messages
from django.core import urlresolvers
from django.contrib.admin.utils import flatten_fieldsets
from django.http import HttpResponse
from django.conf.urls import url
from django.shortcuts import redirect, get_object_or_404
from django.db import transaction
from django.contrib.auth.decorators import permission_required
from adminsortable.admin import SortableAdmin, NonSortableParentAdmin, SortableStackedInline
from courseevaluations.models import QuestionSet, FreeformQuestion, MultipleChoiceQuestion, MultipleChoiceQuestionOption, EvaluationSet, DormParentEvaluation, CourseEvaluation, IIPEvaluation, MultipleChoiceQuestionAnswer, FreeformQuestionAnswer, StudentEmailTemplate, MELPEvaluation
from academics.models import Student, AcademicYear, Enrollment, Section, Course, Teacher
from academics.utils import fmpxmlparser
class ReadOnlyAdmin(admin.ModelAdmin):
    """Admin base class that exposes every field read-only and blocks adds."""
    def get_readonly_fields(self, request, obj=None):
        """Return every field name on the form as read-only."""
        if self.declared_fieldsets:
            # Explicitly declared fieldsets win; flatten them into one list.
            return flatten_fieldsets(self.declared_fieldsets)
        # Fall back to every local concrete and many-to-many field,
        # de-duplicated via a set.
        names = {field.name for field in self.opts.local_fields}
        names.update(field.name for field in self.opts.local_many_to_many)
        return list(names)
    def has_add_permission(self, *args, **kwargs):
        """Nobody may create objects through this admin."""
        return False
class MultipleChoiceQuestionOptionInline(SortableStackedInline):
    """Inline editor for the options belonging to a multiple-choice question."""
    model = MultipleChoiceQuestionOption
    def get_extra(self, request, obj=None, **kwargs):
        """Offer up to 5 blank option forms, minus options already saved."""
        desired = 5
        if not obj:
            return desired
        existing = obj.multiplechoicequestionoption_set.count()
        return desired - existing
class FreeformQuestionInline(SortableStackedInline):
    """Read-only inline listing freeform questions with a link to each
    question's own change form."""
    model = FreeformQuestion
    fields = ['question', 'edit_link']
    readonly_fields = ['question', 'edit_link']
    extra = 0
    def has_add_permission(self, request):
        # Questions are managed through their own admin, not inline.
        return False
    def has_delete_permission(self, request, obj=None):
        return False
    def edit_link(self, o):
        """Render an HTML link to the question's admin change form."""
        if o.id:
            return "<a href=\"{0:}\" target=_blank>Edit Question</a>".format(urlresolvers.reverse('admin:courseevaluations_freeformquestion_change', args=(o.id,)))
        # Bug fix: previously fell through and returned None for unsaved
        # rows; return "" to match MultipleChoiceQuestionInline.edit_link.
        return ""
    edit_link.allow_tags = True
    edit_link.short_description = 'Edit Link'
class MultipleChoiceQuestionInline(SortableStackedInline):
    """Read-only inline listing multiple-choice questions with a link to each
    question's own change form."""
    model = MultipleChoiceQuestion
    fields = ['question', 'edit_link']
    readonly_fields = ['question', 'edit_link']
    extra = 0
    def has_add_permission(self, request):
        # Questions are managed through their own admin, not inline.
        return False
    def has_delete_permission(self, request, obj=None):
        return False
    def edit_link(self, o):
        """Render an HTML link to the question's admin change form, or an
        empty string for unsaved rows."""
        if o.id:
            return "<a href=\"{0:}\" target=_blank>Edit Question</a>".format(urlresolvers.reverse('admin:courseevaluations_multiplechoicequestion_change', args=(o.id,)))
        return ""
    edit_link.allow_tags = True
    edit_link.short_description = 'Edit Link'
class MultipleChoiceQuestionAdmin(SortableAdmin):
    """Admin for a multiple-choice question with its options edited inline."""
    inlines = [MultipleChoiceQuestionOptionInline]
class QuestionSetAdmin(NonSortableParentAdmin):
    """Admin for a question set, showing both question kinds inline."""
    inlines = [MultipleChoiceQuestionInline, FreeformQuestionInline]
class CourseEvaluationAdmin(admin.ModelAdmin):
    """Admin list for course evaluations: searchable by student name/email,
    filterable by evaluation set, completion status and student."""
    search_fields = ['student__first_name', 'student__last_name', 'student__email']
    list_display = ['__str__', 'complete']
    class EvaluationSetListFilter(admin.SimpleListFilter):
        """Sidebar filter restricting the list to one evaluation set."""
        title = 'evaluation set'
        parameter_name = 'evaluation_set_id'
        def lookups(self, request, model_admin):
            return [(es.id, es.name) for es in EvaluationSet.objects.all()]
        def queryset(self, request, queryset):
            if self.value():
                return queryset.filter(evaluation_set__id=self.value())
            return queryset
    class StudentListFilter(admin.SimpleListFilter):
        """Sidebar filter offering only students who have evaluables in the
        currently displayed queryset."""
        title = 'student'
        parameter_name = 'student_id'
        def lookups(self, request, model_admin):
            current_evaluables = model_admin.get_queryset(request)
            students = Student.objects.filter(evaluable__in=current_evaluables).distinct()
            return [(student.id, student.name) for student in students]
        def queryset(self, request, queryset):
            if self.value():
                return queryset.filter(student__id=self.value())
            return queryset
    list_filter = [EvaluationSetListFilter, 'complete', StudentListFilter]
class EvaluationSetAdmin(admin.ModelAdmin):
    def change_view(self, request, object_id, form_url='', extra_context=None):
        """Inject all question sets into the change-form context so the
        template can offer them for the bulk-creation actions."""
        extra_context = extra_context or {}
        extra_context["question_sets"] = QuestionSet.objects.all()
        return super().change_view(request=request, object_id=object_id, form_url=form_url, extra_context=extra_context)
    def create_iip_evaluations(self, request, object_id):
        """Bulk-create IIPEvaluation rows for the evaluation set `object_id`
        from an uploaded FileMaker XML export.

        Validates permission, POST method, a selected question set and an
        uploaded file; on any failure redirects back to the change form with
        an error message and creates nothing. All rows are created in a
        single transaction.
        """
        redirect_url = urlresolvers.reverse('admin:courseevaluations_evaluationset_change', args=(object_id, ))
        if not request.user.has_perm('courseevaluations.add_iipevaluation'):
            messages.error(request, "You do not have the appropriate permissions to add IIP evaluations")
            return redirect(redirect_url)
        if request.method != 'POST':
            messages.error(request, "Invalid request, please try again. No evaluations created.")
            return redirect(redirect_url)
        question_set_id = request.POST.get("question_set_id")
        if not question_set_id:
            messages.error(request, "Question set is required. No evaluations created.")
            return redirect(redirect_url)
        iip_evaluation_file = request.FILES.get('iip_evaluation_file')
        if not iip_evaluation_file:
            messages.error(request, "IIP evaluation file is required. No evaluations created.")
            return redirect(redirect_url)
        data = fmpxmlparser.parse_from_file(iip_evaluation_file)
        results = data['results']
        creation_count = 0
        # All-or-nothing: a lookup failure on any row rolls back every
        # evaluation created so far.
        with transaction.atomic():
            question_set = get_object_or_404(QuestionSet, pk=question_set_id)
            evaluation_set = get_object_or_404(EvaluationSet, pk=object_id)
            # IIP rows carry no year, so enrollments are resolved against
            # the current academic year.
            academic_year = AcademicYear.objects.current()
            for row in results:
                fields = row['parsed_fields']
                student_id = fields['IDStudent']
                teacher_id = fields['SectionTeacher::IDTEACHER']
                student = Student.objects.get(student_id=student_id)
                teacher = Teacher.objects.get(teacher_id=teacher_id)
                enrollment = Enrollment.objects.get(student=student, academic_year=academic_year)
                evaluable = IIPEvaluation()
                evaluable.student = student
                evaluable.teacher = teacher
                evaluable.evaluation_set = evaluation_set
                evaluable.question_set = question_set
                evaluable.enrollment = enrollment
                evaluable.save()
                creation_count += 1
        messages.add_message(request, messages.SUCCESS, "Successfully created {count:} IIP evaluations".format(count=creation_count))
        return redirect(redirect_url)
    def create_course_evaluations(self, request, object_id):
        """Bulk-create course (or MELP) evaluations for the evaluation set
        `object_id` from an uploaded FileMaker XML export of sections.

        One evaluable is created per student enrolled in each listed
        section. Validates permission, POST method, course type, question
        set and uploaded file, redirecting back with an error message on
        any failure. All rows are created in a single transaction.
        """
        redirect_url = urlresolvers.reverse('admin:courseevaluations_evaluationset_change', args=(object_id, ))
        if not request.user.has_perm('courseevaluations.add_courseevaluation'):
            messages.error(request, "You do not have the appropriate permissions to add course evaluations")
            return redirect(redirect_url)
        if request.method != 'POST':
            messages.error(request, "Invalid request, please try again. No evaluations created.")
            return redirect(redirect_url)
        course_type = request.POST.get("course_type")
        #Course class will be the either a MELPEvaluation or a CourseEvaluation
        if course_type == "melp":
            CourseClass = MELPEvaluation
        elif course_type == "course":
            CourseClass = CourseEvaluation
        else:
            messages.error(request, "Course type is not defined. No evaluations created.")
            return redirect(redirect_url)
        question_set_id = request.POST.get("question_set_id")
        if not question_set_id:
            messages.error(request, "Question set is required. No evaluations created.")
            return redirect(redirect_url)
        course_evaluation_file = request.FILES.get('course_evaluation_file')
        if not course_evaluation_file:
            messages.error(request, "Course evaluation file is required. No evaluations created.")
            return redirect(redirect_url)
        data = fmpxmlparser.parse_from_file(course_evaluation_file)
        results = data['results']
        creation_count = 0
        # All-or-nothing: a lookup failure on any row rolls back every
        # evaluation created so far.
        with transaction.atomic():
            question_set = get_object_or_404(QuestionSet, pk=question_set_id)
            evaluation_set = get_object_or_404(EvaluationSet, pk=object_id)
            for row in results:
                fields = row['parsed_fields']
                csn = fields['CourseSectionNumber']
                # Unlike the IIP import, the academic year comes from the
                # uploaded row itself.
                academic_year = fields['AcademicYear']
                section = Section.objects.get(csn=csn, academic_year__year=academic_year)
                for student in section.students.all():
                    enrollment = Enrollment.objects.get(student=student, academic_year__year=academic_year)
                    evaluable = CourseClass()
                    evaluable.student = student
                    evaluable.enrollment = enrollment
                    evaluable.section = section
                    evaluable.question_set = question_set
                    evaluable.evaluation_set = evaluation_set
                    evaluable.save()
                    creation_count += 1
        messages.add_message(request, messages.SUCCESS, "Successfully created {count:} course evaluations".format(count=creation_count))
        return redirect(redirect_url)
def create_dorm_parent_evaluations(self, request, object_id):
    """Bulk-create dorm-parent evaluations for an EvaluationSet.

    For every current enrollment with a dorm, one DormParentEvaluation is
    created per dorm head (the "parent"). POST param: ``question_set_id``.
    Redirects back to the EvaluationSet change page with a status message.
    """
    redirect_url = urlresolvers.reverse('admin:courseevaluations_evaluationset_change', args=(object_id, ))
    if not request.user.has_perm('courseevaluations.add_dormparentevaluation'):
        # Bug fix: message previously said "IIP evaluations" (copy-paste from
        # create_iip_evaluations) although the dorm-parent perm is checked.
        messages.error(request, "You do not have the appropriate permissions to add dorm parent evaluations")
        return redirect(redirect_url)
    if request.method != 'POST':
        messages.error(request, "Invalid request, please try again. No evaluations created.")
        return redirect(redirect_url)
    question_set_id = request.POST.get("question_set_id")
    if not question_set_id:
        messages.error(request, "Question set is required. No evaluations created.")
        return redirect(redirect_url)
    # All-or-nothing batch creation.
    with transaction.atomic():
        question_set = get_object_or_404(QuestionSet, pk=question_set_id)
        evaluation_set = get_object_or_404(EvaluationSet, pk=object_id)
        academic_year = AcademicYear.objects.current()
        # Only current students who actually live in a dorm are evaluated.
        enrollments = Enrollment.objects.filter(student__current=True, academic_year=academic_year).exclude(dorm=None)
        creation_count = 0
        for enrollment in enrollments:
            student = enrollment.student
            dorm = enrollment.dorm
            heads = dorm.heads.all()
            # One evaluation per (student, dorm head) pair.
            for teacher in heads:
                evaluable = DormParentEvaluation()
                evaluable.question_set = question_set
                evaluable.evaluation_set = evaluation_set
                evaluable.dorm = dorm
                evaluable.parent = teacher
                evaluable.student = student
                evaluable.enrollment = enrollment
                evaluable.save()
                creation_count += 1
    messages.add_message(request, messages.SUCCESS, "Successfully created {count:} dorm parent evaluations".format(count=creation_count))
    return redirect(redirect_url)
def get_urls(self):
    """Prepend the custom evaluation-creation endpoints to the default admin URLs."""
    # (regex, view, route name) triples for the three bulk-create actions.
    routes = [
        (r'^(?P<object_id>[0-9]+)/process/create/dorm/parent/$',
         self.create_dorm_parent_evaluations,
         'courseevaluations_evaluationset_create_dorm_parent_evals'),
        (r'^(?P<object_id>[0-9]+)/process/create/course/$',
         self.create_course_evaluations,
         'courseevaluations_evaluationset_create_course_evals'),
        (r'^(?P<object_id>[0-9]+)/process/create/iip/$',
         self.create_iip_evaluations,
         'courseevaluations_evaluationset_create_iip_evals'),
    ]
    # Wrap each view with admin_view so admin auth/permissions apply.
    my_urls = [
        url(pattern, self.admin_site.admin_view(view), name=name)
        for pattern, view, name in routes
    ]
    # Custom routes must come first so they shadow the default patterns.
    return my_urls + super().get_urls()
class MELPEvaluationAdmin(CourseEvaluationAdmin):
    """Admin for MELP evaluations; inherits all behavior from CourseEvaluationAdmin."""
    pass
class IIPEvaluationAdmin(ReadOnlyAdmin):
    """Read-only admin for IIP evaluations, filterable by evaluation set and student."""
    list_filter = ['evaluation_set__name', ('student', admin.RelatedOnlyFieldListFilter)]
class DormParentEvaluationAdmin(ReadOnlyAdmin):
    """Read-only admin for dorm-parent evaluations, filterable by evaluation set, dorm and student."""
    list_filter = ['evaluation_set__name', 'dorm', ('student', admin.RelatedOnlyFieldListFilter)]
# Register your models here.
# Each model is paired with its customized admin; StudentEmailTemplate uses
# the default ModelAdmin.
admin.site.register(QuestionSet, QuestionSetAdmin)
admin.site.register(FreeformQuestion, SortableAdmin)
admin.site.register(MultipleChoiceQuestion, MultipleChoiceQuestionAdmin)
admin.site.register(EvaluationSet, EvaluationSetAdmin)
admin.site.register(CourseEvaluation, CourseEvaluationAdmin)
admin.site.register(MELPEvaluation, MELPEvaluationAdmin)
admin.site.register(IIPEvaluation, IIPEvaluationAdmin)
admin.site.register(DormParentEvaluation, DormParentEvaluationAdmin)
admin.site.register(StudentEmailTemplate)
| |
import time
import threading
import collections
import synapse.lib.sched as s_sched
from synapse.eventbus import EventBus
# Unique sentinel used to distinguish "key absent" from a cached None value.
miss = ()
class Cache(EventBus):
    '''
    A callback driven cache with options for timeout / maxsize.
    Example:
        cache = Cache(maxtime=60)
        cache.put(iden,thing)
        # ... some time later
        valu = cache.get(iden)
    Notes:
        The maxtime option is used to specify cache time based
        flushing.  Cache accesses continue to bump a timestamp
        forward to facilitate flushing the cache entries which
        were least recently requested.
    '''
    def __init__(self, maxtime=None, onmiss=None):
        EventBus.__init__(self)
        self.sched = s_sched.getGlobSched()
        self.onmiss = onmiss
        self.cache = {}      # key -> cached value
        self.lasthit = {}    # key -> timestamp of last access
        self.schevt = None   # pending reaper schedule handle (cancelled on fini)
        self.maxtime = None
        self.cachelock = threading.Lock()
        self.onfini(self._onCacheFini)
        if maxtime is not None:
            self.setMaxTime(maxtime)

    def setOnMiss(self, onmiss):
        '''
        Set a callback function to use on cache miss.
        Example:
            def onmiss(key):
                return stuff.get(key)
            cache.setOnMiss( onmiss )
        '''
        self.onmiss = onmiss

    def setMaxTime(self, valu):
        '''
        Set the maximum idle time (in seconds) before an entry is flushed.
        '''
        oldm = self.maxtime
        self.maxtime = valu
        # Start the periodic reaper only on the first transition from
        # "no max time" to having one; afterwards it reschedules itself.
        if oldm is None:
            self._checkCacheTimes()

    def _checkCacheTimes(self):
        # Flush entries which have not been accessed within maxtime, then
        # reschedule ourselves (unless the bus is shutting down).
        mintime = time.time() - self.maxtime
        try:
            stale = [k for (k, t) in self.lasthit.items() if t < mintime]
            [self.pop(k) for k in stale]
        finally:
            if not self.isfini and self.maxtime is not None:
                ival = self.maxtime / 10.0
                # Bug fix: keep the schedule handle so _onCacheFini can
                # actually cancel the pending callback (it was never stored).
                self.schevt = self.sched.insec(ival, self._checkCacheTimes)

    def clear(self):
        '''
        Flush and clear the entire cache.
        '''
        # Fire cache:flush for every key before dropping the data.
        [self.flush(key) for key in self.keys()]
        self.cache.clear()
        self.lasthit.clear()

    def get(self, key):
        '''
        Return a val from the cache.
        Example:
            val = cache.get(key)
        '''
        val = self.cache.get(key, miss)
        if val is not miss:
            self.lasthit[key] = time.time()
            return val
        if self.onmiss is None:
            return None
        # Double-checked under the lock so concurrent misses on the same
        # key only invoke onmiss once.
        with self.cachelock:
            val = self.cache.get(key, miss)
            if val is miss:
                val = self.onmiss(key)
                self.cache[key] = val
            self.lasthit[key] = time.time()
            return val

    def put(self, key, val):
        '''
        Put a key:val into the cache.
        Example:
            cache.put('woot',10)
        '''
        self.cache[key] = val
        self.lasthit[key] = time.time()
        self.fire('cache:put', key=key, val=val)

    def pop(self, key):
        '''
        Remove and return a val from the cache.
        Example:
            cache.pop('woot')
        '''
        val = self.cache.pop(key, None)
        self.lasthit.pop(key, None)
        self.fire('cache:flush', key=key, val=val)
        self.fire('cache:pop', key=key, val=val)
        return val

    def flush(self, key):
        '''
        Flush a key:val within the cache.
        Example:
            cache.flush('woot')
        Notes:
            * Mostly used to trigger "cache:flush" events
        '''
        val = self.cache.get(key)
        self.fire('cache:flush', key=key, val=val)
        return val

    def keys(self):
        '''
        Return a list of the keys in the cache.
        Example:
            for key in cache.keys():
                stuff(key)
        '''
        return list(self.cache.keys())

    def values(self):
        # Snapshot of the cached values.
        return list(self.cache.values())

    def __len__(self):
        return len(self.cache)

    def __iter__(self):
        # Bug fix: __iter__ must return an *iterator*; returning the list
        # itself raised TypeError ("iter() returned non-iterator") when the
        # cache was used in a for loop. Snapshot first so callers may mutate
        # the cache while iterating.
        return iter(list(self.cache.items()))

    def _onCacheFini(self):
        # Pop (and thereby fire flush/pop events for) every entry, then
        # cancel any pending reaper schedule.
        for key in self.keys():
            self.pop(key)
        if self.schevt is not None:
            self.sched.cancel(self.schevt)
class FixedCache(EventBus):
    '''
    Implements a fixed-size cache.
    For implementation speed, the cache will flush oldest values first
    regardless of last cache hit.
    '''
    def __init__(self, maxsize=10000, onmiss=None):
        EventBus.__init__(self)
        self.maxsize = maxsize
        self.onmiss = onmiss
        self.cache = {}                       # key -> value
        self.fifo = collections.deque()       # insertion order, oldest first
        self.cachelock = threading.Lock()

    def get(self, key):
        '''
        Return the value from the cache. If onmiss is set, lookup
        entry on cache miss and add to cache.
        Example:
            valu = cache.get('foo')
            if valu != None:
                dostuff(valu)
        '''
        with self.cachelock:
            found = self.cache.get(key, miss)
            if found is miss and self.onmiss:
                # Populate on miss, then evict oldest entries past maxsize.
                found = self.onmiss(key)
                self.cache[key] = found
                self.fifo.append(key)
                while len(self.fifo) > self.maxsize:
                    evicted = self.fifo.popleft()
                    self.cache.pop(evicted, None)
            # A still-missing entry (no onmiss) is reported as None.
            return None if found is miss else found

    def clear(self):
        '''
        Remove all entries from the FixedCache.
        '''
        with self.cachelock:
            self.fifo.clear()
            self.cache.clear()
class TufoCache(Cache):
    """A Cache keyed by tufo iden which lazily loads tufos from a cortex."""

    def __init__(self, core, maxtime=None):
        Cache.__init__(self, maxtime=maxtime)
        self.core = core
        # Cache misses resolve the tufo directly from the cortex by iden.
        self.setOnMiss( core.getTufoByIden )

    def _onTufoFlush(self, event):
        # Flush handler: write the cached tufo's props back to the live
        # tufo in the cortex.
        # NOTE(review): nothing here registers this handler for the
        # 'cache:flush' event — presumably wired up by a subclass or
        # caller; confirm.
        iden = event[1].get('key')
        tufo0 = event[1].get('val')
        tufo1 = self.core.getTufoByIden(iden)
        if tufo1 == None:
            # Tufo no longer exists in the cortex; nothing to sync.
            return
        self.core.setTufoProps(tufo1, **tufo0[1])
class TufoPropCache(TufoCache):
    """A TufoCache keyed by a property *value* rather than by iden."""

    def __init__(self, core, prop, maxtime=None):
        TufoCache.__init__(self, core, maxtime=maxtime)
        self.prop = prop
        # Override the iden-based miss handler with a prop-value lookup.
        self.setOnMiss( self.getTufoByValu )

    def getTufoByValu(self, valu):
        # Resolve a tufo by (fixed prop, given value) from the cortex.
        return self.core.getTufoByProp(self.prop,valu)
def keymeth(name):
    '''
    Decorator for use with OnDem to add key callback methods.

    Tags the decorated function with the OnDem key it resolves;
    the function itself is returned unchanged.
    '''
    def _mark(func):
        func._keycache_name = name
        return func
    return _mark
class OnDem(collections.defaultdict):
    '''
    A dictionary based caching on-demand resolver.
    Example:
        class Woot(OnDem):
            @keymeth('foo')
            def _getFooThing(self):
                # only called once
                return FooThing()
        woot = Woot()
        foo = woot.get('foo')
    '''
    def __init__(self):
        collections.defaultdict.__init__(self)
        # Harvest methods tagged by the @keymeth decorator into a
        # name -> callable map consulted by __missing__.
        self._key_funcs = {}
        for name in dir(self):
            attr = getattr(self, name, None)
            keyn = getattr(attr, '_keycache_name', None)
            # Idiom fix: identity comparison with None (was `== None`).
            if keyn is None:
                continue
            self._key_funcs[keyn] = attr

    def __missing__(self, name):
        # Resolve the value once, memoize it, and return it.
        # Raises KeyError when no resolver is registered for *name*.
        func = self._key_funcs.get(name)
        if func is None:
            raise KeyError(name)
        valu = func()
        self[name] = valu
        return valu

    def get(self, name):
        '''
        Return the value for the given OnDem key.
        Example:
            woot = od.get('woot')
        '''
        return self[name]

    def add(self, name, func, *args, **kwargs):
        '''
        Add a key lookup function callback to the OnDem dict.
        Example:
            def getfoo():
                return FooThing()
            od = OnDem()
            od.add('foo', getfoo)
            foo = od.get('foo')
        '''
        # Bind the extra arguments now; the callback is invoked with none.
        def keyfunc():
            return func(*args, **kwargs)
        self._key_funcs[name] = keyfunc
class KeyCache(collections.defaultdict):
    '''
    A fast key/val lookup cache.
    Example:
        cache = KeyCache( getFooThing )
        valu = cache[x]
    '''
    def __init__(self, lookmeth):
        collections.defaultdict.__init__(self)
        self.lookmeth = lookmeth

    def __missing__(self, key):
        # Resolve via the lookup callable and memoize before returning.
        self[key] = self.lookmeth(key)
        return self[key]

    def pop(self, key):
        # Unlike dict.pop, a missing key yields None rather than KeyError.
        return collections.defaultdict.pop(self, key, None)

    def get(self, key):
        # Always resolves: a miss triggers __missing__ via item access.
        return self[key]

    def put(self, key, val):
        # Seed the cache without consulting the lookup callable.
        self[key] = val
class RefDict:
    '''
    Allow reference counted ( and instance folded ) cache.

    Repeated puts of the same key fold onto the first stored value and
    bump its reference count; a value only leaves the cache once it has
    been popped as many times as it was put.
    '''
    def __init__(self):
        self.vals = {}
        self.refs = collections.defaultdict(int)
        self.lock = threading.Lock()

    def put(self, key, val):
        # Thread-safe single put; returns the folded (first-stored) value.
        with self.lock:
            return self._put(key, val)

    def puts(self, items):
        # Thread-safe bulk put of (key, val) pairs.
        with self.lock:
            return [self._put(key, val) for key, val in items]

    def get(self, key):
        # Lock-free read; missing keys yield None.
        return self.vals.get(key)

    def pop(self, key):
        # Thread-safe single pop; returns the value only on the final deref.
        with self.lock:
            return self._pop(key)

    def pops(self, keys):
        # Thread-safe bulk pop.
        with self.lock:
            return [self._pop(key) for key in keys]

    def _put(self, key, val):
        stored = self.vals.setdefault(key, val)
        self.refs[key] += 1
        return stored

    def _pop(self, key):
        self.refs[key] -= 1
        if self.refs[key] > 0:
            # Still referenced elsewhere; keep the value cached.
            return None
        self.refs.pop(key, None)
        return self.vals.pop(key, None)

    def __len__(self):
        return len(self.vals)
| |
import json
import pytest
import responses
from twitch.client import TwitchClient
from twitch.constants import BASE_URL
from twitch.exceptions import TwitchAttributeException
from twitch.resources import Channel, Follow, Subscription, User, UserBlock
# Canonical API payload fragments reused across the tests below.
example_user = {
    "_id": "44322889",
    "name": "dallas",
}
example_channel = {
    "_id": 121059319,
    "name": "moonmoon_ow",
}
# A follow payload embeds a channel object.
example_follow = {
    "created_at": "2016-09-16T20:37:39Z",
    "notifications": False,
    "channel": example_channel,
}
# A block payload embeds a user object.
example_block = {
    "_id": 34105660,
    "updated_at": "2016-12-15T18:58:11Z",
    "user": example_user,
}
@responses.activate
def test_get():
    """GET /user returns the authenticated user as a User resource."""
    responses.add(
        responses.GET,
        "{}user".format(BASE_URL),
        body=json.dumps(example_user),
        status=200,
        content_type="application/json",
    )
    client = TwitchClient("client id", "oauth token")
    user = client.users.get()
    assert len(responses.calls) == 1
    assert isinstance(user, User)
    assert user.id == example_user["_id"]
    assert user.name == example_user["name"]
@responses.activate
def test_get_by_id():
    """GET /users/<id> returns the requested user as a User resource."""
    user_id = 1234
    responses.add(
        responses.GET,
        "{}users/{}".format(BASE_URL, user_id),
        body=json.dumps(example_user),
        status=200,
        content_type="application/json",
    )
    client = TwitchClient("client id")
    user = client.users.get_by_id(user_id)
    assert len(responses.calls) == 1
    assert isinstance(user, User)
    assert user.id == example_user["_id"]
    assert user.name == example_user["name"]
@responses.activate
def test_get_emotes():
    """get_emotes unwraps the 'emoticon_sets' envelope into a plain dict."""
    user_id = 1234
    response = {"emoticon_sets": {"17937": [{"code": "Kappa", "id": 25}]}}
    responses.add(
        responses.GET,
        "{}users/{}/emotes".format(BASE_URL, user_id),
        body=json.dumps(response),
        status=200,
        content_type="application/json",
    )
    client = TwitchClient("client id", "oauth token")
    emotes = client.users.get_emotes(user_id)
    assert len(responses.calls) == 1
    assert isinstance(emotes, dict)
    assert emotes["17937"] == response["emoticon_sets"]["17937"]
@responses.activate
def test_check_subscribed_to_channel():
    """check_subscribed_to_channel returns a Subscription with a nested Channel."""
    user_id = 1234
    channel_id = 12345
    response = {
        "_id": "c660cb408bc3b542f5bdbba52f3e638e652756b4",
        "created_at": "2016-12-12T15:52:52Z",
        "channel": example_channel,
    }
    responses.add(
        responses.GET,
        "{}users/{}/subscriptions/{}".format(BASE_URL, user_id, channel_id),
        body=json.dumps(response),
        status=200,
        content_type="application/json",
    )
    client = TwitchClient("client id", "oauth token")
    subscription = client.users.check_subscribed_to_channel(user_id, channel_id)
    assert len(responses.calls) == 1
    assert isinstance(subscription, Subscription)
    assert subscription.id == response["_id"]
    # The embedded channel payload must be hydrated into a Channel resource.
    assert isinstance(subscription.channel, Channel)
    assert subscription.channel.id == example_channel["_id"]
    assert subscription.channel.name == example_channel["name"]
@responses.activate
def test_get_follows():
    """get_follows returns the user's follows as Follow resources."""
    user_id = 1234
    response = {"_total": 27, "follows": [example_follow]}
    responses.add(
        responses.GET,
        "{}users/{}/follows/channels".format(BASE_URL, user_id),
        body=json.dumps(response),
        status=200,
        content_type="application/json",
    )
    client = TwitchClient("client id")
    follows = client.users.get_follows(user_id)
    assert len(responses.calls) == 1
    assert len(follows) == 1
    follow = follows[0]
    assert isinstance(follow, Follow)
    assert follow.notifications == example_follow["notifications"]
    assert isinstance(follow.channel, Channel)
    assert follow.channel.id == example_channel["_id"]
    assert follow.channel.name == example_channel["name"]
@responses.activate
@pytest.mark.parametrize(
    "param,value", [("limit", 101), ("direction", "abcd"), ("sort_by", "abcd")]
)
def test_get_follows_raises_if_wrong_params_are_passed_in(param, value):
    """get_follows validates query params client-side before any HTTP call."""
    client = TwitchClient("client id")
    kwargs = {param: value}
    with pytest.raises(TwitchAttributeException):
        client.users.get_follows("1234", **kwargs)
@responses.activate
def test_get_all_follows():
    """get_all_follows pages until a response without '_offset' is returned."""
    user_id = 1234
    response_with_offset = {"_total": 27, "_offset": 1234, "follows": [example_follow]}
    response_without_offset = {"_total": 27, "follows": [example_follow]}
    # responses serves registrations in order: first page carries an offset
    # (so the client fetches again), second page ends the pagination.
    responses.add(
        responses.GET,
        "{}users/{}/follows/channels".format(BASE_URL, user_id),
        body=json.dumps(response_with_offset),
        status=200,
        content_type="application/json",
    )
    responses.add(
        responses.GET,
        "{}users/{}/follows/channels".format(BASE_URL, user_id),
        body=json.dumps(response_without_offset),
        status=200,
        content_type="application/json",
    )
    client = TwitchClient("client id")
    follows = client.users.get_all_follows(
        user_id, direction="desc", sort_by="last_broadcast"
    )
    assert len(responses.calls) == 2
    assert len(follows) == 2
    follow = follows[0]
    assert isinstance(follow, Follow)
    assert follow.notifications == example_follow["notifications"]
    assert isinstance(follow.channel, Channel)
    assert follow.channel.id == example_channel["_id"]
    assert follow.channel.name == example_channel["name"]
@responses.activate
@pytest.mark.parametrize("param,value", [("direction", "abcd"), ("sort_by", "abcd")])
def test_get_all_follows_raises_if_wrong_params_are_passed_in(param, value):
    """get_all_follows validates query params client-side before any HTTP call."""
    client = TwitchClient("client id")
    kwargs = {param: value}
    with pytest.raises(TwitchAttributeException):
        client.users.get_all_follows("1234", **kwargs)
@responses.activate
def test_check_follows_channel():
    """check_follows_channel returns the Follow relation for user/channel."""
    user_id = 1234
    channel_id = 12345
    responses.add(
        responses.GET,
        "{}users/{}/follows/channels/{}".format(BASE_URL, user_id, channel_id),
        body=json.dumps(example_follow),
        status=200,
        content_type="application/json",
    )
    client = TwitchClient("client id")
    follow = client.users.check_follows_channel(user_id, channel_id)
    assert len(responses.calls) == 1
    assert isinstance(follow, Follow)
    assert follow.notifications == example_follow["notifications"]
    assert isinstance(follow.channel, Channel)
    assert follow.channel.id == example_channel["_id"]
    assert follow.channel.name == example_channel["name"]
@responses.activate
def test_follow_channel():
    """follow_channel PUTs the relation and returns the created Follow."""
    user_id = 1234
    channel_id = 12345
    responses.add(
        responses.PUT,
        "{}users/{}/follows/channels/{}".format(BASE_URL, user_id, channel_id),
        body=json.dumps(example_follow),
        status=200,
        content_type="application/json",
    )
    client = TwitchClient("client id", "oauth token")
    follow = client.users.follow_channel(user_id, channel_id)
    assert len(responses.calls) == 1
    assert isinstance(follow, Follow)
    assert follow.notifications == example_follow["notifications"]
    assert isinstance(follow.channel, Channel)
    assert follow.channel.id == example_channel["_id"]
    assert follow.channel.name == example_channel["name"]
@responses.activate
def test_unfollow_channel():
    """unfollow_channel issues a DELETE; a 204 with no body is accepted."""
    user_id = 1234
    channel_id = 12345
    responses.add(
        responses.DELETE,
        "{}users/{}/follows/channels/{}".format(BASE_URL, user_id, channel_id),
        status=204,
        content_type="application/json",
    )
    client = TwitchClient("client id", "oauth token")
    client.users.unfollow_channel(user_id, channel_id)
    assert len(responses.calls) == 1
@responses.activate
def test_get_user_block_list():
    """get_user_block_list returns UserBlock resources with nested Users."""
    user_id = 1234
    response = {"_total": 4, "blocks": [example_block]}
    responses.add(
        responses.GET,
        "{}users/{}/blocks".format(BASE_URL, user_id),
        body=json.dumps(response),
        status=200,
        content_type="application/json",
    )
    client = TwitchClient("client id", "oauth token")
    block_list = client.users.get_user_block_list(user_id)
    assert len(responses.calls) == 1
    assert len(block_list) == 1
    block = block_list[0]
    assert isinstance(block, UserBlock)
    assert block.id == example_block["_id"]
    assert isinstance(block.user, User)
    assert block.user.id == example_user["_id"]
    assert block.user.name == example_user["name"]
@responses.activate
@pytest.mark.parametrize("param,value", [("limit", 101)])
def test_get_user_block_list_raises_if_wrong_params_are_passed_in(param, value):
    """get_user_block_list validates the limit param client-side."""
    client = TwitchClient("client id", "oauth token")
    kwargs = {param: value}
    with pytest.raises(TwitchAttributeException):
        client.users.get_user_block_list("1234", **kwargs)
@responses.activate
def test_block_user():
    """block_user PUTs the block and returns the created UserBlock."""
    user_id = 1234
    blocked_user_id = 12345
    responses.add(
        responses.PUT,
        "{}users/{}/blocks/{}".format(BASE_URL, user_id, blocked_user_id),
        body=json.dumps(example_block),
        status=200,
        content_type="application/json",
    )
    client = TwitchClient("client id", "oauth token")
    block = client.users.block_user(user_id, blocked_user_id)
    assert len(responses.calls) == 1
    assert isinstance(block, UserBlock)
    assert block.id == example_block["_id"]
    assert isinstance(block.user, User)
    assert block.user.id == example_user["_id"]
    assert block.user.name == example_user["name"]
@responses.activate
def test_unblock_user():
    """unblock_user issues a DELETE for the block relation."""
    user_id = 1234
    blocked_user_id = 12345
    responses.add(
        responses.DELETE,
        "{}users/{}/blocks/{}".format(BASE_URL, user_id, blocked_user_id),
        body=json.dumps(example_block),
        status=200,
        content_type="application/json",
    )
    client = TwitchClient("client id", "oauth token")
    client.users.unblock_user(user_id, blocked_user_id)
    assert len(responses.calls) == 1
@responses.activate
def test_translate_usernames_to_ids():
    """translate_usernames_to_ids resolves login names to User resources."""
    response = {"users": [example_user]}
    responses.add(
        responses.GET,
        "{}users".format(BASE_URL),
        body=json.dumps(response),
        status=200,
        content_type="application/json",
    )
    client = TwitchClient("client id", "oauth token")
    users = client.users.translate_usernames_to_ids(["lirik"])
    assert len(responses.calls) == 1
    assert len(users) == 1
    user = users[0]
    assert isinstance(user, User)
    assert user.id == example_user["_id"]
    assert user.name == example_user["name"]
| |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.appengine_admin_v1.types import application as ga_application
from google.cloud.appengine_admin_v1.types import certificate as ga_certificate
from google.cloud.appengine_admin_v1.types import domain
from google.cloud.appengine_admin_v1.types import domain_mapping as ga_domain_mapping
from google.cloud.appengine_admin_v1.types import firewall
from google.cloud.appengine_admin_v1.types import instance
from google.cloud.appengine_admin_v1.types import service as ga_service
from google.cloud.appengine_admin_v1.types import version as ga_version
from google.protobuf import field_mask_pb2 # type: ignore
# Auto-generated proto-plus module manifest: declares every message/enum
# defined in this module under the google.appengine.v1 proto package.
__protobuf__ = proto.module(
    package="google.appengine.v1",
    manifest={
        "VersionView",
        "AuthorizedCertificateView",
        "DomainOverrideStrategy",
        "GetApplicationRequest",
        "CreateApplicationRequest",
        "UpdateApplicationRequest",
        "RepairApplicationRequest",
        "ListServicesRequest",
        "ListServicesResponse",
        "GetServiceRequest",
        "UpdateServiceRequest",
        "DeleteServiceRequest",
        "ListVersionsRequest",
        "ListVersionsResponse",
        "GetVersionRequest",
        "CreateVersionRequest",
        "UpdateVersionRequest",
        "DeleteVersionRequest",
        "ListInstancesRequest",
        "ListInstancesResponse",
        "GetInstanceRequest",
        "DeleteInstanceRequest",
        "DebugInstanceRequest",
        "ListIngressRulesRequest",
        "ListIngressRulesResponse",
        "BatchUpdateIngressRulesRequest",
        "BatchUpdateIngressRulesResponse",
        "CreateIngressRuleRequest",
        "GetIngressRuleRequest",
        "UpdateIngressRuleRequest",
        "DeleteIngressRuleRequest",
        "ListAuthorizedDomainsRequest",
        "ListAuthorizedDomainsResponse",
        "ListAuthorizedCertificatesRequest",
        "ListAuthorizedCertificatesResponse",
        "GetAuthorizedCertificateRequest",
        "CreateAuthorizedCertificateRequest",
        "UpdateAuthorizedCertificateRequest",
        "DeleteAuthorizedCertificateRequest",
        "ListDomainMappingsRequest",
        "ListDomainMappingsResponse",
        "GetDomainMappingRequest",
        "CreateDomainMappingRequest",
        "UpdateDomainMappingRequest",
        "DeleteDomainMappingRequest",
    },
)
# --- Enums (generated from google.appengine.v1; do not edit by hand) ---
class VersionView(proto.Enum):
    r"""Fields that should be returned when
    [Version][google.appengine.v1.Version] resources are retrieved.
    """
    BASIC = 0
    FULL = 1
class AuthorizedCertificateView(proto.Enum):
    r"""Fields that should be returned when an AuthorizedCertificate
    resource is retrieved.
    """
    BASIC_CERTIFICATE = 0
    FULL_CERTIFICATE = 1
class DomainOverrideStrategy(proto.Enum):
    r"""Override strategy for mutating an existing mapping."""
    UNSPECIFIED_DOMAIN_OVERRIDE_STRATEGY = 0
    STRICT = 1
    OVERRIDE = 2
# --- Applications service request messages (generated; do not edit) ---
class GetApplicationRequest(proto.Message):
    r"""Request message for ``Applications.GetApplication``.
    Attributes:
        name (str):
            Name of the Application resource to get. Example:
            ``apps/myapp``.
    """
    name = proto.Field(proto.STRING, number=1,)
class CreateApplicationRequest(proto.Message):
    r"""Request message for ``Applications.CreateApplication``.
    Attributes:
        application (google.cloud.appengine_admin_v1.types.Application):
            Application configuration.
    """
    # Field number 2 matches the .proto definition (field 1 is unused here).
    application = proto.Field(
        proto.MESSAGE, number=2, message=ga_application.Application,
    )
class UpdateApplicationRequest(proto.Message):
    r"""Request message for ``Applications.UpdateApplication``.
    Attributes:
        name (str):
            Name of the Application resource to update. Example:
            ``apps/myapp``.
        application (google.cloud.appengine_admin_v1.types.Application):
            An Application containing the updated
            resource.
        update_mask (google.protobuf.field_mask_pb2.FieldMask):
            Standard field mask for the set of fields to
            be updated.
    """
    name = proto.Field(proto.STRING, number=1,)
    application = proto.Field(
        proto.MESSAGE, number=2, message=ga_application.Application,
    )
    update_mask = proto.Field(
        proto.MESSAGE, number=3, message=field_mask_pb2.FieldMask,
    )
class RepairApplicationRequest(proto.Message):
    r"""Request message for 'Applications.RepairApplication'.
    Attributes:
        name (str):
            Name of the application to repair. Example: ``apps/myapp``
    """
    name = proto.Field(proto.STRING, number=1,)
# --- Services service request/response messages (generated; do not edit) ---
class ListServicesRequest(proto.Message):
    r"""Request message for ``Services.ListServices``.
    Attributes:
        parent (str):
            Name of the parent Application resource. Example:
            ``apps/myapp``.
        page_size (int):
            Maximum results to return per page.
        page_token (str):
            Continuation token for fetching the next page
            of results.
    """
    parent = proto.Field(proto.STRING, number=1,)
    page_size = proto.Field(proto.INT32, number=2,)
    page_token = proto.Field(proto.STRING, number=3,)
class ListServicesResponse(proto.Message):
    r"""Response message for ``Services.ListServices``.
    Attributes:
        services (Sequence[google.cloud.appengine_admin_v1.types.Service]):
            The services belonging to the requested
            application.
        next_page_token (str):
            Continuation token for fetching the next page
            of results.
    """
    @property
    def raw_page(self):
        # The response message itself serves as the raw page object for
        # pagination helpers.
        return self
    services = proto.RepeatedField(proto.MESSAGE, number=1, message=ga_service.Service,)
    next_page_token = proto.Field(proto.STRING, number=2,)
class GetServiceRequest(proto.Message):
    r"""Request message for ``Services.GetService``.
    Attributes:
        name (str):
            Name of the resource requested. Example:
            ``apps/myapp/services/default``.
    """
    name = proto.Field(proto.STRING, number=1,)
class UpdateServiceRequest(proto.Message):
    r"""Request message for ``Services.UpdateService``.
    Attributes:
        name (str):
            Name of the resource to update. Example:
            ``apps/myapp/services/default``.
        service (google.cloud.appengine_admin_v1.types.Service):
            A Service resource containing the updated
            service. Only fields set in the field mask will
            be updated.
        update_mask (google.protobuf.field_mask_pb2.FieldMask):
            Standard field mask for the set of fields to
            be updated.
        migrate_traffic (bool):
            Set to ``true`` to gradually shift traffic to one or more
            versions that you specify. By default, traffic is shifted
            immediately. For gradual traffic migration, the target
            versions must be located within instances that are
            configured for both `warmup
            requests <https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#InboundServiceType>`__
            and `automatic
            scaling <https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#AutomaticScaling>`__.
            You must specify the
            ```shardBy`` <https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services#ShardBy>`__
            field in the Service resource. Gradual traffic migration is
            not supported in the App Engine flexible environment. For
            examples, see `Migrating and Splitting
            Traffic <https://cloud.google.com/appengine/docs/admin-api/migrating-splitting-traffic>`__.
    """
    name = proto.Field(proto.STRING, number=1,)
    service = proto.Field(proto.MESSAGE, number=2, message=ga_service.Service,)
    update_mask = proto.Field(
        proto.MESSAGE, number=3, message=field_mask_pb2.FieldMask,
    )
    migrate_traffic = proto.Field(proto.BOOL, number=4,)
class DeleteServiceRequest(proto.Message):
    r"""Request message for ``Services.DeleteService``.
    Attributes:
        name (str):
            Name of the resource requested. Example:
            ``apps/myapp/services/default``.
    """
    name = proto.Field(proto.STRING, number=1,)
# --- Versions service request/response messages (generated; do not edit) ---
class ListVersionsRequest(proto.Message):
    r"""Request message for ``Versions.ListVersions``.
    Attributes:
        parent (str):
            Name of the parent Service resource. Example:
            ``apps/myapp/services/default``.
        view (google.cloud.appengine_admin_v1.types.VersionView):
            Controls the set of fields returned in the ``List``
            response.
        page_size (int):
            Maximum results to return per page.
        page_token (str):
            Continuation token for fetching the next page
            of results.
    """
    parent = proto.Field(proto.STRING, number=1,)
    view = proto.Field(proto.ENUM, number=2, enum="VersionView",)
    page_size = proto.Field(proto.INT32, number=3,)
    page_token = proto.Field(proto.STRING, number=4,)
class ListVersionsResponse(proto.Message):
    r"""Response message for ``Versions.ListVersions``.
    Attributes:
        versions (Sequence[google.cloud.appengine_admin_v1.types.Version]):
            The versions belonging to the requested
            service.
        next_page_token (str):
            Continuation token for fetching the next page
            of results.
    """
    @property
    def raw_page(self):
        # The response message itself serves as the raw page object for
        # pagination helpers.
        return self
    versions = proto.RepeatedField(proto.MESSAGE, number=1, message=ga_version.Version,)
    next_page_token = proto.Field(proto.STRING, number=2,)
class GetVersionRequest(proto.Message):
    r"""Request message for ``Versions.GetVersion``.
    Attributes:
        name (str):
            Name of the resource requested. Example:
            ``apps/myapp/services/default/versions/v1``.
        view (google.cloud.appengine_admin_v1.types.VersionView):
            Controls the set of fields returned in the ``Get`` response.
    """
    name = proto.Field(proto.STRING, number=1,)
    view = proto.Field(proto.ENUM, number=2, enum="VersionView",)
class CreateVersionRequest(proto.Message):
    r"""Request message for ``Versions.CreateVersion``.
    Attributes:
        parent (str):
            Name of the parent resource to create this version under.
            Example: ``apps/myapp/services/default``.
        version (google.cloud.appengine_admin_v1.types.Version):
            Application deployment configuration.
    """
    parent = proto.Field(proto.STRING, number=1,)
    version = proto.Field(proto.MESSAGE, number=2, message=ga_version.Version,)
class UpdateVersionRequest(proto.Message):
    r"""Request message for ``Versions.UpdateVersion``.
    Attributes:
        name (str):
            Name of the resource to update. Example:
            ``apps/myapp/services/default/versions/1``.
        version (google.cloud.appengine_admin_v1.types.Version):
            A Version containing the updated resource.
            Only fields set in the field mask will be
            updated.
        update_mask (google.protobuf.field_mask_pb2.FieldMask):
            Standard field mask for the set of fields to
            be updated.
    """
    name = proto.Field(proto.STRING, number=1,)
    version = proto.Field(proto.MESSAGE, number=2, message=ga_version.Version,)
    update_mask = proto.Field(
        proto.MESSAGE, number=3, message=field_mask_pb2.FieldMask,
    )
class DeleteVersionRequest(proto.Message):
    r"""Request message for ``Versions.DeleteVersion``.
    Attributes:
        name (str):
            Name of the resource requested. Example:
            ``apps/myapp/services/default/versions/v1``.
    """
    name = proto.Field(proto.STRING, number=1,)
class ListInstancesRequest(proto.Message):
r"""Request message for ``Instances.ListInstances``.
Attributes:
parent (str):
Name of the parent Version resource. Example:
``apps/myapp/services/default/versions/v1``.
page_size (int):
Maximum results to return per page.
page_token (str):
Continuation token for fetching the next page
of results.
"""
parent = proto.Field(proto.STRING, number=1,)
page_size = proto.Field(proto.INT32, number=2,)
page_token = proto.Field(proto.STRING, number=3,)
class ListInstancesResponse(proto.Message):
    r"""Response message for ``Instances.ListInstances``.
    Attributes:
        instances (Sequence[google.cloud.appengine_admin_v1.types.Instance]):
            The instances belonging to the requested
            version.
        next_page_token (str):
            Continuation token for fetching the next page
            of results.
    """
    @property
    def raw_page(self):
        # Returns the message itself; presumably consumed by the generated
        # client-side pager machinery -- TODO confirm against the pager code.
        return self
    instances = proto.RepeatedField(proto.MESSAGE, number=1, message=instance.Instance,)
    next_page_token = proto.Field(proto.STRING, number=2,)
class GetInstanceRequest(proto.Message):
    r"""Request message for ``Instances.GetInstance``.
    Attributes:
        name (str):
            Name of the resource requested. Example:
            ``apps/myapp/services/default/versions/v1/instances/instance-1``.
    """
    # Generated code: wire tag 1 must match the service .proto.
    name = proto.Field(proto.STRING, number=1,)
class DeleteInstanceRequest(proto.Message):
    r"""Request message for ``Instances.DeleteInstance``.
    Attributes:
        name (str):
            Name of the resource requested. Example:
            ``apps/myapp/services/default/versions/v1/instances/instance-1``.
    """
    # Generated code: wire tag 1 must match the service .proto.
    name = proto.Field(proto.STRING, number=1,)
class DebugInstanceRequest(proto.Message):
    r"""Request message for ``Instances.DebugInstance``.
    Attributes:
        name (str):
            Name of the resource requested. Example:
            ``apps/myapp/services/default/versions/v1/instances/instance-1``.
        ssh_key (str):
            Public SSH key to add to the instance. Examples:
            - ``[USERNAME]:ssh-rsa [KEY_VALUE] [USERNAME]``
            - ``[USERNAME]:ssh-rsa [KEY_VALUE] google-ssh {"userName":"[USERNAME]","expireOn":"[EXPIRE_TIME]"}``
            For more information, see `Adding and Removing SSH
            Keys <https://cloud.google.com/compute/docs/instances/adding-removing-ssh-keys>`__.
    """
    # Generated code: number= values are protobuf wire tags; keep in sync with the .proto.
    name = proto.Field(proto.STRING, number=1,)
    ssh_key = proto.Field(proto.STRING, number=2,)
class ListIngressRulesRequest(proto.Message):
    r"""Request message for ``Firewall.ListIngressRules``.
    Attributes:
        parent (str):
            Name of the Firewall collection to retrieve. Example:
            ``apps/myapp/firewall/ingressRules``.
        page_size (int):
            Maximum results to return per page.
        page_token (str):
            Continuation token for fetching the next page
            of results.
        matching_address (str):
            A valid IP Address. If set, only rules
            matching this address will be returned. The
            first returned rule will be the rule that fires
            on requests from this IP.
    """
    parent = proto.Field(proto.STRING, number=1,)
    page_size = proto.Field(proto.INT32, number=2,)
    page_token = proto.Field(proto.STRING, number=3,)
    # Optional server-side filter; see the docstring for its matching semantics.
    matching_address = proto.Field(proto.STRING, number=4,)
class ListIngressRulesResponse(proto.Message):
    r"""Response message for ``Firewall.ListIngressRules``.
    Attributes:
        ingress_rules (Sequence[google.cloud.appengine_admin_v1.types.FirewallRule]):
            The ingress FirewallRules for this
            application.
        next_page_token (str):
            Continuation token for fetching the next page
            of results.
    """
    @property
    def raw_page(self):
        # Returns the message itself; presumably consumed by the generated
        # client-side pager machinery -- TODO confirm against the pager code.
        return self
    ingress_rules = proto.RepeatedField(
        proto.MESSAGE, number=1, message=firewall.FirewallRule,
    )
    next_page_token = proto.Field(proto.STRING, number=2,)
class BatchUpdateIngressRulesRequest(proto.Message):
    r"""Request message for ``Firewall.BatchUpdateIngressRules``.
    Attributes:
        name (str):
            Name of the Firewall collection to set. Example:
            ``apps/myapp/firewall/ingressRules``.
        ingress_rules (Sequence[google.cloud.appengine_admin_v1.types.FirewallRule]):
            A list of FirewallRules to replace the
            existing set.
    """
    # Generated code: number= values are protobuf wire tags; keep in sync with the .proto.
    name = proto.Field(proto.STRING, number=1,)
    ingress_rules = proto.RepeatedField(
        proto.MESSAGE, number=2, message=firewall.FirewallRule,
    )
class BatchUpdateIngressRulesResponse(proto.Message):
    r"""Response message for ``Firewall.UpdateAllIngressRules``.
    Attributes:
        ingress_rules (Sequence[google.cloud.appengine_admin_v1.types.FirewallRule]):
            The full list of ingress FirewallRules for
            this application.
    """
    # Generated code: wire tag 1 must match the service .proto.
    ingress_rules = proto.RepeatedField(
        proto.MESSAGE, number=1, message=firewall.FirewallRule,
    )
class CreateIngressRuleRequest(proto.Message):
    r"""Request message for ``Firewall.CreateIngressRule``.
    Attributes:
        parent (str):
            Name of the parent Firewall collection in which to create a
            new rule. Example: ``apps/myapp/firewall/ingressRules``.
        rule (google.cloud.appengine_admin_v1.types.FirewallRule):
            A FirewallRule containing the new resource.
            The user may optionally provide a position at
            which the new rule will be placed. The positions
            define a sequential list starting at 1. If a
            rule already exists at the given position, rules
            greater than the provided position will be moved
            forward by one.
            If no position is provided, the server will
            place the rule as the second to last rule in the
            sequence before the required default allow-all
            or deny-all rule.
    """
    # Generated code: number= values are protobuf wire tags; keep in sync with the .proto.
    parent = proto.Field(proto.STRING, number=1,)
    rule = proto.Field(proto.MESSAGE, number=2, message=firewall.FirewallRule,)
class GetIngressRuleRequest(proto.Message):
    r"""Request message for ``Firewall.GetIngressRule``.
    Attributes:
        name (str):
            Name of the Firewall resource to retrieve. Example:
            ``apps/myapp/firewall/ingressRules/100``.
    """
    # Generated code: wire tag 1 must match the service .proto.
    name = proto.Field(proto.STRING, number=1,)
class UpdateIngressRuleRequest(proto.Message):
    r"""Request message for ``Firewall.UpdateIngressRule``.
    Attributes:
        name (str):
            Name of the Firewall resource to update. Example:
            ``apps/myapp/firewall/ingressRules/100``.
        rule (google.cloud.appengine_admin_v1.types.FirewallRule):
            A FirewallRule containing the updated
            resource
        update_mask (google.protobuf.field_mask_pb2.FieldMask):
            Standard field mask for the set of fields to
            be updated.
    """
    # Generated code: number= values are protobuf wire tags; keep in sync with the .proto.
    name = proto.Field(proto.STRING, number=1,)
    rule = proto.Field(proto.MESSAGE, number=2, message=firewall.FirewallRule,)
    # update_mask selects which FirewallRule fields the server should modify.
    update_mask = proto.Field(
        proto.MESSAGE, number=3, message=field_mask_pb2.FieldMask,
    )
class DeleteIngressRuleRequest(proto.Message):
    r"""Request message for ``Firewall.DeleteIngressRule``.
    Attributes:
        name (str):
            Name of the Firewall resource to delete. Example:
            ``apps/myapp/firewall/ingressRules/100``.
    """
    # Generated code: wire tag 1 must match the service .proto.
    name = proto.Field(proto.STRING, number=1,)
class ListAuthorizedDomainsRequest(proto.Message):
    r"""Request message for ``AuthorizedDomains.ListAuthorizedDomains``.
    Attributes:
        parent (str):
            Name of the parent Application resource. Example:
            ``apps/myapp``.
        page_size (int):
            Maximum results to return per page.
        page_token (str):
            Continuation token for fetching the next page
            of results.
    """
    parent = proto.Field(proto.STRING, number=1,)
    # Standard AIP-style pagination: page_size caps a page, page_token resumes a prior call.
    page_size = proto.Field(proto.INT32, number=2,)
    page_token = proto.Field(proto.STRING, number=3,)
class ListAuthorizedDomainsResponse(proto.Message):
    r"""Response message for ``AuthorizedDomains.ListAuthorizedDomains``.
    Attributes:
        domains (Sequence[google.cloud.appengine_admin_v1.types.AuthorizedDomain]):
            The authorized domains belonging to the user.
        next_page_token (str):
            Continuation token for fetching the next page
            of results.
    """
    @property
    def raw_page(self):
        # Returns the message itself; presumably consumed by the generated
        # client-side pager machinery -- TODO confirm against the pager code.
        return self
    domains = proto.RepeatedField(
        proto.MESSAGE, number=1, message=domain.AuthorizedDomain,
    )
    next_page_token = proto.Field(proto.STRING, number=2,)
class ListAuthorizedCertificatesRequest(proto.Message):
    r"""Request message for
    ``AuthorizedCertificates.ListAuthorizedCertificates``.
    Attributes:
        parent (str):
            Name of the parent ``Application`` resource. Example:
            ``apps/myapp``.
        view (google.cloud.appengine_admin_v1.types.AuthorizedCertificateView):
            Controls the set of fields returned in the ``LIST``
            response.
        page_size (int):
            Maximum results to return per page.
        page_token (str):
            Continuation token for fetching the next page
            of results.
    """
    parent = proto.Field(proto.STRING, number=1,)
    # NOTE: wire tag 4 despite declaration order -- tags come from the .proto, not from position.
    view = proto.Field(proto.ENUM, number=4, enum="AuthorizedCertificateView",)
    page_size = proto.Field(proto.INT32, number=2,)
    page_token = proto.Field(proto.STRING, number=3,)
class ListAuthorizedCertificatesResponse(proto.Message):
    r"""Response message for
    ``AuthorizedCertificates.ListAuthorizedCertificates``.
    Attributes:
        certificates (Sequence[google.cloud.appengine_admin_v1.types.AuthorizedCertificate]):
            The SSL certificates the user is authorized
            to administer.
        next_page_token (str):
            Continuation token for fetching the next page
            of results.
    """
    @property
    def raw_page(self):
        # Returns the message itself; presumably consumed by the generated
        # client-side pager machinery -- TODO confirm against the pager code.
        return self
    certificates = proto.RepeatedField(
        proto.MESSAGE, number=1, message=ga_certificate.AuthorizedCertificate,
    )
    next_page_token = proto.Field(proto.STRING, number=2,)
class GetAuthorizedCertificateRequest(proto.Message):
    r"""Request message for
    ``AuthorizedCertificates.GetAuthorizedCertificate``.
    Attributes:
        name (str):
            Name of the resource requested. Example:
            ``apps/myapp/authorizedCertificates/12345``.
        view (google.cloud.appengine_admin_v1.types.AuthorizedCertificateView):
            Controls the set of fields returned in the ``GET`` response.
    """
    # Generated code: number= values are protobuf wire tags; keep in sync with the .proto.
    name = proto.Field(proto.STRING, number=1,)
    view = proto.Field(proto.ENUM, number=2, enum="AuthorizedCertificateView",)
class CreateAuthorizedCertificateRequest(proto.Message):
    r"""Request message for
    ``AuthorizedCertificates.CreateAuthorizedCertificate``.
    Attributes:
        parent (str):
            Name of the parent ``Application`` resource. Example:
            ``apps/myapp``.
        certificate (google.cloud.appengine_admin_v1.types.AuthorizedCertificate):
            SSL certificate data.
    """
    # Generated code: number= values are protobuf wire tags; keep in sync with the .proto.
    parent = proto.Field(proto.STRING, number=1,)
    certificate = proto.Field(
        proto.MESSAGE, number=2, message=ga_certificate.AuthorizedCertificate,
    )
class UpdateAuthorizedCertificateRequest(proto.Message):
    r"""Request message for
    ``AuthorizedCertificates.UpdateAuthorizedCertificate``.
    Attributes:
        name (str):
            Name of the resource to update. Example:
            ``apps/myapp/authorizedCertificates/12345``.
        certificate (google.cloud.appengine_admin_v1.types.AuthorizedCertificate):
            An ``AuthorizedCertificate`` containing the updated
            resource. Only fields set in the field mask will be updated.
        update_mask (google.protobuf.field_mask_pb2.FieldMask):
            Standard field mask for the set of fields to be updated.
            Updates are only supported on the ``certificate_raw_data``
            and ``display_name`` fields.
    """
    # Generated code: number= values are protobuf wire tags; keep in sync with the .proto.
    name = proto.Field(proto.STRING, number=1,)
    certificate = proto.Field(
        proto.MESSAGE, number=2, message=ga_certificate.AuthorizedCertificate,
    )
    # update_mask selects which certificate fields the server should modify.
    update_mask = proto.Field(
        proto.MESSAGE, number=3, message=field_mask_pb2.FieldMask,
    )
class DeleteAuthorizedCertificateRequest(proto.Message):
    r"""Request message for
    ``AuthorizedCertificates.DeleteAuthorizedCertificate``.
    Attributes:
        name (str):
            Name of the resource to delete. Example:
            ``apps/myapp/authorizedCertificates/12345``.
    """
    # Generated code: wire tag 1 must match the service .proto.
    name = proto.Field(proto.STRING, number=1,)
class ListDomainMappingsRequest(proto.Message):
    r"""Request message for ``DomainMappings.ListDomainMappings``.
    Attributes:
        parent (str):
            Name of the parent Application resource. Example:
            ``apps/myapp``.
        page_size (int):
            Maximum results to return per page.
        page_token (str):
            Continuation token for fetching the next page
            of results.
    """
    parent = proto.Field(proto.STRING, number=1,)
    # Standard AIP-style pagination: page_size caps a page, page_token resumes a prior call.
    page_size = proto.Field(proto.INT32, number=2,)
    page_token = proto.Field(proto.STRING, number=3,)
class ListDomainMappingsResponse(proto.Message):
    r"""Response message for ``DomainMappings.ListDomainMappings``.
    Attributes:
        domain_mappings (Sequence[google.cloud.appengine_admin_v1.types.DomainMapping]):
            The domain mappings for the application.
        next_page_token (str):
            Continuation token for fetching the next page
            of results.
    """
    @property
    def raw_page(self):
        # Returns the message itself; presumably consumed by the generated
        # client-side pager machinery -- TODO confirm against the pager code.
        return self
    domain_mappings = proto.RepeatedField(
        proto.MESSAGE, number=1, message=ga_domain_mapping.DomainMapping,
    )
    next_page_token = proto.Field(proto.STRING, number=2,)
class GetDomainMappingRequest(proto.Message):
    r"""Request message for ``DomainMappings.GetDomainMapping``.
    Attributes:
        name (str):
            Name of the resource requested. Example:
            ``apps/myapp/domainMappings/example.com``.
    """
    # Generated code: wire tag 1 must match the service .proto.
    name = proto.Field(proto.STRING, number=1,)
class CreateDomainMappingRequest(proto.Message):
    r"""Request message for ``DomainMappings.CreateDomainMapping``.
    Attributes:
        parent (str):
            Name of the parent Application resource. Example:
            ``apps/myapp``.
        domain_mapping (google.cloud.appengine_admin_v1.types.DomainMapping):
            Domain mapping configuration.
        override_strategy (google.cloud.appengine_admin_v1.types.DomainOverrideStrategy):
            Whether the domain creation should override
            any existing mappings for this domain. By
            default, overrides are rejected.
    """
    parent = proto.Field(proto.STRING, number=1,)
    domain_mapping = proto.Field(
        proto.MESSAGE, number=2, message=ga_domain_mapping.DomainMapping,
    )
    # NOTE: wire tag 4 (tag 3 is unused here); tags come from the .proto, not from position.
    override_strategy = proto.Field(
        proto.ENUM, number=4, enum="DomainOverrideStrategy",
    )
class UpdateDomainMappingRequest(proto.Message):
    r"""Request message for ``DomainMappings.UpdateDomainMapping``.
    Attributes:
        name (str):
            Name of the resource to update. Example:
            ``apps/myapp/domainMappings/example.com``.
        domain_mapping (google.cloud.appengine_admin_v1.types.DomainMapping):
            A domain mapping containing the updated
            resource. Only fields set in the field mask will
            be updated.
        update_mask (google.protobuf.field_mask_pb2.FieldMask):
            Standard field mask for the set of fields to
            be updated.
    """
    # Generated code: number= values are protobuf wire tags; keep in sync with the .proto.
    name = proto.Field(proto.STRING, number=1,)
    domain_mapping = proto.Field(
        proto.MESSAGE, number=2, message=ga_domain_mapping.DomainMapping,
    )
    # update_mask selects which DomainMapping fields the server should modify.
    update_mask = proto.Field(
        proto.MESSAGE, number=3, message=field_mask_pb2.FieldMask,
    )
class DeleteDomainMappingRequest(proto.Message):
    r"""Request message for ``DomainMappings.DeleteDomainMapping``.
    Attributes:
        name (str):
            Name of the resource to delete. Example:
            ``apps/myapp/domainMappings/example.com``.
    """
    # Generated code: wire tag 1 must match the service .proto.
    name = proto.Field(proto.STRING, number=1,)
# Re-export every message type registered in the generated protobuf manifest.
__all__ = tuple(sorted(__protobuf__.manifest))
| |
#!/usr/bin/env python3
"""
$ ./tools/js-dep-visualizer.py
$ dot -Tpng var/zulip-deps.dot -o var/zulip-deps.png
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import re
import subprocess
import sys
from collections import defaultdict
from typing import Any, DefaultDict, Dict, List, Set, Tuple
# Type aliases for the dependency graph: an edge is a (parent, child) module
# pair; MethodDict maps each edge to the child methods that the parent calls.
Edge = Tuple[str, str]
EdgeSet = Set[Edge]
Method = str
MethodDict = DefaultDict[Edge, List[Method]]
TOOLS_DIR = os.path.abspath(os.path.dirname(__file__))
ROOT_DIR = os.path.dirname(TOOLS_DIR)
# Make the repository root importable so tools.lib.graph resolves below.
sys.path.insert(0, ROOT_DIR)
from tools.lib.graph import (
    Graph,
    make_dot_file,
    best_edge_to_remove,
)
# Input directory of JS modules, and output paths for the dot/png artifacts.
JS_FILES_DIR = os.path.join(ROOT_DIR, 'static/js')
OUTPUT_FILE_PATH = os.path.relpath(os.path.join(ROOT_DIR, 'var/zulip-deps.dot'))
PNG_FILE_PATH = os.path.relpath(os.path.join(ROOT_DIR, 'var/zulip-deps.png'))
def get_js_edges():
    # type: () -> Tuple[EdgeSet, MethodDict]
    """Scan every module under JS_FILES_DIR and build the call graph.

    Returns a set of (parent, child) edges, plus a dict mapping each edge
    to the list of child methods that the parent module invokes.
    """
    names = set()
    modules = []  # type: List[Dict[str, Any]]
    for js_file in os.listdir(JS_FILES_DIR):
        if not js_file.endswith('.js'):
            continue
        name = js_file[:-3]  # remove .js
        path = os.path.join(JS_FILES_DIR, js_file)
        names.add(name)
        # NOTE(review): the original also compiled a per-module regex here,
        # but it was never read anywhere; dropped as dead code.
        modules.append(dict(
            name=name,
            path=path,
        ))
    # Raw strings: '\s', '\w', '\.' in a plain string literal are invalid
    # escape sequences (a warning on modern Python, an error eventually).
    comment_regex = re.compile(r'\s+//')
    call_regex = re.compile(r'[^_](\w+\.\w+)\(')
    methods = defaultdict(list)  # type: DefaultDict[Edge, List[Method]]
    edges = set()
    for module in modules:
        parent = module['name']
        with open(module['path']) as f:
            for line in f:
                if comment_regex.match(line):
                    continue
                if 'subs.forEach' in line:
                    continue
                m = call_regex.search(line)
                if not m:
                    continue
                for g in m.groups():
                    child, method = g.split('.')
                    # Only count calls into other known modules, not self-calls.
                    if child not in names:
                        continue
                    if child == parent:
                        continue
                    tup = (parent, child)
                    edges.add(tup)
                    methods[tup].append(method)
    return edges, methods
def find_edges_to_remove(graph, methods):
    # type: (Graph, MethodDict) -> Tuple[Graph, List[Edge]]
    """Greedily cut approved edges until no cycles remain (or we get stuck).
    Returns the residual graph plus the list of edges that were removed.
    """
    EXEMPT_EDGES = [
        # These are sensible dependencies, so don't cut them.
        ('rows', 'message_store'),
        ('filter', 'stream_data'),
        ('server_events', 'user_events'),
        ('compose_fade', 'stream_data'),
        ('narrow', 'message_list'),
        ('stream_list', 'topic_list',),
        ('subs', 'stream_muting'),
        ('hashchange', 'settings'),
        ('tutorial', 'narrow'),
        ('activity', 'resize'),
        ('hashchange', 'drafts'),
        ('compose', 'echo'),
        ('compose', 'resize'),
        ('settings', 'resize'),
        ('settings', 'settings_lab'),
        ('settings_lab', 'resize'),
        ('compose', 'unread_ops'),
        ('compose', 'drafts'),
        ('echo', 'message_edit'),
        ('echo', 'stream_list'),
        ('hashchange', 'narrow'),
        ('hashchange', 'subs'),
        ('message_edit', 'echo'),
        ('popovers', 'message_edit'),
        ('unread_ui', 'activity'),
        ('message_fetch', 'message_util'),
        ('message_fetch', 'resize'),
        ('message_util', 'resize'),
        ('notifications', 'tutorial'),
        ('message_util', 'unread_ui'),
        ('muting_ui', 'stream_list'),
        ('muting_ui', 'unread_ui'),
        ('stream_popover', 'subs'),
        ('stream_popover', 'muting_ui'),
        ('narrow', 'message_fetch'),
        ('narrow', 'message_util'),
        ('narrow', 'navigate'),
        ('unread_ops', 'unread_ui'),
        ('narrow', 'unread_ops'),
        ('navigate', 'unread_ops'),
        ('pm_list', 'unread_ui'),
        ('stream_list', 'unread_ui'),
        ('popovers', 'compose'),
        ('popovers', 'muting_ui'),
        ('popovers', 'narrow'),
        ('popovers', 'resize'),
        ('pm_list', 'resize'),
        ('notifications', 'navigate'),
        ('compose', 'socket'),
        ('stream_muting', 'message_util'),
        ('subs', 'stream_list'),
        ('ui', 'message_fetch'),
        ('ui', 'unread_ops'),
        ('condense', 'message_viewport'),
        ('compose_actions', 'compose'),
        ('compose_actions', 'resize'),
        ('settings_streams', 'stream_data'),
        ('drafts', 'hashchange'),
        ('settings_notifications', 'stream_edit'),
        ('compose', 'stream_edit'),
        ('subs', 'stream_edit'),
        ('narrow_state', 'stream_data'),
        ('stream_edit', 'stream_list'),
        ('reactions', 'emoji_picker'),
        ('message_edit', 'resize'),
    ] # type: List[Edge]
    # An exempt edge may never be cut: any edge out of the event-handling
    # modules, any edge into 'rows', and everything in EXEMPT_EDGES --
    # except ('server_events', 'reload'), which is explicitly cuttable.
    def is_exempt(edge):
        # type: (Tuple[str, str]) -> bool
        parent, child = edge
        if edge == ('server_events', 'reload'):
            return False
        if parent in ['server_events', 'user_events', 'stream_events',
                      'message_events', 'reload']:
            return True
        if child == 'rows':
            return True
        return edge in EXEMPT_EDGES
    # Cuts we have already signed off on; cut_is_legal() additionally
    # blanket-approves any edge pointing into a few sink-ish modules.
    APPROVED_CUTS = [
        ('stream_edit', 'stream_events'),
        ('unread_ui', 'pointer'),
        ('typing_events', 'narrow'),
        ('echo', 'message_events'),
        ('resize', 'navigate'),
        ('narrow', 'search'),
        ('subs', 'stream_events'),
        ('stream_color', 'tab_bar'),
        ('stream_color', 'subs'),
        ('stream_data', 'narrow'),
        ('unread', 'narrow'),
        ('composebox_typeahead', 'compose'),
        ('message_list', 'message_edit'),
        ('message_edit', 'compose'),
        ('message_store', 'compose'),
        ('settings_notifications', 'subs'),
        ('settings', 'settings_muting'),
        ('message_fetch', 'tutorial'),
        ('settings', 'subs'),
        ('activity', 'narrow'),
        ('compose', 'compose_actions'),
        ('compose', 'subs'),
        ('compose_actions', 'drafts'),
        ('compose_actions', 'narrow'),
        ('compose_actions', 'unread_ops'),
        ('drafts', 'compose'),
        ('drafts', 'echo'),
        ('echo', 'compose'),
        ('echo', 'narrow'),
        ('echo', 'pm_list'),
        ('echo', 'ui'),
        ('message_fetch', 'activity'),
        ('message_fetch', 'narrow'),
        ('message_fetch', 'pm_list'),
        ('message_fetch', 'stream_list'),
        ('message_fetch', 'ui'),
        ('narrow', 'ui'),
        ('message_util', 'compose'),
        ('subs', 'compose'),
        ('narrow', 'hashchange'),
        ('subs', 'hashchange'),
        ('navigate', 'narrow'),
        ('navigate', 'stream_list'),
        ('pm_list', 'narrow'),
        ('pm_list', 'stream_popover'),
        ('muting_ui', 'stream_popover'),
        ('popovers', 'stream_popover'),
        ('topic_list', 'stream_popover'),
        ('stream_edit', 'subs'),
        ('topic_list', 'narrow'),
        ('stream_list', 'narrow'),
        ('stream_list', 'pm_list'),
        ('stream_list', 'unread_ops'),
        ('notifications', 'ui'),
        ('notifications', 'narrow'),
        ('notifications', 'unread_ops'),
        ('typing', 'narrow'),
        ('message_events', 'compose'),
        ('stream_muting', 'stream_list'),
        ('subs', 'narrow'),
        ('unread_ui', 'pm_list'),
        ('unread_ui', 'stream_list'),
        ('overlays', 'hashchange'),
        ('emoji_picker', 'reactions'),
    ]
    def cut_is_legal(edge):
        # type: (Edge) -> bool
        parent, child = edge
        if child in ['reload', 'popovers', 'overlays', 'notifications',
                     'server_events', 'compose_actions']:
            return True
        return edge in APPROVED_CUTS
    graph.remove_exterior_nodes()
    removed_edges = list()
    print()
    # Greedy loop: repeatedly cut the best removable edge.  Stop either when
    # the graph is empty, or when the best candidate has not been approved
    # (in which case we print it so a human can vet it).
    while graph.num_edges() > 0:
        edge = best_edge_to_remove(graph, is_exempt)
        if edge is None:
            print('we may not be allowing edge cuts!!!')
            break
        if cut_is_legal(edge):
            graph = graph.minus_edge(edge)
            graph.remove_exterior_nodes()
            removed_edges.append(edge)
        else:
            for removed_edge in removed_edges:
                print(removed_edge)
            print()
            edge_str = str(edge) + ','
            print(edge_str)
            for method in methods[edge]:
                print('    ' + method)
            break
    return graph, removed_edges
def report_roadmap(edges, methods):
    # type: (List[Edge], MethodDict) -> None
    """Print a cleanup roadmap: for each child module, the methods that
    other modules call on it, and which modules make those calls.
    """
    child_modules = {child for parent, child in edges}
    module_methods = defaultdict(set)  # type: DefaultDict[str, Set[str]]
    callers = defaultdict(set)  # type: DefaultDict[Tuple[str, str], Set[str]]
    for parent, child in edges:
        for method in methods[(parent, child)]:
            module_methods[child].add(method)
            callers[(child, method)].add(parent)
    for child in sorted(child_modules):
        print(child + '.js')
        # Sort the method set: bare set iteration order is nondeterministic
        # across runs (hash randomization), which made the report unstable.
        for method in sorted(module_methods[child]):
            print('    ' + child + '.' + method)
            for caller in sorted(callers[(child, method)]):
                print('        ' + caller + '.js')
        print()
    print()
def produce_partial_output(graph):
    # type: (Graph) -> None
    """Render what is left of the graph as a dot file plus a png image."""
    print(graph.num_edges())
    dot_source = make_dot_file(graph)
    graph.report()
    with open(OUTPUT_FILE_PATH, 'w') as outfile:
        outfile.write(dot_source)
    subprocess.check_call(["dot", "-Tpng", OUTPUT_FILE_PATH, "-o", PNG_FILE_PATH])
    print()
    for label, path in (('dot file here', OUTPUT_FILE_PATH),
                        ('output png file', PNG_FILE_PATH)):
        print('See {}: {}'.format(label, path))
def run():
    # type: () -> None
    """Drive the whole pipeline: scan the JS, prune the graph, then report."""
    edges, methods = get_js_edges()
    dep_graph = Graph(edges)
    dep_graph, removed_edges = find_edges_to_remove(dep_graph, methods)
    if dep_graph.num_edges() > 0:
        # Still cyclic: draw what remains so it can be inspected.
        produce_partial_output(dep_graph)
    else:
        # Fully reduced: print the cleanup roadmap instead.
        report_roadmap(removed_edges, methods)
# Script entry point; see the module docstring for usage.
if __name__ == '__main__':
    run()
| |
# -*- coding: utf-8 -*-
""" QuerySet for PolymorphicModel
Please see README.rst or DOCS.rst or http://chrisglass.github.com/django_polymorphic/
"""
from __future__ import absolute_import
from collections import defaultdict
import django
from django.db.models.query import QuerySet
from django.contrib.contenttypes.models import ContentType
from django.utils import six
from .query_translate import translate_polymorphic_filter_definitions_in_kwargs, translate_polymorphic_filter_definitions_in_args
from .query_translate import translate_polymorphic_field_path
# chunk-size: maximum number of objects requested per db-request
# by the polymorphic queryset.iterator() implementation; we use the same chunk size as Django
try:
    from django.db.models.query import CHUNK_SIZE # this is 100 for Django 1.1/1.2
except ImportError:
    # CHUNK_SIZE was removed in Django 1.6
    CHUNK_SIZE = 100
# Number of rows fetched per batch by PolymorphicQuerySet.iterator() below.
Polymorphic_QuerySet_objects_per_request = CHUNK_SIZE
def transmogrify(cls, obj):
    """
    Upcast a class to a different type without asking questions.

    If ``obj`` carries no instance-level ``__init__`` we simply rebind its
    ``__class__`` in place (and return the very same object); otherwise a
    fresh ``cls`` instance is constructed and ``obj``'s instance attributes
    are copied onto it.
    """
    if '__init__' not in obj.__dict__:
        # Just assign __class__ to a different value (mutates obj itself).
        new = obj
        new.__class__ = cls
    else:
        # Run the constructor, then copy the attribute values across.
        new = cls()
        for k, v in obj.__dict__.items():
            new.__dict__[k] = v
    return new
###################################################################################
### PolymorphicQuerySet
class PolymorphicQuerySet(QuerySet):
"""
QuerySet for PolymorphicModel
Contains the core functionality for PolymorphicModel
Usually not explicitly needed, except if a custom queryset class
is to be used.
"""
def __init__(self, *args, **kwargs):
"init our queryset object member variables"
self.polymorphic_disabled = False
super(PolymorphicQuerySet, self).__init__(*args, **kwargs)
def _clone(self, *args, **kwargs):
"Django's _clone only copies its own variables, so we need to copy ours here"
new = super(PolymorphicQuerySet, self)._clone(*args, **kwargs)
new.polymorphic_disabled = self.polymorphic_disabled
return new
def non_polymorphic(self, *args, **kwargs):
"""switch off polymorphic behaviour for this query.
When the queryset is evaluated, only objects of the type of the
base class used for this query are returned."""
qs = self._clone()
qs.polymorphic_disabled = True
return qs
def instance_of(self, *args):
"""Filter the queryset to only include the classes in args (and their subclasses).
Implementation in _translate_polymorphic_filter_defnition."""
return self.filter(instance_of=args)
def not_instance_of(self, *args):
"""Filter the queryset to exclude the classes in args (and their subclasses).
Implementation in _translate_polymorphic_filter_defnition."""
return self.filter(not_instance_of=args)
def _filter_or_exclude(self, negate, *args, **kwargs):
"We override this internal Django functon as it is used for all filter member functions."
translate_polymorphic_filter_definitions_in_args(self.model, args) # the Q objects
additional_args = translate_polymorphic_filter_definitions_in_kwargs(self.model, kwargs) # filter_field='data'
return super(PolymorphicQuerySet, self)._filter_or_exclude(negate, *(list(args) + additional_args), **kwargs)
def order_by(self, *args, **kwargs):
"""translate the field paths in the args, then call vanilla order_by."""
new_args = [translate_polymorphic_field_path(self.model, a) for a in args]
return super(PolymorphicQuerySet, self).order_by(*new_args, **kwargs)
def _process_aggregate_args(self, args, kwargs):
"""for aggregate and annotate kwargs: allow ModelX___field syntax for kwargs, forbid it for args.
Modifies kwargs if needed (these are Aggregate objects, we translate the lookup member variable)"""
def patch_lookup(a):
if django.VERSION < (1, 8):
a.lookup = translate_polymorphic_field_path(self.model, a.lookup)
else:
# With Django > 1.8, the field on which the aggregate operates is
# stored inside a query expression.
if hasattr(a, 'source_expressions'):
a.source_expressions[0].name = translate_polymorphic_field_path(
self.model, a.source_expressions[0].name)
get_lookup = lambda a: a.lookup if django.VERSION < (1, 8) else a.source_expressions[0].name
for a in args:
assert '___' not in get_lookup(a), 'PolymorphicModel: annotate()/aggregate(): ___ model lookup supported for keyword arguments only'
for a in six.itervalues(kwargs):
patch_lookup(a)
def annotate(self, *args, **kwargs):
"""translate the polymorphic field paths in the kwargs, then call vanilla annotate.
_get_real_instances will do the rest of the job after executing the query."""
self._process_aggregate_args(args, kwargs)
return super(PolymorphicQuerySet, self).annotate(*args, **kwargs)
def aggregate(self, *args, **kwargs):
"""translate the polymorphic field paths in the kwargs, then call vanilla aggregate.
We need no polymorphic object retrieval for aggregate => switch it off."""
self._process_aggregate_args(args, kwargs)
qs = self.non_polymorphic()
return super(PolymorphicQuerySet, qs).aggregate(*args, **kwargs)
# Since django_polymorphic 'V1.0 beta2', extra() always returns polymorphic results.^
# The resulting objects are required to have a unique primary key within the result set
# (otherwise an error is thrown).
# The "polymorphic" keyword argument is not supported anymore.
#def extra(self, *args, **kwargs):
def _get_real_instances(self, base_result_objects):
"""
Polymorphic object loader
Does the same as:
return [ o.get_real_instance() for o in base_result_objects ]
but more efficiently.
The list base_result_objects contains the objects from the executed
base class query. The class of all of them is self.model (our base model).
Some, many or all of these objects were not created and stored as
class self.model, but as a class derived from self.model. We want to re-fetch
these objects from the db as their original class so we can return them
just as they were created/saved.
We identify these objects by looking at o.polymorphic_ctype, which specifies
the real class of these objects (the class at the time they were saved).
First, we sort the result objects in base_result_objects for their
subclass (from o.polymorphic_ctype), and then we execute one db query per
subclass of objects. Here, we handle any annotations from annotate().
Finally we re-sort the resulting objects into the correct order and
return them as a list.
"""
ordered_id_list = [] # list of ids of result-objects in correct order
results = {} # polymorphic dict of result-objects, keyed with their id (no order)
# dict contains one entry per unique model type occurring in result,
# in the format idlist_per_model[modelclass]=[list-of-object-ids]
idlist_per_model = defaultdict(list)
# django's automatic ".pk" field does not always work correctly for
# custom fields in derived objects (unclear yet who to put the blame on).
# We get different type(o.pk) in this case.
# We work around this by using the real name of the field directly
# for accessing the primary key of the the derived objects.
# We might assume that self.model._meta.pk.name gives us the name of the primary key field,
# but it doesn't. Therefore we use polymorphic_primary_key_name, which we set up in base.py.
pk_name = self.model.polymorphic_primary_key_name
# - sort base_result_object ids into idlist_per_model lists, depending on their real class;
# - also record the correct result order in "ordered_id_list"
# - store objects that already have the correct class into "results"
base_result_objects_by_id = {}
self_model_class_id = ContentType.objects.get_for_model(self.model, for_concrete_model=False).pk
self_concrete_model_class_id = ContentType.objects.get_for_model(self.model, for_concrete_model=True).pk
for base_object in base_result_objects:
ordered_id_list.append(base_object.pk)
# check if id of the result object occeres more than once - this can happen e.g. with base_objects.extra(tables=...)
if not base_object.pk in base_result_objects_by_id:
base_result_objects_by_id[base_object.pk] = base_object
if base_object.polymorphic_ctype_id == self_model_class_id:
# Real class is exactly the same as base class, go straight to results
results[base_object.pk] = base_object
else:
real_concrete_class = base_object.get_real_instance_class()
real_concrete_class_id = base_object.get_real_concrete_instance_class_id()
if real_concrete_class_id is None:
# Dealing with a stale content type
continue
elif real_concrete_class_id == self_concrete_model_class_id:
# Real and base classes share the same concrete ancestor,
# upcast it and put it in the results
results[base_object.pk] = transmogrify(real_concrete_class, base_object)
else:
real_concrete_class = ContentType.objects.get_for_id(real_concrete_class_id).model_class()
idlist_per_model[real_concrete_class].append(getattr(base_object, pk_name))
# For each model in "idlist_per_model" request its objects (the real model)
# from the db and store them in results[].
# Then we copy the annotate fields from the base objects to the real objects.
# Then we copy the extra() select fields from the base objects to the real objects.
# TODO: defer(), only(): support for these would be around here
for real_concrete_class, idlist in idlist_per_model.items():
real_objects = real_concrete_class.base_objects.filter(**{
('%s__in' % pk_name): idlist,
})
real_objects.query.select_related = self.query.select_related # copy select related configuration to new qs
for real_object in real_objects:
o_pk = getattr(real_object, pk_name)
real_class = real_object.get_real_instance_class()
# If the real class is a proxy, upcast it
if real_class != real_concrete_class:
real_object = transmogrify(real_class, real_object)
if self.query.aggregates:
for anno_field_name in six.iterkeys(self.query.aggregates):
attr = getattr(base_result_objects_by_id[o_pk], anno_field_name)
setattr(real_object, anno_field_name, attr)
if self.query.extra_select:
for select_field_name in six.iterkeys(self.query.extra_select):
attr = getattr(base_result_objects_by_id[o_pk], select_field_name)
setattr(real_object, select_field_name, attr)
results[o_pk] = real_object
# re-create correct order and return result list
resultlist = [results[ordered_id] for ordered_id in ordered_id_list if ordered_id in results]
# set polymorphic_annotate_names in all objects (currently just used for debugging/printing)
if self.query.aggregates:
annotate_names = six.iterkeys(self.query.aggregates) # get annotate field list
for real_object in resultlist:
real_object.polymorphic_annotate_names = annotate_names
# set polymorphic_extra_select_names in all objects (currently just used for debugging/printing)
if self.query.extra_select:
extra_select_names = six.iterkeys(self.query.extra_select) # get extra select field list
for real_object in resultlist:
real_object.polymorphic_extra_select_names = extra_select_names
return resultlist
def iterator(self):
    """
    This function is used by Django for all object retrieval.
    By overriding it, we modify the objects that this queryset returns
    when it is evaluated (or its get method or other object-returning
    methods are called).

    Equivalent to::

        base_result_objects = list(super(PolymorphicQuerySet, self).iterator())
        real_results = self._get_real_instances(base_result_objects)
        for o in real_results:
            yield o

    but it requests the objects in chunks from the database,
    with Polymorphic_QuerySet_objects_per_request per chunk.
    """
    base_iter = super(PolymorphicQuerySet, self).iterator()
    # disabled => work just like a normal queryset
    if self.polymorphic_disabled:
        for o in base_iter:
            yield o
        # BUGFIX (PEP 479): a bare ``raise StopIteration`` inside a
        # generator is turned into a RuntimeError on Python 3.7+;
        # ``return`` is the correct way to finish a generator early.
        return
    while True:
        base_result_objects = []
        reached_end = False
        # pull up to one chunk of base objects from the underlying iterator
        for i in range(Polymorphic_QuerySet_objects_per_request):
            try:
                base_result_objects.append(next(base_iter))
            except StopIteration:
                reached_end = True
                break
        real_results = self._get_real_instances(base_result_objects)
        for o in real_results:
            yield o
        if reached_end:
            # same PEP 479 fix as above
            return
def __repr__(self, *args, **kwargs):
    """Render one object per line when the model asks for multi-line output."""
    if not self.model.polymorphic_query_multiline_output:
        return super(PolymorphicQuerySet, self).__repr__(*args, **kwargs)
    rendered = [repr(obj) for obj in self.all()]
    return '[ ' + ',\n '.join(rendered) + ' ]'
class _p_list_class(list):
def __repr__(self, *args, **kwargs):
result = [repr(o) for o in self]
return '[ ' + ',\n '.join(result) + ' ]'
def get_real_instances(self, base_result_objects=None):
    """Same as _get_real_instances, but makes sure __repr__ for ShowField...
    creates correct (multi-line) output when the model requests it."""
    if not base_result_objects:
        base_result_objects = self
    olist = self._get_real_instances(base_result_objects)
    if self.model.polymorphic_query_multiline_output:
        return PolymorphicQuerySet._p_list_class(olist)
    return olist
| |
'''
Created on Mar 21, 2017
@author: matija
Functions for computing similarity between two datasets based on their keys.
Here the person is the key, and the values are that person's ratings for movies.
Similarities are computed in this module with two functions:
1. Euclidean distance
2. Pearson correlation
From my experience running these functions against the same data set:
1. Euclidean distance is very harsh on differing ratings and decreases similarity by a big margin.
2. Pearson is not as harsh as Euclidean and looks more natural to me when computing the similarity result.
Also from my experience, if the preferences are charted in only one region of the graph then Pearson will behave very strangely,
which means it looks good only if the data is charted across the whole graph (or at least one preference falls in every 'region').
'''
from math import sqrt
import numpy as np
import matplotlib.pyplot as plt
# Extended critics data set; BUGFIX: Claudia Puig's 'You, Me and Dupree' key
# had a stray leading space (' You, Me and Dupree'), which made it a distinct
# movie from everyone else's and silently excluded it from all comparisons.
criticsCustom={
'Lisa Rose': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.5, 'Superman':2.5, 'Batman':2,
 'Just My Luck': 3.0, 'Superman Returns': 3.5, 'You, Me and Dupree': 2.5,
 'The Night Listener': 3.0},
'Gene Seymour': {'Lady in the Water': 3.0, 'Snakes on a Plane': 3.5, 'Superman':2.5, 'Batman':3,
 'Just My Luck': 1.5, 'Superman Returns': 5.0, 'The Night Listener': 3.0,
 'You, Me and Dupree': 3.5},
'Michael Phillips': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.0,'Superman':2.5, 'Batman':4,
 'Superman Returns': 3.5, 'The Night Listener': 4.0},
'Claudia Puig': {'Snakes on a Plane': 3.5, 'Just My Luck': 3.0,'Superman':2.5, 'Batman':5,
 'The Night Listener': 4.5, 'Superman Returns': 4.0,
 'You, Me and Dupree': 2.5},
'Mick LaSalle': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0,'Superman':2.5, 'Batman':5,
 'Just My Luck': 2.0, 'Superman Returns': 3.0, 'The Night Listener': 3.0,
 'You, Me and Dupree': 2.0},
'Jack Matthews': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0,'Superman':2.5, 'Batman':4,
 'The Night Listener': 3.0, 'Superman Returns': 5.0, 'You, Me and Dupree': 3.5},
'Toby': {'Snakes on a Plane':3.0,'You, Me and Dupree':3.5,'Superman Returns':5.0, 'Lady in the Water':3.0,'Superman':2.5, 'Batman':3},
'Matija' : {'Batman':3}
}
# A dictionary of movie critics and their ratings of a small
# set of movies
# NOTE: keys are critic names; values map movie title -> rating (0.0-5.0).
# 'Toby' rates only three movies and is the demo target of the script below.
critics={'Lisa Rose': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.5,
 'Just My Luck': 3.0, 'Superman Returns': 3.5, 'You, Me and Dupree': 2.5,
 'The Night Listener': 3.0},
'Gene Seymour': {'Lady in the Water': 3.0, 'Snakes on a Plane': 3.5,
 'Just My Luck': 1.5, 'Superman Returns': 5.0, 'The Night Listener': 3.0,
 'You, Me and Dupree': 3.5},
'Michael Phillips': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.0,
 'Superman Returns': 3.5, 'The Night Listener': 4.0},
'Claudia Puig': {'Snakes on a Plane': 3.5, 'Just My Luck': 3.0,
 'The Night Listener': 4.5, 'Superman Returns': 4.0,
 'You, Me and Dupree': 2.5},
'Mick LaSalle': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0,
 'Just My Luck': 2.0, 'Superman Returns': 3.0, 'The Night Listener': 3.0,
 'You, Me and Dupree': 2.0},
'Jack Matthews': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0,
 'The Night Listener': 3.0, 'Superman Returns': 5.0, 'You, Me and Dupree': 3.5},
'Toby': {'Snakes on a Plane':4.5,'You, Me and Dupree':1.0,'Superman Returns':4.0}}
# if two films were plot on the graph (x and y axis) and the people were charted in preference space( some coordinate in the graph(x,y))
# then this function will calculate the distance between the two
#the higher the value the similar preferences are (people share same opinions about films)
def sim_euclidean_distance(prefs, person1, person2):
    """Euclidean-distance-based similarity of two people in ``prefs``.

    Returns a score in (0, 1]; 1.0 means identical ratings on all shared
    items, 0 means no items rated in common.  BUGFIX: the original returned
    ``1/(1+sum_of_squares)`` which floor-divides to 0 under Python 2 when
    all ratings are integers; forcing float division fixes that.  Also scans
    the shared-items intersection once instead of twice.
    """
    # items rated by both people
    shared = [item for item in prefs[person1] if item in prefs[person2]]
    # if they have no ratings in common, return 0
    if not shared:
        return 0
    # add up squares of all the differences over the shared items
    sum_of_squares = sum(pow(prefs[person1][item] - prefs[person2][item], 2)
                         for item in shared)
    return 1.0 / (1 + sum_of_squares)
#person 1 critics are represented on x axis while person2 critics on y axis
#films are charted in preference space matching the value of the persons axis
#this function will plot the line which is exactly in the middle of the each of persons critics for every movie
# and will return a score which represents the 'similarity of persons preferences
def sim_pearson(prefs, person1, person2):
    """Pearson correlation of two people's ratings over their shared items.

    Returns a value in [-1, 1]; 0 when there are no shared items or the
    denominator vanishes.  BUGFIX: ``n`` is forced to float so the
    ``sum1*sum2/n`` and ``pow(sum1,2)/n`` terms do not truncate under
    Python 2 when the ratings are integers.
    """
    # list of mutually rated items
    si = [item for item in prefs[person1] if item in prefs[person2]]
    n = float(len(si))  # float: avoids integer truncation on Python 2
    # if no common critic subject
    if n == 0:
        return 0
    # add up all the preferences
    sum1 = sum(prefs[person1][it] for it in si)
    sum2 = sum(prefs[person2][it] for it in si)
    # add up all squares of the preferences
    sum1Sq = sum(pow(prefs[person1][it], 2) for it in si)
    sum2Sq = sum(pow(prefs[person2][it], 2) for it in si)
    # sum up the products of the preferences (p1Pref * p2Pref)
    pSum = sum(prefs[person1][it] * prefs[person2][it] for it in si)
    # compute pearson score
    num = pSum - (sum1 * sum2 / n)
    den = sqrt((sum1Sq - pow(sum1, 2) / n) * (sum2Sq - pow(sum2, 2) / n))
    if den == 0:
        return 0
    return num / den
#compares every key(person) critics from data set against one entry(persons critics, in this case let it be mine)
#returns sorted list by best recommendations that should one follow(the argument person which was against whole set)
#so u can see which person have most similar tastes like you do and pick his advices for movies
def top_matches(prefs, person, n=5, similarity=sim_pearson):
    """Return up to ``n`` ``(score, other)`` pairs, most similar first.

    Compares ``person`` against every other key in ``prefs`` using the
    given similarity function.
    """
    scores = [(similarity(prefs, person, other), other)
              for other in prefs if other != person]
    # highest similarity scores first
    scores.sort(reverse=True)
    return scores[:n]
#gets recommendations for films that person(argument) didn't watch
# but the people who have watched the same film as person did watch those films
#reccommendations are working on weighted average prinicpal
#of every other user's rankings
def getRecommendations(prefs, person, precomputedUserSims=None, similarity=sim_pearson):
    """Recommend items ``person`` has not rated, as ``(score, item)`` pairs.

    Scores are a similarity-weighted average of every other user's ratings.
    When ``precomputedUserSims`` (a list of ``(sim, user)`` pairs, e.g. from
    ``userSimilarities``) is given, those similarities are used instead of
    recomputing them.

    BUGFIX: the precomputed branch now skips non-positive similarities, like
    the computed branch always did; previously a zero similarity could make
    ``simSums[item]`` zero and crash the normalization below with
    ZeroDivisionError (and negative sims corrupted the ranking).
    """
    totals = {}
    simSums = {}
    if precomputedUserSims is None:
        for other in prefs:
            # don't compare me to myself
            if other == person:
                continue
            sim = similarity(prefs, person, other)
            # ignore scores of zero or lower
            if sim <= 0:
                continue
            for item in prefs[other]:
                # only score movies that I haven't seen
                if item not in prefs[person] or prefs[person][item] == 0:
                    # Similarity * Score
                    totals.setdefault(item, 0)
                    totals[item] += prefs[other][item] * sim
                    # Sum of similarities
                    simSums.setdefault(item, 0)
                    simSums[item] += sim
    else:
        for sim, user in precomputedUserSims:
            if person == user:
                continue
            # ignore scores of zero or lower (see BUGFIX note above)
            if sim <= 0:
                continue
            for item in prefs[user]:
                if item not in prefs[person]:
                    totals.setdefault(item, 0)
                    totals[item] += prefs[user][item] * sim
                    simSums.setdefault(item, 0)
                    simSums[item] += sim
    # Create normalized list to minimize the advantage of films
    # that were reviewed by more users
    rankings = [(total / simSums[item], item) for item, total in totals.items()]
    rankings.sort(reverse=True)
    return rankings
def userSimilarities(prefs, person, n):
    """Convenience wrapper: the ``n`` users most similar to ``person``."""
    matches = top_matches(prefs, person, n)
    return matches
def tanimotoScore(a, b):
    """Tanimoto coefficient of two sequences: |intersection| / |union|.

    BUGFIX: under Python 2 the original ``len(c)/(...)`` performed integer
    floor division, so the score was almost always 0; float division
    restores the intended ratio in [0, 1].
    """
    c = [v for v in a if v in b]
    return float(len(c)) / (len(a) + len(b) - len(c))
def transformPrefs(prefs):
    """Invert a nested prefs mapping: result[item][person] = score.

    Turns a person-centric rating dict into an item-centric one.
    """
    flipped = {}
    for person, ratings in prefs.items():
        for item, score in ratings.items():
            # flip the result: item becomes the outer key
            flipped.setdefault(item, {})[person] = score
    return flipped
def calculateSimilarItems(prefs, n=10):
    """Create a dictionary mapping each item to its ``n`` most similar items.

    Inverts the preference matrix to be item-centric, then finds the top
    matches for each item using Euclidean-distance similarity.

    BUGFIX: the progress report used a Python-2-only ``print`` statement;
    the parenthesized form below is valid in both Python 2 and 3.
    """
    result = {}
    # invert the preference matrix to be item-centric
    itemPrefs = transformPrefs(prefs)
    c = 0
    for item in itemPrefs:
        # Status updates for large datasets
        c += 1
        if c % 100 == 0:
            print("%d / %d" % (c, len(itemPrefs)))
        # Find most similar items to this one
        scores = top_matches(itemPrefs, item, n, similarity=sim_euclidean_distance)
        result[item] = scores
    return result
def getRecommendedItems(prefs, itemMatch, user):
    """Recommend unseen items for ``user`` from a precomputed item-similarity
    dict (as produced by ``calculateSimilarItems``).

    Returns ``(score, item)`` pairs, best first, where each score is the
    similarity-weighted average of the user's own ratings.
    """
    userRatings = prefs[user]
    scores = {}
    totalSim = {}
    # walk every item the user has rated ...
    for item, rating in userRatings.items():
        # ... and every item similar to it
        for similarity, other_item in itemMatch[item]:
            # skip items the user has already rated
            if other_item in userRatings:
                continue
            # weighted sum of rating times similarity
            scores[other_item] = scores.get(other_item, 0) + similarity * rating
            # running sum of the similarities for normalization
            totalSim[other_item] = totalSim.get(other_item, 0) + similarity
    # divide each total score by total weighting to get an average
    rankings = [(score / totalSim[item], item) for item, score in scores.items()]
    # highest scores first
    rankings.sort()
    rankings.reverse()
    return rankings
def loadMoviesLens(path='/home/matija/Desktop/ml-latest-small/'):
    """Load the MovieLens CSV dataset into a prefs dict: user -> {title: rating}.

    BUGFIX: the original used ``line.split(',')``, which corrupts titles that
    contain commas (movies.csv quotes such titles); the ``csv`` module parses
    quoting correctly.  Files are now also closed deterministically via
    ``with``.
    """
    import csv
    # Get movie titles: id -> title
    movies = {}
    with open(path + "/movies.csv") as handle:
        reader = csv.reader(handle)
        next(reader, None)  # skip the header row
        for row in reader:
            movies[row[0]] = row[1]
    # Load ratings data
    prefs = {}
    with open(path + '/ratings.csv') as handle:
        reader = csv.reader(handle)
        next(reader, None)  # skip the header row
        for row in reader:
            user, movieid, rating = row[0], row[1], row[2]
            prefs.setdefault(user, {})
            prefs[user][movies[movieid]] = float(rating)
    return prefs
# Demo run.  BUGFIX: Python-2-only ``print`` statements replaced with the
# parenthesized form, valid on both Python 2 and 3.
print(getRecommendations(critics, 'Toby'))
precomputedUserSims = userSimilarities(critics, 'Toby', 5)
print(getRecommendations(critics, 'Toby', precomputedUserSims))
| |
# orm/properties.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""MapperProperty implementations.
This is a private module which defines the behavior of invidual ORM-
mapped attributes.
"""
from __future__ import absolute_import
from .. import util, log
from ..sql import expression
from . import attributes
from .util import _orm_full_deannotate
from .interfaces import PropComparator, StrategizedProperty
__all__ = ['ColumnProperty', 'CompositeProperty', 'SynonymProperty',
'ComparableProperty', 'RelationshipProperty']
@log.class_logger
class ColumnProperty(StrategizedProperty):
    """Describes an object attribute that corresponds to a table column.
    Public constructor is the :func:`.orm.column_property` function.
    """
    # wildcard key used by the strategy system to address "all column" loads
    strategy_wildcard_key = 'column'
    # fixed attribute set; ColumnProperty instances are numerous, so slots
    # keep per-instance memory down
    __slots__ = (
        '_orig_columns', 'columns', 'group', 'deferred',
        'instrument', 'comparator_factory', 'descriptor', 'extension',
        'active_history', 'expire_on_flush', 'info', 'doc',
        'strategy_class', '_creation_order', '_is_polymorphic_discriminator',
        '_mapped_by_synonym', '_deferred_loader')
    def __init__(self, *columns, **kwargs):
        """Provide a column-level property for use with a Mapper.
        Column-based properties can normally be applied to the mapper's
        ``properties`` dictionary using the :class:`.Column` element directly.
        Use this function when the given column is not directly present within
        the mapper's selectable; examples include SQL expressions, functions,
        and scalar SELECT queries.
        Columns that aren't present in the mapper's selectable won't be
        persisted by the mapper and are effectively "read-only" attributes.
        :param \*cols:
              list of Column objects to be mapped.
        :param active_history=False:
          When ``True``, indicates that the "previous" value for a
          scalar attribute should be loaded when replaced, if not
          already loaded. Normally, history tracking logic for
          simple non-primary-key scalar values only needs to be
          aware of the "new" value in order to perform a flush. This
          flag is available for applications that make use of
          :func:`.attributes.get_history` or :meth:`.Session.is_modified`
          which also need to know
          the "previous" value of the attribute.
          .. versionadded:: 0.6.6
        :param comparator_factory: a class which extends
           :class:`.ColumnProperty.Comparator` which provides custom SQL
           clause generation for comparison operations.
        :param group:
            a group name for this property when marked as deferred.
        :param deferred:
              when True, the column property is "deferred", meaning that
              it does not load immediately, and is instead loaded when the
              attribute is first accessed on an instance.  See also
              :func:`~sqlalchemy.orm.deferred`.
        :param doc:
              optional string that will be applied as the doc on the
              class-bound descriptor.
        :param expire_on_flush=True:
            Disable expiry on flush.   A column_property() which refers
            to a SQL expression (and not a single table-bound column)
            is considered to be a "read only" property; populating it
            has no effect on the state of data, and it can only return
            database state.   For this reason a column_property()'s value
            is expired whenever the parent object is involved in a
            flush, that is, has any kind of "dirty" state within a flush.
            Setting this parameter to ``False`` will have the effect of
            leaving any existing value present after the flush proceeds.
            Note however that the :class:`.Session` with default expiration
            settings still expires
            all attributes after a :meth:`.Session.commit` call, however.
            .. versionadded:: 0.7.3
        :param info: Optional data dictionary which will be populated into the
            :attr:`.MapperProperty.info` attribute of this object.
            .. versionadded:: 0.8
        :param extension:
            an
            :class:`.AttributeExtension`
            instance, or list of extensions, which will be prepended
            to the list of attribute listeners for the resulting
            descriptor placed on the class.
            **Deprecated.** Please see :class:`.AttributeEvents`.
        """
        super(ColumnProperty, self).__init__()
        # keep both the columns as given and a de-annotated working copy
        self._orig_columns = [expression._labeled(c) for c in columns]
        self.columns = [expression._labeled(_orm_full_deannotate(c))
                        for c in columns]
        self.group = kwargs.pop('group', None)
        self.deferred = kwargs.pop('deferred', False)
        self.instrument = kwargs.pop('_instrument', True)
        self.comparator_factory = kwargs.pop('comparator_factory',
                                             self.__class__.Comparator)
        self.descriptor = kwargs.pop('descriptor', None)
        self.extension = kwargs.pop('extension', None)
        self.active_history = kwargs.pop('active_history', False)
        self.expire_on_flush = kwargs.pop('expire_on_flush', True)
        if 'info' in kwargs:
            self.info = kwargs.pop('info')
        if 'doc' in kwargs:
            self.doc = kwargs.pop('doc')
        else:
            # fall back to the doc of the last column that carries one
            for col in reversed(self.columns):
                doc = getattr(col, 'doc', None)
                if doc is not None:
                    self.doc = doc
                    break
            else:
                self.doc = None
        # any keyword left over at this point was not recognized
        if kwargs:
            raise TypeError(
                "%s received unexpected keyword argument(s): %s" % (
                    self.__class__.__name__,
                    ', '.join(sorted(kwargs.keys()))))
        util.set_creation_order(self)
        # select the loader strategy matching the deferred/instrument flags
        self.strategy_class = self._strategy_lookup(
            ("deferred", self.deferred),
            ("instrument", self.instrument)
        )
    # memoized per-instance (see util.MemoizedSlots naming convention):
    # builds the callable that loads this deferred column on first access
    @util.dependencies("sqlalchemy.orm.state", "sqlalchemy.orm.strategies")
    def _memoized_attr__deferred_column_loader(self, state, strategies):
        return state.InstanceState._instance_level_callable_processor(
            self.parent.class_manager,
            strategies.LoadDeferredColumns(self.key), self.key)
    @property
    def expression(self):
        """Return the primary column or expression for this ColumnProperty.
        """
        return self.columns[0]
    def instrument_class(self, mapper):
        # attach the class-bound descriptor, unless instrumentation is
        # disabled for this property
        if not self.instrument:
            return
        attributes.register_descriptor(
            mapper.class_,
            self.key,
            comparator=self.comparator_factory(self, mapper),
            parententity=mapper,
            doc=self.doc
        )
    def do_init(self):
        super(ColumnProperty, self).do_init()
        # warn when several distinct primary key columns end up mapped
        # under a single attribute name
        if len(self.columns) > 1 and \
                set(self.parent.primary_key).issuperset(self.columns):
            util.warn(
                ("On mapper %s, primary key column '%s' is being combined "
                 "with distinct primary key column '%s' in attribute '%s'. "
                 "Use explicit properties to give each column its own mapped "
                 "attribute name.") % (self.parent, self.columns[1],
                                       self.columns[0], self.key))
    def copy(self):
        # shallow copy used when a property is re-assigned to a subclass
        # mapper; only the user-facing configuration flags are carried over
        return ColumnProperty(
            deferred=self.deferred,
            group=self.group,
            active_history=self.active_history,
            *self.columns)
    def _getcommitted(self, state, dict_, column,
                      passive=attributes.PASSIVE_OFF):
        # return the last-flushed value for this attribute
        return state.get_impl(self.key).\
            get_committed_value(state, dict_, passive=passive)
    def merge(self, session, source_state, source_dict, dest_state,
              dest_dict, load, _recursive):
        # Session.merge() support: copy (or expire) this attribute from the
        # source instance onto the destination instance
        if not self.instrument:
            return
        elif self.key in source_dict:
            value = source_dict[self.key]
            if not load:
                dest_dict[self.key] = value
            else:
                impl = dest_state.get_impl(self.key)
                impl.set(dest_state, dest_dict, value, None)
        elif dest_state.has_identity and self.key not in dest_dict:
            # value not present on the source; force a reload on the dest
            dest_state._expire_attributes(dest_dict, [self.key])
    class Comparator(util.MemoizedSlots, PropComparator):
        """Produce boolean, comparison, and other operators for
        :class:`.ColumnProperty` attributes.
        See the documentation for :class:`.PropComparator` for a brief
        overview.
        See also:
        :class:`.PropComparator`
        :class:`.ColumnOperators`
        :ref:`types_operators`
        :attr:`.TypeEngine.comparator_factory`
        """
        __slots__ = '__clause_element__', 'info'
        # memoized: the SQL clause this comparator stands for
        def _memoized_method___clause_element__(self):
            if self.adapter:
                return self.adapter(self.prop.columns[0])
            else:
                # no adapter, so we aren't aliased
                # assert self._parententity is self._parentmapper
                return self.prop.columns[0]._annotate({
                    "parententity": self._parententity,
                    "parentmapper": self._parententity})
        def _memoized_attr_info(self):
            # prefer the column's info dict; fall back to the property's
            ce = self.__clause_element__()
            try:
                return ce.info
            except AttributeError:
                return self.prop.info
        def _fallback_getattr(self, key):
            """proxy attribute access down to the mapped column.
            this allows user-defined comparison methods to be accessed.
            """
            return getattr(self.__clause_element__(), key)
        def operate(self, op, *other, **kwargs):
            return op(self.__clause_element__(), *other, **kwargs)
        def reverse_operate(self, op, other, **kwargs):
            # other <op> column: bind the literal side first
            col = self.__clause_element__()
            return op(col._bind_param(op, other), col, **kwargs)
    def __str__(self):
        return str(self.parent.class_.__name__) + "." + self.key
| |
from __future__ import division, print_function, absolute_import
from numpy import (logical_and, asarray, pi, zeros_like,
piecewise, array, arctan2, tan, zeros, arange, floor)
from numpy.core.umath import (sqrt, exp, greater, less, cos, add, sin,
less_equal, greater_equal)
from scipy._lib.six import xrange
# From splinemodule.c
from .spline import cspline2d, sepfir2d
from scipy.special import comb, gamma
__all__ = ['spline_filter', 'bspline', 'gauss_spline', 'cubic', 'quadratic',
'cspline1d', 'qspline1d', 'cspline1d_eval', 'qspline1d_eval']
def factorial(n):
    """Real-valued factorial computed via the gamma function."""
    shifted = n + 1
    return gamma(shifted)
def spline_filter(Iin, lmbda=5.0):
    """Smoothing spline (cubic) filtering of a rank-2 array.

    Filter an input data set, `Iin`, using a (cubic) smoothing spline of
    fall-off `lmbda`.  Complex input is filtered on its real and imaginary
    parts separately; the result keeps the input dtype.
    """
    typecode = Iin.dtype.char
    # mirror-symmetric reconstruction kernel for cubic splines
    kernel = array([1.0, 4.0, 1.0], 'f') / 6.0
    if typecode in 'FD':
        Iin = Iin.astype('F')
        coeff_re = cspline2d(Iin.real, lmbda)
        coeff_im = cspline2d(Iin.imag, lmbda)
        filtered = (sepfir2d(coeff_re, kernel, kernel) +
                    1j * sepfir2d(coeff_im, kernel, kernel))
        return filtered.astype(typecode)
    if typecode in 'fd':
        coeff = cspline2d(Iin, lmbda)
        return sepfir2d(coeff, kernel, kernel).astype(typecode)
    raise TypeError("Invalid data type for Iin")
# cache of (funclist, condfuncs) keyed by spline order
_splinefunc_cache = {}
def _bspline_piecefunctions(order):
    """Returns the function defined over the left-side pieces for a bspline of
    a given order.
    The 0th piece is the first one less than 0.  The last piece is a function
    identical to 0 (returned as the constant 0).  (There are order//2 + 2 total
    pieces).
    Also returns the condition functions that when evaluated return boolean
    arrays for use with `numpy.piecewise`.
    """
    # memoized per order: the generated closures are reused across calls
    try:
        return _splinefunc_cache[order]
    except KeyError:
        pass
    def condfuncgen(num, val1, val2):
        # num selects the interval shape: 0 = closed [val2, val1],
        # 2 = unbounded left tail (<= val2), otherwise half-open [val2, val1)
        if num == 0:
            return lambda x: logical_and(less_equal(x, val1),
                                         greater_equal(x, val2))
        elif num == 2:
            return lambda x: less_equal(x, val2)
        else:
            return lambda x: logical_and(less(x, val1),
                                         greater_equal(x, val2))
    last = order // 2 + 2
    # odd orders have knots on integers, even orders on half-integers
    if order % 2:
        startbound = -1.0
    else:
        startbound = -0.5
    condfuncs = [condfuncgen(0, 0, startbound)]
    bound = startbound
    for num in xrange(1, last - 1):
        condfuncs.append(condfuncgen(1, bound, bound - 1))
        bound = bound - 1
    condfuncs.append(condfuncgen(2, 0, -(order + 1) / 2.0))
    # final value of bound is used in piecefuncgen below
    # the functions to evaluate are taken from the left-hand-side
    # in the general expression derived from the central difference
    # operator (because they involve fewer terms).
    fval = factorial(order)
    def piecefuncgen(num):
        # polynomial for piece `num`, built from alternating-sign
        # binomial coefficients of the central-difference expansion
        Mk = order // 2 - num
        if (Mk < 0):
            return 0  # final function is 0
        coeffs = [(1 - 2 * (k % 2)) * float(comb(order + 1, k, exact=1)) / fval
                  for k in xrange(Mk + 1)]
        shifts = [-bound - k for k in xrange(Mk + 1)]
        def thefunc(x):
            res = 0.0
            for k in range(Mk + 1):
                res += coeffs[k] * (x + shifts[k]) ** order
            return res
        return thefunc
    funclist = [piecefuncgen(k) for k in xrange(last)]
    _splinefunc_cache[order] = (funclist, condfuncs)
    return funclist, condfuncs
def bspline(x, n):
    """B-spline basis function of order n.

    Notes
    -----
    Uses numpy.piecewise and the automatic piece/condition generator
    `_bspline_piecefunctions`.
    """
    # splines are symmetric; evaluate on the non-positive half-axis
    # (the number of pieces on the left side is (n+1)/2)
    ax = -abs(asarray(x))
    pieces, conditions = _bspline_piecefunctions(n)
    masks = [cond(ax) for cond in conditions]
    return piecewise(ax, masks, pieces)
def gauss_spline(x, n):
    """Gaussian approximation to B-spline basis function of order n.

    The approximating Gaussian has variance (n + 1) / 12.
    """
    variance = (n + 1) / 12.0
    normalization = 1 / sqrt(2 * pi * variance)
    return normalization * exp(-x ** 2 / 2 / variance)
def cubic(x):
    """A cubic B-spline.

    This is a special case of `bspline`, and equivalent to ``bspline(x, 3)``.
    """
    ax = abs(asarray(x))
    out = zeros_like(ax)
    # inner region |x| < 1
    inner = less(ax, 1)
    if inner.any():
        a = ax[inner]
        out[inner] = 2.0 / 3 - 1.0 / 2 * a ** 2 * (2 - a)
    # outer region 1 <= |x| < 2
    outer = ~inner & less(ax, 2)
    if outer.any():
        a = ax[outer]
        out[outer] = 1.0 / 6 * (2 - a) ** 3
    # |x| >= 2 stays 0
    return out
def quadratic(x):
    """A quadratic B-spline.

    This is a special case of `bspline`, and equivalent to ``bspline(x, 2)``.
    """
    ax = abs(asarray(x))
    out = zeros_like(ax)
    # inner region |x| < 0.5
    inner = less(ax, 0.5)
    if inner.any():
        a = ax[inner]
        out[inner] = 0.75 - a ** 2
    # outer region 0.5 <= |x| < 1.5
    outer = ~inner & less(ax, 1.5)
    if outer.any():
        a = ax[outer]
        out[outer] = (a - 1.5) ** 2 / 2.0
    # |x| >= 1.5 stays 0
    return out
def _coeff_smooth(lam):
xi = 1 - 96 * lam + 24 * lam * sqrt(3 + 144 * lam)
omeg = arctan2(sqrt(144 * lam - 1), sqrt(xi))
rho = (24 * lam - 1 - sqrt(xi)) / (24 * lam)
rho = rho * sqrt((48 * lam + 24 * lam * sqrt(3 + 144 * lam)) / xi)
return rho, omeg
def _hc(k, cs, rho, omega):
return (cs / sin(omega) * (rho ** k) * sin(omega * (k + 1)) *
greater(k, -1))
def _hs(k, cs, rho, omega):
c0 = (cs * cs * (1 + rho * rho) / (1 - rho * rho) /
(1 - 2 * rho * rho * cos(2 * omega) + rho ** 4))
gamma = (1 - rho * rho) / (1 + rho * rho) / tan(omega)
ak = abs(k)
return c0 * rho ** ak * (cos(omega * ak) + gamma * sin(omega * ak))
def _cubic_smooth_coeff(signal, lamb):
    """Smoothing cubic-spline coefficients for a rank-1 signal.

    Implements a forward recursive pass followed by a backward pass of the
    second-order smoothing filter whose pole (rho, omega) is derived from
    the smoothing parameter ``lamb``.  The first two samples of each pass
    are initialized from the filter's infinite impulse response so that the
    boundaries behave mirror-symmetrically.
    """
    rho, omega = _coeff_smooth(lamb)
    cs = 1 - 2 * rho * cos(omega) + rho * rho
    K = len(signal)
    yp = zeros((K,), signal.dtype.char)
    k = arange(K)
    # forward pass: seed yp[0], yp[1] from the causal impulse response ...
    yp[0] = (_hc(0, cs, rho, omega) * signal[0] +
             add.reduce(_hc(k + 1, cs, rho, omega) * signal))
    yp[1] = (_hc(0, cs, rho, omega) * signal[0] +
             _hc(1, cs, rho, omega) * signal[1] +
             add.reduce(_hc(k + 2, cs, rho, omega) * signal))
    # ... then run the second-order recursion forward
    for n in range(2, K):
        yp[n] = (cs * signal[n] + 2 * rho * cos(omega) * yp[n - 1] -
                 rho * rho * yp[n - 2])
    y = zeros((K,), signal.dtype.char)
    # backward pass: seed the last two samples from the symmetric response
    # applied to the reversed signal ...
    y[K - 1] = add.reduce((_hs(k, cs, rho, omega) +
                           _hs(k + 1, cs, rho, omega)) * signal[::-1])
    y[K - 2] = add.reduce((_hs(k - 1, cs, rho, omega) +
                           _hs(k + 2, cs, rho, omega)) * signal[::-1])
    # ... then run the same recursion backwards over the forward output
    for n in range(K - 3, -1, -1):
        y[n] = (cs * yp[n] + 2 * rho * cos(omega) * y[n + 1] -
                rho * rho * y[n + 2])
    return y
def _cubic_coeff(signal):
zi = -2 + sqrt(3)
K = len(signal)
yplus = zeros((K,), signal.dtype.char)
powers = zi ** arange(K)
yplus[0] = signal[0] + zi * add.reduce(powers * signal)
for k in range(1, K):
yplus[k] = signal[k] + zi * yplus[k - 1]
output = zeros((K,), signal.dtype)
output[K - 1] = zi / (zi - 1) * yplus[K - 1]
for k in range(K - 2, -1, -1):
output[k] = zi * (output[k + 1] - yplus[k])
return output * 6.0
def _quadratic_coeff(signal):
zi = -3 + 2 * sqrt(2.0)
K = len(signal)
yplus = zeros((K,), signal.dtype.char)
powers = zi ** arange(K)
yplus[0] = signal[0] + zi * add.reduce(powers * signal)
for k in range(1, K):
yplus[k] = signal[k] + zi * yplus[k - 1]
output = zeros((K,), signal.dtype.char)
output[K - 1] = zi / (zi - 1) * yplus[K - 1]
for k in range(K - 2, -1, -1):
output[k] = zi * (output[k + 1] - yplus[k])
return output * 8.0
def cspline1d(signal, lamb=0.0):
    """
    Compute cubic spline coefficients for rank-1 array.

    Find the cubic spline coefficients for a 1-D signal assuming
    mirror-symmetric boundary conditions.  To obtain the signal back from
    the spline representation, mirror-symmetric-convolve these coefficients
    with a length-3 FIR window [1.0, 4.0, 1.0] / 6.0.

    Parameters
    ----------
    signal : ndarray
        A rank-1 array representing samples of a signal.
    lamb : float, optional
        Smoothing coefficient, default is 0.0 (exact interpolation).

    Returns
    -------
    c : ndarray
        Cubic spline coefficients.
    """
    if lamb == 0.0:
        return _cubic_coeff(signal)
    return _cubic_smooth_coeff(signal, lamb)
def qspline1d(signal, lamb=0.0):
    """Compute quadratic spline coefficients for rank-1 array.

    Find the quadratic spline coefficients for a 1-D signal assuming
    mirror-symmetric boundary conditions.  To obtain the signal back from
    the spline representation, mirror-symmetric-convolve these coefficients
    with a length-3 FIR window [1.0, 6.0, 1.0] / 8.0.

    Parameters
    ----------
    signal : ndarray
        A rank-1 array representing samples of a signal.
    lamb : float, optional
        Smoothing coefficient (must be zero for now).

    Returns
    -------
    c : ndarray
        Quadratic spline coefficients.

    Raises
    ------
    ValueError
        If ``lamb`` is nonzero (smoothing is not supported yet).
    """
    # DOCFIX: the Returns section previously said "Cubic spline
    # coefficients" — this function produces quadratic coefficients.
    if lamb != 0.0:
        raise ValueError("Smoothing quadratic splines not supported yet.")
    else:
        return _quadratic_coeff(signal)
def cspline1d_eval(cj, newx, dx=1.0, x0=0):
    """Evaluate a spline at the new set of points.

    `dx` is the old sample-spacing while `x0` was the old origin.  In
    other words the old sample points (knot points) for which the `cj`
    represent spline coefficients were at equally-spaced points of:
    oldx = x0 + j*dx  j=0...N-1, with N=len(cj)
    Edges are handled using mirror-symmetric boundary conditions.
    """
    # map the requested points onto knot-index coordinates
    newx = (asarray(newx) - x0) / float(dx)
    res = zeros_like(newx, dtype=cj.dtype)
    if res.size == 0:
        return res
    N = len(cj)
    cond1 = newx < 0
    cond2 = newx > (N - 1)
    cond3 = ~(cond1 | cond2)
    # handle general mirror-symmetry: reflect out-of-range points back into
    # [0, N-1] and evaluate recursively (recursion terminates because the
    # reflected points land inside the domain)
    res[cond1] = cspline1d_eval(cj, -newx[cond1])
    res[cond2] = cspline1d_eval(cj, 2 * (N - 1) - newx[cond2])
    newx = newx[cond3]
    if newx.size == 0:
        return res
    result = zeros_like(newx, dtype=cj.dtype)
    # each point is influenced by the 4 nearest cubic basis functions
    jlower = floor(newx - 2).astype(int) + 1
    for i in range(4):
        thisj = jlower + i
        indj = thisj.clip(0, N - 1)  # handle edge cases
        result += cj[indj] * cubic(newx - thisj)
    res[cond3] = result
    return res
def qspline1d_eval(cj, newx, dx=1.0, x0=0):
    """Evaluate a quadratic spline at the new set of points.

    `dx` is the old sample-spacing while `x0` was the old origin.  In
    other words the old sample points (knot points) for which the `cj`
    represent spline coefficients were at equally-spaced points of::
    oldx = x0 + j*dx  j=0...N-1, with N=len(cj)
    Edges are handled using mirror-symmetric boundary conditions.
    """
    # map the requested points onto knot-index coordinates
    newx = (asarray(newx) - x0) / dx
    res = zeros_like(newx)
    if res.size == 0:
        return res
    N = len(cj)
    cond1 = newx < 0
    cond2 = newx > (N - 1)
    cond3 = ~(cond1 | cond2)
    # handle general mirror-symmetry: reflect out-of-range points back into
    # [0, N-1] and evaluate recursively
    res[cond1] = qspline1d_eval(cj, -newx[cond1])
    res[cond2] = qspline1d_eval(cj, 2 * (N - 1) - newx[cond2])
    newx = newx[cond3]
    if newx.size == 0:
        return res
    result = zeros_like(newx)
    # each point is influenced by the 3 nearest quadratic basis functions
    jlower = floor(newx - 1.5).astype(int) + 1
    for i in range(3):
        thisj = jlower + i
        indj = thisj.clip(0, N - 1)  # handle edge cases
        result += cj[indj] * quadratic(newx - thisj)
    res[cond3] = result
    return res
| |
"""Tests for the EntityPlatform helper."""
import asyncio
from datetime import timedelta
import logging
from unittest.mock import Mock, patch
import pytest
from homeassistant.const import EVENT_HOMEASSISTANT_STARTED, PERCENTAGE
from homeassistant.core import CoreState, callback
from homeassistant.exceptions import HomeAssistantError, PlatformNotReady
from homeassistant.helpers import (
device_registry as dr,
entity_platform,
entity_registry as er,
)
from homeassistant.helpers.entity import async_generate_entity_id
from homeassistant.helpers.entity_component import (
DEFAULT_SCAN_INTERVAL,
EntityComponent,
)
import homeassistant.util.dt as dt_util
from tests.common import (
MockConfigEntry,
MockEntity,
MockEntityPlatform,
MockPlatform,
async_fire_time_changed,
mock_entity_platform,
mock_registry,
)
_LOGGER = logging.getLogger(__name__)
DOMAIN = "test_domain"
PLATFORM = "test_platform"
async def test_polling_only_updates_entities_it_should_poll(hass):
    """Test the polling of only updated entities."""
    component = EntityComponent(_LOGGER, DOMAIN, hass, timedelta(seconds=20))
    static_entity = MockEntity(should_poll=False)
    static_entity.async_update = Mock()
    polling_entity = MockEntity(should_poll=True)
    polling_entity.async_update = Mock()
    await component.async_add_entities([static_entity, polling_entity])
    static_entity.async_update.reset_mock()
    polling_entity.async_update.reset_mock()
    async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=20))
    await hass.async_block_till_done()
    # only the entity with should_poll=True must have been updated
    assert polling_entity.async_update.called
    assert not static_entity.async_update.called
async def test_polling_disabled_by_config_entry(hass):
    """Test that no polling unsubscriber is set up when the entry disables polling."""
    # BUGFIX: the local variable used to be named ``entity_platform``,
    # shadowing the imported ``homeassistant.helpers.entity_platform`` module.
    platform = MockEntityPlatform(hass)
    platform.config_entry = MockConfigEntry(pref_disable_polling=True)
    poll_ent = MockEntity(should_poll=True)
    await platform.async_add_entities([poll_ent])
    assert platform._async_unsub_polling is None
async def test_polling_updates_entities_with_exception(hass):
    """Test that one entity's failing update does not break the others."""
    component = EntityComponent(_LOGGER, DOMAIN, hass, timedelta(seconds=20))
    successes = []
    failures = []
    def update_mock():
        """Mock normal update."""
        successes.append(None)
    def update_mock_err():
        """Mock error update."""
        failures.append(None)
        raise AssertionError("Fake error update")
    entities = [MockEntity(should_poll=True) for _ in range(4)]
    entities[0].update = update_mock_err
    for entity in entities[1:]:
        entity.update = update_mock
    await component.async_add_entities(entities)
    successes.clear()
    failures.clear()
    async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=20))
    await hass.async_block_till_done()
    # three entities updated fine, one raised — all four were attempted
    assert len(successes) == 3
    assert len(failures) == 1
async def test_update_state_adds_entities(hass):
    """Test if updating poll entities cause an entity to be added works."""
    component = EntityComponent(_LOGGER, DOMAIN, hass)
    ent1 = MockEntity()
    ent2 = MockEntity(should_poll=True)
    await component.async_add_entities([ent2])
    assert len(hass.states.async_entity_ids()) == 1
    # ent2's sync update adds ent1 through the thread-safe add_entities API.
    ent2.update = lambda *_: component.add_entities([ent1])
    async_fire_time_changed(hass, dt_util.utcnow() + DEFAULT_SCAN_INTERVAL)
    await hass.async_block_till_done()
    assert len(hass.states.async_entity_ids()) == 2
async def test_update_state_adds_entities_with_update_before_add_true(hass):
    """Test if call update before add to state machine."""
    component = EntityComponent(_LOGGER, DOMAIN, hass)
    entity = MockEntity()
    entity.update = Mock(spec_set=True)
    # update_before_add=True: the entity is refreshed before registration.
    await component.async_add_entities([entity], True)
    await hass.async_block_till_done()
    assert len(hass.states.async_entity_ids()) == 1
    assert entity.update.called
async def test_update_state_adds_entities_with_update_before_add_false(hass):
    """Test if not call update before add to state machine."""
    component = EntityComponent(_LOGGER, DOMAIN, hass)
    entity = MockEntity()
    entity.update = Mock(spec_set=True)
    # update_before_add=False: the entity must be added without refreshing.
    await component.async_add_entities([entity], False)
    await hass.async_block_till_done()
    assert len(hass.states.async_entity_ids()) == 1
    assert not entity.update.called
@patch("homeassistant.helpers.entity_platform.async_track_time_interval")
async def test_set_scan_interval_via_platform(mock_track, hass):
    """Test the setting of the scan interval via platform."""

    def platform_setup(hass, config, add_entities, discovery_info=None):
        """Test the platform setup."""
        add_entities([MockEntity(should_poll=True)])

    platform = MockPlatform(platform_setup)
    # Platforms can override the default polling interval via SCAN_INTERVAL.
    platform.SCAN_INTERVAL = timedelta(seconds=30)
    mock_entity_platform(hass, "test_domain.platform", platform)
    component = EntityComponent(_LOGGER, DOMAIN, hass)
    component.setup({DOMAIN: {"platform": "platform"}})
    await hass.async_block_till_done()
    assert mock_track.called
    # call_args[0][2] is the third positional argument of
    # async_track_time_interval(hass, action, interval): the interval.
    assert timedelta(seconds=30) == mock_track.call_args[0][2]
async def test_adding_entities_with_generator_and_thread_callback(hass):
    """Test generator in add_entities that calls thread method.

    We should make sure we resolve the generator to a list before passing
    it into an async context.
    """
    component = EntityComponent(_LOGGER, DOMAIN, hass)

    def create_entity(number):
        """Create entity helper."""
        entity = MockEntity(unique_id=f"unique{number}")
        # Generating the entity_id touches hass from the generator's
        # evaluation context, which is what this test exercises.
        entity.entity_id = async_generate_entity_id(DOMAIN + ".{}", "Number", hass=hass)
        return entity

    # Pass a generator (not a list) on purpose; it must be resolved safely.
    await component.async_add_entities(create_entity(i) for i in range(2))
async def test_platform_warn_slow_setup(hass):
    """Warn we log when platform setup takes a long time."""
    platform = MockPlatform()
    mock_entity_platform(hass, "test_domain.platform", platform)
    component = EntityComponent(_LOGGER, DOMAIN, hass)
    # Patch the event loop's call_later to capture the scheduled
    # slow-setup warning callback instead of running it.
    with patch.object(hass.loop, "call_later") as mock_call:
        await component.async_setup({DOMAIN: {"platform": "platform"}})
        await hass.async_block_till_done()
        assert mock_call.called
        # mock_calls[0] is the warning message for component setup
        # mock_calls[4] is the warning message for platform setup
        timeout, logger_method = mock_call.mock_calls[4][1][:2]
        assert timeout == entity_platform.SLOW_SETUP_WARNING
        assert logger_method == _LOGGER.warning
        # The warning timer must be cancelled once setup completes.
        assert mock_call().cancel.called
async def test_platform_error_slow_setup(hass, caplog):
    """Don't block startup more than SLOW_SETUP_MAX_WAIT."""
    # Force the max-wait to 0 so the sleeping setup immediately times out.
    with patch.object(entity_platform, "SLOW_SETUP_MAX_WAIT", 0):
        called = []

        async def setup_platform(*args):
            called.append(1)
            # Sleep longer than the (patched) max wait to trigger the timeout.
            await asyncio.sleep(1)

        platform = MockPlatform(async_setup_platform=setup_platform)
        component = EntityComponent(_LOGGER, DOMAIN, hass)
        mock_entity_platform(hass, "test_domain.test_platform", platform)
        await component.async_setup({DOMAIN: {"platform": "test_platform"}})
        await hass.async_block_till_done()
        assert len(called) == 1
        # The platform timed out, so it must not be marked as set up.
        assert "test_domain.test_platform" not in hass.config.components
        assert "test_platform is taking longer than 0 seconds" in caplog.text
async def test_updated_state_used_for_entity_id(hass):
    """Test that first update results used for entity ID generation."""
    component = EntityComponent(_LOGGER, DOMAIN, hass)

    class MockEntityNameFetcher(MockEntity):
        """Mock entity that fetches a friendly name."""

        async def async_update(self):
            """Mock update that assigns a name."""
            self._values["name"] = "Living Room"

    # update_before_add=True so the fetched name is available when the
    # entity ID is generated.
    await component.async_add_entities([MockEntityNameFetcher()], True)
    entity_ids = hass.states.async_entity_ids()
    assert len(entity_ids) == 1
    assert entity_ids[0] == "test_domain.living_room"
async def test_parallel_updates_async_platform(hass):
    """Test async platform does not have parallel_updates limit by default."""
    platform = MockPlatform()
    mock_entity_platform(hass, "test_domain.platform", platform)
    component = EntityComponent(_LOGGER, DOMAIN, hass)
    component._platforms = {}
    await component.async_setup({DOMAIN: {"platform": "platform"}})
    await hass.async_block_till_done()
    # Grab the EntityPlatform handle that was just created.
    handle = list(component._platforms.values())[-1]
    assert handle.parallel_updates is None

    class AsyncEntity(MockEntity):
        """Mock entity that has async_update."""

        async def async_update(self):
            pass

    entity = AsyncEntity()
    await handle.async_add_entities([entity])
    # Async entities get no semaphore by default: no parallelism limit.
    assert entity.parallel_updates is None
async def test_parallel_updates_async_platform_with_constant(hass):
    """Test async platform can set parallel_updates limit."""
    platform = MockPlatform()
    # Platform-level constant limiting concurrent updates to 2.
    platform.PARALLEL_UPDATES = 2
    mock_entity_platform(hass, "test_domain.platform", platform)
    component = EntityComponent(_LOGGER, DOMAIN, hass)
    component._platforms = {}
    await component.async_setup({DOMAIN: {"platform": "platform"}})
    await hass.async_block_till_done()
    handle = list(component._platforms.values())[-1]

    class AsyncEntity(MockEntity):
        """Mock entity that has async_update."""

        async def async_update(self):
            pass

    entity = AsyncEntity()
    await handle.async_add_entities([entity])
    # The semaphore honors the PARALLEL_UPDATES constant.
    assert entity.parallel_updates is not None
    assert entity.parallel_updates._value == 2
async def test_parallel_updates_sync_platform(hass):
    """Test sync platform parallel_updates default set to 1."""
    platform = MockPlatform()
    mock_entity_platform(hass, "test_domain.platform", platform)
    component = EntityComponent(_LOGGER, DOMAIN, hass)
    component._platforms = {}
    await component.async_setup({DOMAIN: {"platform": "platform"}})
    await hass.async_block_till_done()
    handle = list(component._platforms.values())[-1]

    class SyncEntity(MockEntity):
        """Mock entity that has update."""

        # Fix: was `async def update`, but this test is about a *sync*
        # entity. Home Assistant runs `update` in an executor, so an async
        # def would yield a never-awaited coroutine instead of a sync call.
        def update(self):
            pass

    entity = SyncEntity()
    await handle.async_add_entities([entity])
    # Sync entities run in executor threads; default limit is 1 so a
    # platform's thread-based updates never overlap.
    assert entity.parallel_updates is not None
    assert entity.parallel_updates._value == 1
async def test_parallel_updates_sync_platform_with_constant(hass):
    """Test sync platform can set parallel_updates limit."""
    platform = MockPlatform()
    # Platform-level constant overriding the sync default of 1.
    platform.PARALLEL_UPDATES = 2
    mock_entity_platform(hass, "test_domain.platform", platform)
    component = EntityComponent(_LOGGER, DOMAIN, hass)
    component._platforms = {}
    await component.async_setup({DOMAIN: {"platform": "platform"}})
    await hass.async_block_till_done()
    handle = list(component._platforms.values())[-1]

    class SyncEntity(MockEntity):
        """Mock entity that has update."""

        # Fix: was `async def update`; a sync entity's update method must be
        # a plain function because it is executed via the executor.
        def update(self):
            pass

    entity = SyncEntity()
    await handle.async_add_entities([entity])
    # The semaphore honors the PARALLEL_UPDATES constant.
    assert entity.parallel_updates is not None
    assert entity.parallel_updates._value == 2
async def test_raise_error_on_update(hass):
    """Test the add entity if they raise an error on update."""
    updates = []
    component = EntityComponent(_LOGGER, DOMAIN, hass)
    entity1 = MockEntity(name="test_1")
    entity2 = MockEntity(name="test_2")

    def _raise():
        """Raise an exception."""
        raise AssertionError

    entity1.update = _raise
    entity2.update = lambda: updates.append(1)
    # update_before_add=True: entity1's failing update must not stop entity2.
    await component.async_add_entities([entity1, entity2], True)
    assert len(updates) == 1
    assert 1 in updates
    # The failing entity was not added, so hass/platform were never set on it.
    assert entity1.hass is None
    assert entity1.platform is None
    assert entity2.hass is not None
    assert entity2.platform is not None
async def test_async_remove_with_platform(hass):
    """Remove an entity from a platform."""
    component = EntityComponent(_LOGGER, DOMAIN, hass)
    entity = MockEntity(name="test_1")
    await component.async_add_entities([entity])
    assert len(hass.states.async_entity_ids()) == 1
    # Removing the entity must drop its state from the state machine.
    await entity.async_remove()
    assert len(hass.states.async_entity_ids()) == 0
async def test_not_adding_duplicate_entities_with_unique_id(hass, caplog):
    """Test for not adding duplicate entities."""
    caplog.set_level(logging.ERROR)
    component = EntityComponent(_LOGGER, DOMAIN, hass)
    await component.async_add_entities(
        [MockEntity(name="test1", unique_id="not_very_unique")]
    )
    assert len(hass.states.async_entity_ids()) == 1
    assert not caplog.text
    # Second entity reuses the unique_id: must be rejected with an error log.
    ent2 = MockEntity(name="test2", unique_id="not_very_unique")
    await component.async_add_entities([ent2])
    assert "test1" in caplog.text
    assert DOMAIN in caplog.text
    # Even with an explicit (different) entity_id the duplicate unique_id
    # must still be rejected.
    ent3 = MockEntity(
        name="test2", entity_id="test_domain.test3", unique_id="not_very_unique"
    )
    await component.async_add_entities([ent3])
    assert "test1" in caplog.text
    assert "test3" in caplog.text
    assert DOMAIN in caplog.text
    # Rejected entities never get hass/platform assigned.
    assert ent2.hass is None
    assert ent2.platform is None
    assert len(hass.states.async_entity_ids()) == 1
async def test_using_prescribed_entity_id(hass):
    """Test for using predefined entity ID."""
    component = EntityComponent(_LOGGER, DOMAIN, hass)
    entity = MockEntity(name="bla", entity_id="hello.world")
    await component.async_add_entities([entity])
    # The entity keeps its explicitly assigned ID, even outside the domain.
    assert "hello.world" in hass.states.async_entity_ids()
async def test_using_prescribed_entity_id_with_unique_id(hass):
    """Test for amending predefined entity ID because currently exists."""
    component = EntityComponent(_LOGGER, DOMAIN, hass)
    first = MockEntity(entity_id="test_domain.world")
    second = MockEntity(entity_id="test_domain.world", unique_id="bla")
    await component.async_add_entities([first])
    await component.async_add_entities([second])
    # The second entity's requested ID collides, so a suffix is appended.
    assert "test_domain.world_2" in hass.states.async_entity_ids()
async def test_using_prescribed_entity_id_which_is_registered(hass):
    """Test not allowing predefined entity ID that already registered."""
    component = EntityComponent(_LOGGER, DOMAIN, hass)
    registry = mock_registry(hass)
    # Register test_domain.world
    registry.async_get_or_create(DOMAIN, "test", "1234", suggested_object_id="world")
    # This entity_id will be rewritten
    await component.async_add_entities([MockEntity(entity_id="test_domain.world")])
    # The registry reservation wins; the new entity gets a suffixed ID.
    assert "test_domain.world_2" in hass.states.async_entity_ids()
async def test_name_which_conflict_with_registered(hass):
    """Test not generating conflicting entity ID based on name."""
    component = EntityComponent(_LOGGER, DOMAIN, hass)
    registry = mock_registry(hass)
    # Register test_domain.world
    registry.async_get_or_create(DOMAIN, "test", "1234", suggested_object_id="world")
    # A name-derived ID would be test_domain.world; the registry entry
    # forces a suffixed ID instead.
    await component.async_add_entities([MockEntity(name="world")])
    assert "test_domain.world_2" in hass.states.async_entity_ids()
async def test_entity_with_name_and_entity_id_getting_registered(hass):
    """Ensure that entity ID is used for registration."""
    component = EntityComponent(_LOGGER, DOMAIN, hass)
    entity = MockEntity(unique_id="1234", name="bla", entity_id="test_domain.world")
    await component.async_add_entities([entity])
    # The explicit entity_id wins over the name-derived one.
    assert "test_domain.world" in hass.states.async_entity_ids()
async def test_overriding_name_from_registry(hass):
    """Test that we can override a name via the Entity Registry."""
    component = EntityComponent(_LOGGER, DOMAIN, hass)
    mock_registry(
        hass,
        {
            "test_domain.world": er.RegistryEntry(
                entity_id="test_domain.world",
                unique_id="1234",
                # Using component.async_add_entities is equal to platform "domain"
                platform="test_domain",
                name="Overridden",
            )
        },
    )
    # The entity reports "Device Name", but the registry entry matched by
    # unique_id carries a user override.
    await component.async_add_entities(
        [MockEntity(unique_id="1234", name="Device Name")]
    )
    state = hass.states.get("test_domain.world")
    assert state is not None
    assert state.name == "Overridden"
async def test_registry_respect_entity_namespace(hass):
    """Test that the registry respects entity namespace."""
    mock_registry(hass)
    platform = MockEntityPlatform(hass, entity_namespace="ns")
    entity = MockEntity(unique_id="1234", name="Device Name")
    await platform.async_add_entities([entity])
    # The namespace is prefixed to the name-derived object id.
    assert entity.entity_id == "test_domain.ns_device_name"
async def test_registry_respect_entity_disabled(hass):
    """Test that the registry respects entity disabled."""
    mock_registry(
        hass,
        {
            "test_domain.world": er.RegistryEntry(
                entity_id="test_domain.world",
                unique_id="1234",
                # Using component.async_add_entities is equal to platform "domain"
                platform="test_platform",
                disabled_by=er.DISABLED_USER,
            )
        },
    )
    platform = MockEntityPlatform(hass)
    entity = MockEntity(unique_id="1234")
    await platform.async_add_entities([entity])
    # The entity keeps its registered ID but, being disabled, never gets
    # a state written to the state machine.
    assert entity.entity_id == "test_domain.world"
    assert hass.states.async_entity_ids() == []
async def test_entity_registry_updates_name(hass):
    """Test that updates on the entity registry update platform entities."""
    registry = mock_registry(
        hass,
        {
            "test_domain.world": er.RegistryEntry(
                entity_id="test_domain.world",
                unique_id="1234",
                # Using component.async_add_entities is equal to platform "domain"
                platform="test_platform",
                name="before update",
            )
        },
    )
    platform = MockEntityPlatform(hass)
    entity = MockEntity(unique_id="1234")
    await platform.async_add_entities([entity])
    state = hass.states.get("test_domain.world")
    assert state is not None
    assert state.name == "before update"
    registry.async_update_entity("test_domain.world", name="after update")
    # Two rounds: first processes the registry-updated event, second the
    # entity's resulting state write.
    await hass.async_block_till_done()
    await hass.async_block_till_done()
    state = hass.states.get("test_domain.world")
    assert state.name == "after update"
async def test_setup_entry(hass):
    """Test we can setup an entry."""
    registry = mock_registry(hass)

    async def async_setup_entry(hass, config_entry, async_add_entities):
        """Mock setup entry method."""
        async_add_entities([MockEntity(name="test1", unique_id="unique")])
        return True

    platform = MockPlatform(async_setup_entry=async_setup_entry)
    config_entry = MockConfigEntry(entry_id="super-mock-id")
    entity_platform = MockEntityPlatform(
        hass, platform_name=config_entry.domain, platform=platform
    )
    assert await entity_platform.async_setup_entry(config_entry)
    await hass.async_block_till_done()
    full_name = f"{entity_platform.domain}.{config_entry.domain}"
    assert full_name in hass.config.components
    assert len(hass.states.async_entity_ids()) == 1
    assert len(registry.entities) == 1
    # The registry entry is linked back to the config entry that created it.
    assert registry.entities["test_domain.test1"].config_entry_id == "super-mock-id"
async def test_setup_entry_platform_not_ready(hass, caplog):
    """Test when an entry is not ready yet."""
    async_setup_entry = Mock(side_effect=PlatformNotReady)
    platform = MockPlatform(async_setup_entry=async_setup_entry)
    config_entry = MockConfigEntry()
    ent_platform = MockEntityPlatform(
        hass, platform_name=config_entry.domain, platform=platform
    )
    # Capture the retry scheduling instead of actually waiting for it.
    with patch.object(entity_platform, "async_call_later") as mock_call_later:
        assert not await ent_platform.async_setup_entry(config_entry)
    full_name = f"{ent_platform.domain}.{config_entry.domain}"
    assert full_name not in hass.config.components
    assert len(async_setup_entry.mock_calls) == 1
    assert "Platform test not ready yet" in caplog.text
    # Exactly one retry was scheduled.
    assert len(mock_call_later.mock_calls) == 1
async def test_setup_entry_platform_not_ready_with_message(hass, caplog):
    """Test when an entry is not ready yet that includes a message."""
    # PlatformNotReady carries an explanatory message to surface in the log.
    async_setup_entry = Mock(side_effect=PlatformNotReady("lp0 on fire"))
    platform = MockPlatform(async_setup_entry=async_setup_entry)
    config_entry = MockConfigEntry()
    ent_platform = MockEntityPlatform(
        hass, platform_name=config_entry.domain, platform=platform
    )
    with patch.object(entity_platform, "async_call_later") as mock_call_later:
        assert not await ent_platform.async_setup_entry(config_entry)
    full_name = f"{ent_platform.domain}.{config_entry.domain}"
    assert full_name not in hass.config.components
    assert len(async_setup_entry.mock_calls) == 1
    assert "Platform test not ready yet" in caplog.text
    # The custom reason must appear in the log output.
    assert "lp0 on fire" in caplog.text
    assert len(mock_call_later.mock_calls) == 1
async def test_setup_entry_platform_not_ready_from_exception(hass, caplog):
    """Test when an entry is not ready yet that includes the causing exception string."""
    original_exception = HomeAssistantError("The device dropped the connection")
    platform_exception = PlatformNotReady()
    # Simulate `raise PlatformNotReady() from original_exception`.
    platform_exception.__cause__ = original_exception
    async_setup_entry = Mock(side_effect=platform_exception)
    platform = MockPlatform(async_setup_entry=async_setup_entry)
    config_entry = MockConfigEntry()
    ent_platform = MockEntityPlatform(
        hass, platform_name=config_entry.domain, platform=platform
    )
    with patch.object(entity_platform, "async_call_later") as mock_call_later:
        assert not await ent_platform.async_setup_entry(config_entry)
    full_name = f"{ent_platform.domain}.{config_entry.domain}"
    assert full_name not in hass.config.components
    assert len(async_setup_entry.mock_calls) == 1
    assert "Platform test not ready yet" in caplog.text
    # The cause's message must be included in the logged reason.
    assert "The device dropped the connection" in caplog.text
    assert len(mock_call_later.mock_calls) == 1
async def test_reset_cancels_retry_setup(hass):
    """Test that resetting a platform will cancel scheduled a setup retry."""
    async_setup_entry = Mock(side_effect=PlatformNotReady)
    platform = MockPlatform(async_setup_entry=async_setup_entry)
    config_entry = MockConfigEntry()
    ent_platform = MockEntityPlatform(
        hass, platform_name=config_entry.domain, platform=platform
    )
    with patch.object(entity_platform, "async_call_later") as mock_call_later:
        assert not await ent_platform.async_setup_entry(config_entry)
    # A retry is scheduled but its cancel callback has not been invoked yet.
    assert len(mock_call_later.mock_calls) == 1
    assert len(mock_call_later.return_value.mock_calls) == 0
    assert ent_platform._async_cancel_retry_setup is not None
    await ent_platform.async_reset()
    # Reset invoked the cancel callback and cleared the handle.
    assert len(mock_call_later.return_value.mock_calls) == 1
    assert ent_platform._async_cancel_retry_setup is None
async def test_reset_cancels_retry_setup_when_not_started(hass):
    """Test that resetting a platform will cancel scheduled a setup retry when not yet started."""
    # Before startup the retry is deferred to the STARTED event instead of
    # a timer, so we count event-bus listeners rather than call_later calls.
    hass.state = CoreState.starting
    async_setup_entry = Mock(side_effect=PlatformNotReady)
    initial_listeners = hass.bus.async_listeners()[EVENT_HOMEASSISTANT_STARTED]
    platform = MockPlatform(async_setup_entry=async_setup_entry)
    config_entry = MockConfigEntry()
    ent_platform = MockEntityPlatform(
        hass, platform_name=config_entry.domain, platform=platform
    )
    assert not await ent_platform.async_setup_entry(config_entry)
    await hass.async_block_till_done()
    # One extra STARTED listener: the deferred setup retry.
    assert (
        hass.bus.async_listeners()[EVENT_HOMEASSISTANT_STARTED] == initial_listeners + 1
    )
    assert ent_platform._async_cancel_retry_setup is not None
    await ent_platform.async_reset()
    await hass.async_block_till_done()
    # Reset removed the listener and cleared the cancel handle.
    assert hass.bus.async_listeners()[EVENT_HOMEASSISTANT_STARTED] == initial_listeners
    assert ent_platform._async_cancel_retry_setup is None
async def test_stop_shutdown_cancels_retry_setup_and_interval_listener(hass):
    """Test that shutdown will cancel scheduled a setup retry and interval listener."""
    async_setup_entry = Mock(side_effect=PlatformNotReady)
    platform = MockPlatform(async_setup_entry=async_setup_entry)
    config_entry = MockConfigEntry()
    ent_platform = MockEntityPlatform(
        hass, platform_name=config_entry.domain, platform=platform
    )
    with patch.object(entity_platform, "async_call_later") as mock_call_later:
        assert not await ent_platform.async_setup_entry(config_entry)
    assert len(mock_call_later.mock_calls) == 1
    assert len(mock_call_later.return_value.mock_calls) == 0
    assert ent_platform._async_cancel_retry_setup is not None
    await ent_platform.async_shutdown()
    # Shutdown cancels both the retry timer and the polling unsubscriber.
    assert len(mock_call_later.return_value.mock_calls) == 1
    assert ent_platform._async_unsub_polling is None
    assert ent_platform._async_cancel_retry_setup is None
async def test_not_fails_with_adding_empty_entities_(hass):
    """Test for not fails on empty entities list."""
    component = EntityComponent(_LOGGER, DOMAIN, hass)
    # Adding an empty list is a no-op and must not raise.
    await component.async_add_entities([])
    assert len(hass.states.async_entity_ids()) == 0
async def test_entity_registry_updates_entity_id(hass):
    """Test that updates on the entity registry update platform entities."""
    registry = mock_registry(
        hass,
        {
            "test_domain.world": er.RegistryEntry(
                entity_id="test_domain.world",
                unique_id="1234",
                # Using component.async_add_entities is equal to platform "domain"
                platform="test_platform",
                name="Some name",
            )
        },
    )
    platform = MockEntityPlatform(hass)
    entity = MockEntity(unique_id="1234")
    await platform.async_add_entities([entity])
    state = hass.states.get("test_domain.world")
    assert state is not None
    assert state.name == "Some name"
    # Rename the entity through the registry; the platform entity must follow.
    registry.async_update_entity(
        "test_domain.world", new_entity_id="test_domain.planet"
    )
    # Two rounds: registry event processing, then the entity's re-add.
    await hass.async_block_till_done()
    await hass.async_block_till_done()
    assert hass.states.get("test_domain.world") is None
    assert hass.states.get("test_domain.planet") is not None
async def test_entity_registry_updates_invalid_entity_id(hass):
    """Test that we can't update to an invalid entity id."""
    registry = mock_registry(
        hass,
        {
            "test_domain.world": er.RegistryEntry(
                entity_id="test_domain.world",
                unique_id="1234",
                # Using component.async_add_entities is equal to platform "domain"
                platform="test_platform",
                name="Some name",
            ),
            "test_domain.existing": er.RegistryEntry(
                entity_id="test_domain.existing",
                unique_id="5678",
                platform="test_platform",
            ),
        },
    )
    platform = MockEntityPlatform(hass)
    entity = MockEntity(unique_id="1234")
    await platform.async_add_entities([entity])
    state = hass.states.get("test_domain.world")
    assert state is not None
    assert state.name == "Some name"
    # Renaming onto an already-registered entity_id must be rejected.
    with pytest.raises(ValueError):
        registry.async_update_entity(
            "test_domain.world", new_entity_id="test_domain.existing"
        )
    # An ID without a domain separator is syntactically invalid.
    with pytest.raises(ValueError):
        registry.async_update_entity(
            "test_domain.world", new_entity_id="invalid_entity_id"
        )
    # Moving an entity to a different domain is not allowed.
    with pytest.raises(ValueError):
        registry.async_update_entity(
            "test_domain.world", new_entity_id="diff_domain.world"
        )
    await hass.async_block_till_done()
    await hass.async_block_till_done()
    # All rejected updates leave the original state untouched.
    assert hass.states.get("test_domain.world") is not None
    assert hass.states.get("invalid_entity_id") is None
    assert hass.states.get("diff_domain.world") is None
async def test_device_info_called(hass):
    """Test device info is forwarded correctly."""
    registry = dr.async_get(hass)
    # Pre-create the "via" device so via_device below can link to it.
    via = registry.async_get_or_create(
        config_entry_id="123",
        connections=set(),
        identifiers={("hue", "via-id")},
        manufacturer="manufacturer",
        model="via",
    )

    async def async_setup_entry(hass, config_entry, async_add_entities):
        """Mock setup entry method."""
        async_add_entities(
            [
                # Invalid device info
                MockEntity(unique_id="abcd", device_info={}),
                # Valid device info
                MockEntity(
                    unique_id="qwer",
                    device_info={
                        "identifiers": {("hue", "1234")},
                        "connections": {(dr.CONNECTION_NETWORK_MAC, "abcd")},
                        "manufacturer": "test-manuf",
                        "model": "test-model",
                        "name": "test-name",
                        "sw_version": "test-sw",
                        "suggested_area": "Heliport",
                        "entry_type": "service",
                        "via_device": ("hue", "via-id"),
                    },
                ),
            ]
        )
        return True

    platform = MockPlatform(async_setup_entry=async_setup_entry)
    config_entry = MockConfigEntry(entry_id="super-mock-id")
    entity_platform = MockEntityPlatform(
        hass, platform_name=config_entry.domain, platform=platform
    )
    assert await entity_platform.async_setup_entry(config_entry)
    await hass.async_block_till_done()
    # Both entities were added; only the second created a device entry.
    assert len(hass.states.async_entity_ids()) == 2
    device = registry.async_get_device({("hue", "1234")})
    assert device is not None
    # Every field of the device_info dict must be forwarded verbatim.
    assert device.identifiers == {("hue", "1234")}
    assert device.connections == {(dr.CONNECTION_NETWORK_MAC, "abcd")}
    assert device.manufacturer == "test-manuf"
    assert device.model == "test-model"
    assert device.name == "test-name"
    assert device.sw_version == "test-sw"
    assert device.suggested_area == "Heliport"
    assert device.entry_type == "service"
    assert device.via_device_id == via.id
async def test_device_info_not_overrides(hass):
    """Test device info is forwarded correctly."""
    registry = dr.async_get(hass)
    # Existing device with explicit manufacturer/model.
    device = registry.async_get_or_create(
        config_entry_id="bla",
        connections={(dr.CONNECTION_NETWORK_MAC, "abcd")},
        manufacturer="test-manufacturer",
        model="test-model",
    )
    assert device.manufacturer == "test-manufacturer"
    assert device.model == "test-model"

    async def async_setup_entry(hass, config_entry, async_add_entities):
        """Mock setup entry method."""
        async_add_entities(
            [
                MockEntity(
                    unique_id="qwer",
                    device_info={
                        # Matches the existing device's connection; the
                        # default_* values must NOT override existing fields.
                        "connections": {(dr.CONNECTION_NETWORK_MAC, "abcd")},
                        "default_name": "default name 1",
                        "default_model": "default model 1",
                        "default_manufacturer": "default manufacturer 1",
                    },
                )
            ]
        )
        return True

    platform = MockPlatform(async_setup_entry=async_setup_entry)
    config_entry = MockConfigEntry(entry_id="super-mock-id")
    entity_platform = MockEntityPlatform(
        hass, platform_name=config_entry.domain, platform=platform
    )
    assert await entity_platform.async_setup_entry(config_entry)
    await hass.async_block_till_done()
    device2 = registry.async_get_device(set(), {(dr.CONNECTION_NETWORK_MAC, "abcd")})
    assert device2 is not None
    # Same device entry, with its original values preserved.
    assert device.id == device2.id
    assert device2.manufacturer == "test-manufacturer"
    assert device2.model == "test-model"
async def test_entity_disabled_by_integration(hass):
    """Test entity disabled by integration."""
    component = EntityComponent(_LOGGER, DOMAIN, hass, timedelta(seconds=20))
    entity_default = MockEntity(unique_id="default")
    # entity_registry_enabled_default=False: the integration ships this
    # entity disabled.
    entity_disabled = MockEntity(
        unique_id="disabled", entity_registry_enabled_default=False
    )
    await component.async_add_entities([entity_default, entity_disabled])
    assert entity_default.hass is not None
    assert entity_default.platform is not None
    # The disabled entity is registered but never added to hass.
    assert entity_disabled.hass is None
    assert entity_disabled.platform is None
    registry = er.async_get(hass)
    entry_default = registry.async_get_or_create(DOMAIN, DOMAIN, "default")
    assert entry_default.disabled_by is None
    entry_disabled = registry.async_get_or_create(DOMAIN, DOMAIN, "disabled")
    assert entry_disabled.disabled_by == er.DISABLED_INTEGRATION
async def test_entity_info_added_to_entity_registry(hass):
    """Test entity info is written to entity registry."""
    component = EntityComponent(_LOGGER, DOMAIN, hass, timedelta(seconds=20))
    entity_default = MockEntity(
        unique_id="default",
        capability_attributes={"max": 100},
        supported_features=5,
        device_class="mock-device-class",
        unit_of_measurement=PERCENTAGE,
    )
    await component.async_add_entities([entity_default])
    registry = er.async_get(hass)
    entry_default = registry.async_get_or_create(DOMAIN, DOMAIN, "default")
    # Each entity attribute must be persisted on the registry entry.
    assert entry_default.capabilities == {"max": 100}
    assert entry_default.supported_features == 5
    assert entry_default.device_class == "mock-device-class"
    assert entry_default.unit_of_measurement == PERCENTAGE
async def test_override_restored_entities(hass):
    """Test that we allow overriding restored entities."""
    registry = mock_registry(hass)
    registry.async_get_or_create(
        "test_domain", "test_domain", "1234", suggested_object_id="world"
    )
    # Simulate a state restored from a previous run ("restored": True).
    hass.states.async_set("test_domain.world", "unavailable", {"restored": True})
    component = EntityComponent(_LOGGER, DOMAIN, hass)
    # Adding a live entity with the same unique_id must replace the
    # restored placeholder state.
    await component.async_add_entities(
        [MockEntity(unique_id="1234", state="on", entity_id="test_domain.world")], True
    )
    state = hass.states.get("test_domain.world")
    assert state.state == "on"
async def test_platform_with_no_setup(hass, caplog):
    """Test setting up a platform that does not support setup."""
    # platform=None means no setup_platform/async_setup_platform exists.
    platform_without_setup = MockEntityPlatform(
        hass, domain="mock-integration", platform_name="mock-platform", platform=None
    )
    await platform_without_setup.async_setup(None)
    expected = (
        "The mock-platform platform for the mock-integration integration does not support platform setup."
    )
    assert expected in caplog.text
async def test_platforms_sharing_services(hass):
    """Test platforms share services."""
    entity_platform1 = MockEntityPlatform(
        hass, domain="mock_integration", platform_name="mock_platform", platform=None
    )
    entity1 = MockEntity(entity_id="mock_integration.entity_1")
    await entity_platform1.async_add_entities([entity1])
    # Second platform: same domain and platform name as the first.
    entity_platform2 = MockEntityPlatform(
        hass, domain="mock_integration", platform_name="mock_platform", platform=None
    )
    entity2 = MockEntity(entity_id="mock_integration.entity_2")
    await entity_platform2.async_add_entities([entity2])
    # Third platform: same platform name but a different domain, so its
    # entities must NOT be reached by the shared service.
    entity_platform3 = MockEntityPlatform(
        hass,
        domain="different_integration",
        platform_name="mock_platform",
        platform=None,
    )
    entity3 = MockEntity(entity_id="different_integration.entity_3")
    await entity_platform3.async_add_entities([entity3])
    entities = []

    @callback
    def handle_service(entity, data):
        entities.append(entity)

    entity_platform1.async_register_entity_service("hello", {}, handle_service)
    # Registering the same service again must be a no-op: platform1's
    # handler stays in place and this one is never called.
    entity_platform2.async_register_entity_service(
        "hello", {}, Mock(side_effect=AssertionError("Should not be called"))
    )
    await hass.services.async_call(
        "mock_platform", "hello", {"entity_id": "all"}, blocking=True
    )
    # Only entities of the registering domain are serviced.
    assert len(entities) == 2
    assert entity1 in entities
    assert entity2 in entities
async def test_invalid_entity_id(hass):
    """Test specifying an invalid entity id."""
    platform = MockEntityPlatform(hass)
    bad_entity = MockEntity(entity_id="invalid_entity_id")
    with pytest.raises(HomeAssistantError):
        await platform.async_add_entities([bad_entity])
    # The rejected entity must not be wired up to hass or the platform.
    assert bad_entity.hass is None
    assert bad_entity.platform is None
class MockBlockingEntity(MockEntity):
    """Class to mock an entity that will block adding entities."""

    async def async_added_to_hass(self):
        """Block for a long time."""
        # Sleeps far longer than any add-entities timeout used in the tests.
        await asyncio.sleep(1000)
async def test_setup_entry_with_entities_that_block_forever(hass, caplog):
    """Test we cancel adding entities when we reach the timeout."""
    registry = mock_registry(hass)

    async def async_setup_entry(hass, config_entry, async_add_entities):
        """Mock setup entry method."""
        async_add_entities([MockBlockingEntity(name="test1", unique_id="unique")])
        return True

    platform = MockPlatform(async_setup_entry=async_setup_entry)
    config_entry = MockConfigEntry(entry_id="super-mock-id")
    mock_entity_platform = MockEntityPlatform(
        hass, platform_name=config_entry.domain, platform=platform
    )
    # Shrink both timeouts so the blocking entity trips them immediately.
    with patch.object(entity_platform, "SLOW_ADD_ENTITY_MAX_WAIT", 0.01), patch.object(
        entity_platform, "SLOW_ADD_MIN_TIMEOUT", 0.01
    ):
        assert await mock_entity_platform.async_setup_entry(config_entry)
        await hass.async_block_till_done()
    full_name = f"{mock_entity_platform.domain}.{config_entry.domain}"
    # Setup still succeeds; only the blocked entity's add is cancelled.
    assert full_name in hass.config.components
    assert len(hass.states.async_entity_ids()) == 0
    # The registry entry was created before the add timed out.
    assert len(registry.entities) == 1
    assert "Timed out adding entities" in caplog.text
    assert "test_domain.test1" in caplog.text
    assert "test_domain" in caplog.text
    assert "test" in caplog.text
async def test_two_platforms_add_same_entity(hass):
    """Test two platforms in the same domain adding an entity with the same name."""
    entity_platform1 = MockEntityPlatform(
        hass, domain="mock_integration", platform_name="mock_platform", platform=None
    )
    entity1 = SlowEntity(name="entity_1")
    entity_platform2 = MockEntityPlatform(
        hass, domain="mock_integration", platform_name="mock_platform", platform=None
    )
    entity2 = SlowEntity(name="entity_1")
    # Add both concurrently; SlowEntity yields control during add so the
    # two adds genuinely interleave and race for the same entity_id.
    await asyncio.gather(
        entity_platform1.async_add_entities([entity1]),
        entity_platform2.async_add_entities([entity2]),
    )
    entities = []

    @callback
    def handle_service(entity, *_):
        entities.append(entity)

    entity_platform1.async_register_entity_service("hello", {}, handle_service)
    await hass.services.async_call(
        "mock_platform", "hello", {"entity_id": "all"}, blocking=True
    )
    # Both entities were added; the collision was resolved with a suffix.
    assert len(entities) == 2
    assert {entity1.entity_id, entity2.entity_id} == {
        "mock_integration.entity_1",
        "mock_integration.entity_1_2",
    }
    assert entity1 in entities
    assert entity2 in entities
class SlowEntity(MockEntity):
    """An entity that will sleep during add."""

    async def async_added_to_hass(self):
        """Make sure control is returned to the event loop on add."""
        # Short sleep so concurrent async_add_entities calls interleave.
        await asyncio.sleep(0.1)
        await super().async_added_to_hass()
| |
# -*- coding: utf-8 -*-
from operator import attrgetter
from django.core.exceptions import ImproperlyConfigured
from django.conf.urls import url, include
from django.contrib.formtools.wizard.views import normalize_name
from django.db.models import signals
from django.template.defaultfilters import slugify
from django.utils import six
from django.utils.encoding import force_text
from django.utils.translation import get_language, deactivate_all, activate
from django.template import TemplateDoesNotExist, TemplateSyntaxError
from cms.exceptions import PluginAlreadyRegistered, PluginNotRegistered
from cms.plugin_base import CMSPluginBase
from cms.models import CMSPlugin
from cms.utils.django_load import load
from cms.utils.helpers import reversion_register
from cms.utils.placeholder import get_placeholder_conf
from cms.utils.compat.dj import is_installed
class PluginPool(object):
    """Registry of CMS plugin classes, keyed by plugin class name.

    Plugins are discovered lazily from each installed app's ``cms_plugins``
    module; ``register_plugin`` validates render configuration and wires up
    model signals.
    """
    def __init__(self):
        # plugin class name -> plugin class
        self.plugins = {}
        # True once load('cms_plugins') has run
        self.discovered = False
        # guard flag used by set_plugin_meta (one-shot patching)
        self.patched = False
    def discover_plugins(self):
        """Import every installed app's ``cms_plugins`` module (idempotent)."""
        if self.discovered:
            return
        from cms.views import invalidate_cms_page_cache
        # Discovery changes the available plugin set, so drop the page cache.
        invalidate_cms_page_cache()
        load('cms_plugins')
        self.discovered = True
    def clear(self):
        """Reset the pool to its pristine, undiscovered state."""
        self.discovered = False
        self.plugins = {}
        self.patched = False
    def register_plugin(self, plugin):
        """
        Registers the given plugin(s).
        If a plugin is already registered, this will raise PluginAlreadyRegistered.
        """
        if not issubclass(plugin, CMSPluginBase):
            raise ImproperlyConfigured(
                "CMS Plugins must be subclasses of CMSPluginBase, %r is not."
                % plugin
            )
        # A plugin that renders must provide a template source through one of
        # render_template (class or model attribute) or get_render_template().
        if (plugin.render_plugin and not type(plugin.render_plugin) == property
                or hasattr(plugin.model, 'render_template')
                or hasattr(plugin, 'get_render_template')):
            if (plugin.render_template is None and
                    not hasattr(plugin.model, 'render_template') and
                    not hasattr(plugin, 'get_render_template')):
                raise ImproperlyConfigured(
                    "CMS Plugins must define a render template, "
                    "a get_render_template method or "
                    "set render_plugin=False: %s" % plugin
                )
            # If the plugin class defines get_render_template we cannot
            # statically check for a valid template file, as the template
            # depends on plugin configuration and rendering context; the
            # developer is on their own there.
            elif not hasattr(plugin, 'get_render_template'):
                from django.template import loader
                # Model-level render_template wins over the class attribute.
                template = ((hasattr(plugin.model, 'render_template') and
                             plugin.model.render_template) or
                            plugin.render_template)
                if isinstance(template, six.string_types) and template:
                    try:
                        loader.get_template(template)
                    except TemplateDoesNotExist as e:
                        # Note that the template loader will throw
                        # TemplateDoesNotExist if the plugin's render_template
                        # does in fact exist, but it includes a template that
                        # doesn't.
                        if six.text_type(e) == template:
                            raise ImproperlyConfigured(
                                "CMS Plugins must define a render template (%s) that exists: %s"
                                % (plugin, template)
                            )
                        else:
                            # The named template exists; its include failure
                            # is a runtime concern, not a registration error.
                            pass
                    except TemplateSyntaxError:
                        # A broken template will surface at render time.
                        pass
        else:
            # render_plugin=False is incompatible with hosting children.
            if plugin.allow_children:
                raise ImproperlyConfigured(
                    "CMS Plugins can not define render_plugin=False and allow_children=True: %s"
                    % plugin
                )
        plugin_name = plugin.__name__
        if plugin_name in self.plugins:
            raise PluginAlreadyRegistered(
                "Cannot register %r, a plugin with this name (%r) is already "
                "registered." % (plugin, plugin_name)
            )
        plugin.value = plugin_name
        self.plugins[plugin_name] = plugin
        # Wire model signals; imported here to avoid circular imports.
        from cms.signals import pre_save_plugins, post_delete_plugins, pre_delete_plugins
        signals.pre_save.connect(pre_save_plugins, sender=plugin.model,
                                 dispatch_uid='cms_pre_save_plugin_%s' % plugin_name)
        signals.post_delete.connect(post_delete_plugins, sender=CMSPlugin,
                                    dispatch_uid='cms_post_delete_plugin_%s' % plugin_name)
        signals.pre_delete.connect(pre_delete_plugins, sender=CMSPlugin,
                                   dispatch_uid='cms_pre_delete_plugin_%s' % plugin_name)
        if is_installed('reversion'):
            from reversion.revisions import RegistrationError
            try:
                reversion_register(plugin.model)
            except RegistrationError:
                # Model already registered with reversion; that is fine.
                pass
        return plugin
    def unregister_plugin(self, plugin):
        """
        Unregisters the given plugin(s).
        If a plugin isn't already registered, this will raise PluginNotRegistered.
        """
        plugin_name = plugin.__name__
        if plugin_name not in self.plugins:
            raise PluginNotRegistered(
                'The plugin %r is not registered' % plugin
            )
        del self.plugins[plugin_name]
    def set_plugin_meta(self):
        """
        Patches a plugin model by forcing a specifc db_table whether the
        'new style' table name exists or not. The same goes for all the
        ManyToMany attributes.
        This method must be run whenever a plugin model is accessed
        directly.
        The model is modified in place; a 'patched' attribute is added
        to the model to check whether it's already been modified.

        NOTE(review): only the guard flag is set here in this version; the
        patching described above happens elsewhere (or was removed).
        """
        if self.patched:
            return
        self.patched = True
    def get_all_plugins(self, placeholder=None, page=None, setting_key="plugins", include_page_only=True):
        """Return registered plugins allowed in ``placeholder``/``page``,
        sorted by module.

        ``setting_key`` selects which placeholder configuration list to
        honour (e.g. 'plugins' or 'text_only_plugins').
        """
        self.discover_plugins()
        self.set_plugin_meta()
        plugins = sorted(self.plugins.values(), key=attrgetter('name'))
        final_plugins = []
        template = page and page.get_template() or None
        allowed_plugins = get_placeholder_conf(
            setting_key,
            placeholder,
            template,
        ) or ()
        for plugin in plugins:
            include_plugin = False
            if placeholder and not plugin.get_require_parent(placeholder, page):
                # No explicit allow-list for 'plugins' means "everything".
                include_plugin = not allowed_plugins and setting_key == "plugins" or plugin.__name__ in allowed_plugins
            if plugin.page_only and not include_page_only:
                include_plugin = False
            if include_plugin:
                final_plugins.append(plugin)
        if final_plugins or placeholder:
            plugins = final_plugins
        return sorted(plugins, key=attrgetter('module'))
    def get_text_enabled_plugins(self, placeholder, page):
        """Return the deduplicated, text-enabled subset of allowed plugins."""
        plugins = self.get_all_plugins(placeholder, page)
        plugins += self.get_all_plugins(placeholder, page, 'text_only_plugins')
        return sorted((p for p in set(plugins) if p.text_enabled),
                      key=attrgetter('module', 'name'))
    def get_plugin(self, name):
        """
        Retrieve a plugin from the cache.
        """
        self.discover_plugins()
        self.set_plugin_meta()
        return self.plugins[name]
    def get_patterns(self):
        """Build url patterns for every plugin exposing ``plugin_urls``."""
        self.discover_plugins()
        # We want the untranslated name of the plugin for its slug, so we
        # deactivate translation while building the patterns.
        lang = get_language()
        deactivate_all()
        try:
            url_patterns = []
            for plugin in self.get_all_plugins():
                p = plugin()
                slug = slugify(force_text(normalize_name(p.__class__.__name__)))
                url_patterns += [
                    url(r'^plugin/%s/' % (slug,), include(p.plugin_urls)),
                ]
        finally:
            # Reactivate translation
            activate(lang)
        return url_patterns
plugin_pool = PluginPool()
| |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
The rapidart module provides routines for artifact detection and region of
interest analysis.
These functions include:
* ArtifactDetect: performs artifact detection on functional images
* StimulusCorrelation: determines correlation between stimuli
schedule and movement/intensity parameters
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../testing/data'))
>>> os.chdir(datadir)
"""
import os
from copy import deepcopy
from warnings import warn
from nibabel import load, funcs, Nifti1Image
import numpy as np
from scipy import signal
import scipy.io as sio
from ..interfaces.base import (BaseInterface, traits, InputMultiPath,
OutputMultiPath, TraitedSpec, File,
BaseInterfaceInputSpec, isdefined)
from ..utils.filemanip import filename_to_list, save_json, split_filename
from ..utils.misc import find_indices
from .. import logging, config
iflogger = logging.getLogger('interface')
def _get_affine_matrix(params, source):
"""Return affine matrix given a set of translation and rotation parameters
params : np.array (upto 12 long) in native package format
source : the package that generated the parameters
supports SPM, AFNI, FSFAST, FSL, NIPY
"""
if source == 'FSL':
params = params[[3, 4, 5, 0, 1, 2]]
elif source in ('AFNI', 'FSFAST'):
params = params[np.asarray([4, 5, 3, 1, 2, 0]) + (len(params) > 6)]
params[3:] = params[3:] * np.pi / 180.
if source == 'NIPY':
# nipy does not store typical euler angles, use nipy to convert
from nipy.algorithms.registration import to_matrix44
return to_matrix44(params)
#process for FSL, SPM, AFNI and FSFAST
rotfunc = lambda x: np.array([[np.cos(x), np.sin(x)],
[-np.sin(x), np.cos(x)]])
q = np.array([0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0])
if len(params) < 12:
params = np.hstack((params, q[len(params):]))
params.shape = (len(params),)
# Translation
T = np.eye(4)
T[0:3, -1] = params[0:3]
# Rotation
Rx = np.eye(4)
Rx[1:3, 1:3] = rotfunc(params[3])
Ry = np.eye(4)
Ry[(0, 0, 2, 2), (0, 2, 0, 2)] = rotfunc(params[4]).ravel()
Rz = np.eye(4)
Rz[0:2, 0:2] = rotfunc(params[5])
# Scaling
S = np.eye(4)
S[0:3, 0:3] = np.diag(params[6:9])
# Shear
Sh = np.eye(4)
Sh[(0, 0, 1), (1, 2, 2)] = params[9:12]
if source in ('AFNI', 'FSFAST'):
return np.dot(T, np.dot(Ry, np.dot(Rx, np.dot(Rz, np.dot(S, Sh)))))
return np.dot(T, np.dot(Rx, np.dot(Ry, np.dot(Rz, np.dot(S, Sh)))))
def _calc_norm(mc, use_differences, source, brain_pts=None):
    """Calculates the maximum overall displacement of the midpoints
    of the faces of a cube due to translation and rotation.

    Parameters
    ----------
    mc : motion parameter estimates
        [3 translation, 3 rotation (radians)]
    use_differences : boolean
        use scan-to-scan differences instead of absolute displacements
    source : package that generated ``mc`` (SPM, FSL, AFNI, FSFAST, NIPY)
    brain_pts : [4 x n_points] of coordinates (homogeneous); optional

    Returns
    -------
    norm : at each time point
    displacement : euclidean distance (mm) of displacement at each coordinate
        (None when ``brain_pts`` is not provided)
    """
    if brain_pts is None:
        # Default points: midpoints of the faces of a box around the head.
        respos = np.diag([70, 70, 75])
        resneg = np.diag([-70, -110, -45])
        all_pts = np.vstack((np.hstack((respos, resneg)), np.ones((1, 6))))
        displacement = None
    else:
        all_pts = brain_pts
    n_pts = all_pts.size - all_pts.shape[1]
    newpos = np.zeros((mc.shape[0], n_pts))
    if brain_pts is not None:
        # BUGFIX: use integer division -- np.zeros requires integer
        # dimensions, and ``n_pts / 3`` is a float under Python 3.
        displacement = np.zeros((mc.shape[0], n_pts // 3))
    for i in range(mc.shape[0]):
        affine = _get_affine_matrix(mc[i, :], source)
        newpos[i, :] = np.dot(affine,
                              all_pts)[0:3, :].ravel()
        if brain_pts is not None:
            displacement[i, :] = \
                np.sqrt(np.sum(np.power(np.reshape(newpos[i, :],
                                                   (3, all_pts.shape[1])) -
                                        all_pts[0:3, :],
                                        2),
                               axis=0))
    # np.savez('displacement.npz', newpos=newpos, pts=all_pts)
    normdata = np.zeros(mc.shape[0])
    if use_differences:
        # Prepend a zero row so the difference series keeps the same length.
        newpos = np.concatenate((np.zeros((1, n_pts)),
                                 np.diff(newpos, n=1, axis=0)), axis=0)
        for i in range(newpos.shape[0]):
            normdata[i] = \
                np.max(np.sqrt(np.sum(np.reshape(np.power(np.abs(newpos[i, :]), 2),
                                                 (3, all_pts.shape[1])), axis=0)))
    else:
        # Remove the mean position, then take the RMS displacement.
        newpos = np.abs(signal.detrend(newpos, axis=0, type='constant'))
        normdata = np.sqrt(np.mean(np.power(newpos, 2), axis=1))
    return normdata, displacement
def _nanmean(a, axis=None):
"""Return the mean excluding items that are nan
>>> a = [1, 2, np.nan]
>>> _nanmean(a)
1.5
"""
if axis:
return np.nansum(a, axis) / np.sum(1 - np.isnan(a), axis)
else:
return np.nansum(a) / np.sum(1 - np.isnan(a))
class ArtifactDetectInputSpec(BaseInterfaceInputSpec):
    """Input specification for :class:`ArtifactDetect`.

    Help-text fixes only: several ``desc`` strings were concatenated without
    separating spaces and contained typos ("paramter", "equal's").
    """
    realigned_files = InputMultiPath(File(exists=True),
                                     desc="Names of realigned functional data files",
                                     mandatory=True)
    realignment_parameters = InputMultiPath(File(exists=True), mandatory=True,
                                            desc=("Names of realignment parameters "
                                                  "corresponding to the functional data files"))
    parameter_source = traits.Enum("SPM", "FSL", "AFNI", "NiPy", "FSFAST",
                                   desc="Source of movement parameters",
                                   mandatory=True)
    use_differences = traits.ListBool([True, False], minlen=2, maxlen=2,
                                      usedefault=True,
                                      desc=("Use differences between successive motion (first element) "
                                            "and intensity parameter (second element) estimates in order "
                                            "to determine outliers. (default is [True, False])"))
    use_norm = traits.Bool(True, requires=['norm_threshold'],
                           desc=("Uses a composite of the motion parameters in "
                                 "order to determine outliers."),
                           usedefault=True)
    norm_threshold = traits.Float(desc=("Threshold to use to detect motion-rela"
                                        "ted outliers when composite motion is "
                                        "being used"), mandatory=True,
                                  xor=['rotation_threshold',
                                       'translation_threshold'])
    rotation_threshold = traits.Float(mandatory=True, xor=['norm_threshold'],
                                      desc=("Threshold (in radians) to use to detect rotation-related "
                                            "outliers"))
    translation_threshold = traits.Float(mandatory=True, xor=['norm_threshold'],
                                         desc=("Threshold (in mm) to use to detect translation-related "
                                               "outliers"))
    zintensity_threshold = traits.Float(mandatory=True,
                                        desc=("Intensity Z-threshold used to detect images that deviate "
                                              "from the mean"))
    mask_type = traits.Enum('spm_global', 'file', 'thresh',
                            desc=("Type of mask that should be used to mask the functional "
                                  "data. *spm_global* uses an spm_global like calculation to "
                                  "determine the brain mask. *file* specifies a brain mask "
                                  "file (should be an image file consisting of 0s and 1s). "
                                  "*thresh* specifies a threshold to use. By default all voxels "
                                  "are used, unless one of these mask types are defined."),
                            mandatory=True)
    mask_file = File(exists=True,
                     desc="Mask file to be used if mask_type is 'file'.")
    mask_threshold = traits.Float(desc=("Mask threshold to be used if mask_type"
                                        " is 'thresh'."))
    intersect_mask = traits.Bool(True,
                                 desc=("Intersect the masks when computed from "
                                       "spm_global."))
    save_plot = traits.Bool(True, desc="save plots containing outliers",
                            usedefault=True)
    plot_type = traits.Enum('png', 'svg', 'eps', 'pdf',
                            desc="file type of the outlier plot",
                            usedefault=True)
    bound_by_brainmask = traits.Bool(False, desc=("use the brain mask to "
                                                  "determine bounding box "
                                                  "for composite norm (works "
                                                  "for SPM and Nipy - currently "
                                                  "inaccurate for FSL, AFNI)"),
                                     usedefault=True)
    global_threshold = traits.Float(8.0, desc=("use this threshold when mask "
                                               "type equals spm_global"),
                                    usedefault=True)
class ArtifactDetectOutputSpec(TraitedSpec):
    """Output specification for :class:`ArtifactDetect`.

    Help-text fixes only: missing spaces between concatenated ``desc``
    fragments and the "listing or artifacts" typo.
    """
    outlier_files = OutputMultiPath(File(exists=True),
                                    desc=("One file for each functional run containing a list of "
                                          "0-based indices corresponding to outlier volumes"))
    intensity_files = OutputMultiPath(File(exists=True),
                                      desc=("One file for each functional run containing the global "
                                            "intensity values determined from the brainmask"))
    norm_files = OutputMultiPath(File,
                                 desc=("One file for each functional run containing the composite "
                                       "norm"))
    statistic_files = OutputMultiPath(File(exists=True),
                                      desc=("One file for each functional run containing information "
                                            "about the different types of artifacts and if design info is"
                                            " provided then details of stimulus correlated motion and a "
                                            "listing of artifacts by event type."))
    plot_files = OutputMultiPath(File,
                                 desc=("One image file for each functional run containing the "
                                       "detected outliers"))
    mask_files = OutputMultiPath(File,
                                 desc=("One image file for each functional run containing the mask "
                                       "used for global signal calculation"))
    displacement_files = OutputMultiPath(File,
                                         desc=("One image file for each functional run containing the voxel "
                                               "displacement timeseries"))
class ArtifactDetect(BaseInterface):
    """Detects outliers in a functional imaging series

    Uses intensity and motion parameters to infer outliers. If `use_norm` is
    True, it computes the movement of the center of each face a cuboid centered
    around the head and returns the maximal movement across the centers.

    Examples
    --------
    >>> ad = ArtifactDetect()
    >>> ad.inputs.realigned_files = 'functional.nii'
    >>> ad.inputs.realignment_parameters = 'functional.par'
    >>> ad.inputs.parameter_source = 'FSL'
    >>> ad.inputs.norm_threshold = 1
    >>> ad.inputs.use_differences = [True, False]
    >>> ad.inputs.zintensity_threshold = 3
    >>> ad.run() # doctest: +SKIP
    """
    input_spec = ArtifactDetectInputSpec
    output_spec = ArtifactDetectOutputSpec

    def __init__(self, **inputs):
        super(ArtifactDetect, self).__init__(**inputs)

    def _get_output_filenames(self, motionfile, output_dir):
        """Generate output files based on motion filenames

        Parameters
        ----------
        motionfile: file/string
            Filename for motion parameter file
        output_dir: string
            output directory in which the files will be generated
        """
        if isinstance(motionfile, str):
            infile = motionfile
        elif isinstance(motionfile, list):
            infile = motionfile[0]
        else:
            raise Exception("Unknown type of file")
        _, filename, ext = split_filename(infile)
        artifactfile = os.path.join(output_dir, ''.join(('art.', filename,
                                                         '_outliers.txt')))
        intensityfile = os.path.join(output_dir, ''.join(('global_intensity.',
                                                          filename, '.txt')))
        statsfile = os.path.join(output_dir, ''.join(('stats.', filename,
                                                      '.txt')))
        normfile = os.path.join(output_dir, ''.join(('norm.', filename,
                                                     '.txt')))
        plotfile = os.path.join(output_dir, ''.join(('plot.', filename, '.',
                                                     self.inputs.plot_type)))
        displacementfile = os.path.join(output_dir, ''.join(('disp.',
                                                             filename, ext)))
        maskfile = os.path.join(output_dir, ''.join(('mask.', filename, ext)))
        return (artifactfile, intensityfile, statsfile, normfile, plotfile,
                displacementfile, maskfile)

    def _list_outputs(self):
        """Map every input run to the output files _detect_outliers_core writes."""
        outputs = self._outputs().get()
        outputs['outlier_files'] = []
        outputs['intensity_files'] = []
        outputs['statistic_files'] = []
        outputs['mask_files'] = []
        if isdefined(self.inputs.use_norm) and self.inputs.use_norm:
            outputs['norm_files'] = []
            if self.inputs.bound_by_brainmask:
                outputs['displacement_files'] = []
        if isdefined(self.inputs.save_plot) and self.inputs.save_plot:
            outputs['plot_files'] = []
        for i, f in enumerate(filename_to_list(self.inputs.realigned_files)):
            (outlierfile, intensityfile, statsfile, normfile, plotfile,
             displacementfile, maskfile) = \
                self._get_output_filenames(f, os.getcwd())
            outputs['outlier_files'].insert(i, outlierfile)
            outputs['intensity_files'].insert(i, intensityfile)
            outputs['statistic_files'].insert(i, statsfile)
            outputs['mask_files'].insert(i, maskfile)
            if isdefined(self.inputs.use_norm) and self.inputs.use_norm:
                outputs['norm_files'].insert(i, normfile)
                if self.inputs.bound_by_brainmask:
                    outputs['displacement_files'].insert(i, displacementfile)
            if isdefined(self.inputs.save_plot) and self.inputs.save_plot:
                outputs['plot_files'].insert(i, plotfile)
        return outputs

    def _plot_outliers_with_wave(self, wave, outliers, name):
        """Plot a timeseries and overlay vertical lines at outlier indices."""
        import matplotlib.pyplot as plt
        plt.plot(wave)
        plt.ylim([wave.min(), wave.max()])
        plt.xlim([0, len(wave) - 1])
        if len(outliers):
            plt.plot(np.tile(outliers[:, None], (1, 2)).T,
                     np.tile([wave.min(), wave.max()], (len(outliers), 1)).T,
                     'r')
        plt.xlabel('Scans - 0-based')
        plt.ylabel(name)

    def _detect_outliers_core(self, imgfile, motionfile, runidx, cwd=None):
        """
        Core routine for detecting outliers
        """
        if not cwd:
            cwd = os.getcwd()
        # read in functional image
        if isinstance(imgfile, str):
            nim = load(imgfile)
        elif isinstance(imgfile, list):
            if len(imgfile) == 1:
                nim = load(imgfile[0])
            else:
                images = [load(f) for f in imgfile]
                nim = funcs.concat_images(images)
        # compute global intensity signal
        (x, y, z, timepoints) = nim.get_shape()
        data = nim.get_data()
        affine = nim.get_affine()
        g = np.zeros((timepoints, 1))
        masktype = self.inputs.mask_type
        if masktype == 'spm_global':  # spm_global like calculation
            iflogger.debug('art: using spm global')
            intersect_mask = self.inputs.intersect_mask
            if intersect_mask:
                mask = np.ones((x, y, z), dtype=bool)
                for t0 in range(timepoints):
                    vol = data[:, :, :, t0]
                    # Use an SPM like approach
                    mask_tmp = vol > \
                        (_nanmean(vol) / self.inputs.global_threshold)
                    mask = mask * mask_tmp
                for t0 in range(timepoints):
                    vol = data[:, :, :, t0]
                    g[t0] = _nanmean(vol[mask])
                # Fall back to per-volume masks if the intersection is too small.
                if len(find_indices(mask)) < (np.prod((x, y, z)) / 10):
                    intersect_mask = False
                    g = np.zeros((timepoints, 1))
            if not intersect_mask:
                iflogger.info('not intersect_mask is True')
                mask = np.zeros((x, y, z, timepoints))
                for t0 in range(timepoints):
                    vol = data[:, :, :, t0]
                    mask_tmp = vol > \
                        (_nanmean(vol) / self.inputs.global_threshold)
                    mask[:, :, :, t0] = mask_tmp
                    g[t0] = np.nansum(vol * mask_tmp)/np.nansum(mask_tmp)
        elif masktype == 'file':  # uses a mask image to determine intensity
            maskimg = load(self.inputs.mask_file)
            mask = maskimg.get_data()
            affine = maskimg.get_affine()
            mask = mask > 0.5
            for t0 in range(timepoints):
                vol = data[:, :, :, t0]
                g[t0] = _nanmean(vol[mask])
        elif masktype == 'thresh':  # uses a fixed signal threshold
            for t0 in range(timepoints):
                vol = data[:, :, :, t0]
                mask = vol > self.inputs.mask_threshold
                g[t0] = _nanmean(vol[mask])
        else:
            mask = np.ones((x, y, z))
            g = _nanmean(data[mask > 0, :], 1)
        # compute normalized intensity values
        gz = signal.detrend(g, axis=0)  # detrend the signal
        if self.inputs.use_differences[1]:
            gz = np.concatenate((np.zeros((1, 1)), np.diff(gz, n=1, axis=0)),
                                axis=0)
        gz = (gz - np.mean(gz)) / np.std(gz)  # normalize the detrended signal
        iidx = find_indices(abs(gz) > self.inputs.zintensity_threshold)
        # read in motion parameters
        mc_in = np.loadtxt(motionfile)
        mc = deepcopy(mc_in)
        (artifactfile, intensityfile, statsfile, normfile, plotfile,
         displacementfile, maskfile) = self._get_output_filenames(imgfile, cwd)
        mask_img = Nifti1Image(mask.astype(np.uint8), affine)
        mask_img.to_filename(maskfile)
        if self.inputs.use_norm:
            brain_pts = None
            if self.inputs.bound_by_brainmask:
                voxel_coords = np.nonzero(mask)
                coords = np.vstack((voxel_coords[0],
                                    np.vstack((voxel_coords[1],
                                               voxel_coords[2])))).T
                brain_pts = np.dot(affine,
                                   np.hstack((coords,
                                              np.ones((coords.shape[0], 1)))).T)
            # calculate the norm of the motion parameters
            normval, displacement = _calc_norm(mc,
                                               self.inputs.use_differences[0],
                                               self.inputs.parameter_source,
                                               brain_pts=brain_pts)
            tidx = find_indices(normval > self.inputs.norm_threshold)
            ridx = find_indices(normval < 0)
            if displacement is not None:
                # BUGFIX: np.float was removed in NumPy 1.24; the builtin
                # float is the exact equivalent.
                dmap = np.zeros((x, y, z, timepoints), dtype=float)
                for i in range(timepoints):
                    dmap[voxel_coords[0],
                         voxel_coords[1],
                         voxel_coords[2], i] = displacement[i, :]
                dimg = Nifti1Image(dmap, affine)
                dimg.to_filename(displacementfile)
        else:
            if self.inputs.use_differences[0]:
                mc = np.concatenate((np.zeros((1, 6)),
                                     np.diff(mc_in, n=1, axis=0)),
                                    axis=0)
            traval = mc[:, 0:3]  # translation parameters (mm)
            rotval = mc[:, 3:6]  # rotation parameters (rad)
            tidx = find_indices(np.sum(abs(traval) >
                                       self.inputs.translation_threshold, 1)
                                > 0)
            ridx = find_indices(np.sum(abs(rotval) >
                                       self.inputs.rotation_threshold, 1) > 0)
        outliers = np.unique(np.union1d(iidx, np.union1d(tidx, ridx)))
        # write output to outputfile
        np.savetxt(artifactfile, outliers, fmt='%d', delimiter=' ')
        np.savetxt(intensityfile, g, fmt='%.2f', delimiter=' ')
        if self.inputs.use_norm:
            np.savetxt(normfile, normval, fmt='%.4f', delimiter=' ')
        if isdefined(self.inputs.save_plot) and self.inputs.save_plot:
            import matplotlib
            matplotlib.use(config.get("execution", "matplotlib_backend"))
            import matplotlib.pyplot as plt
            fig = plt.figure()
            if isdefined(self.inputs.use_norm) and self.inputs.use_norm:
                plt.subplot(211)
            else:
                plt.subplot(311)
            self._plot_outliers_with_wave(gz, iidx, 'Intensity')
            if isdefined(self.inputs.use_norm) and self.inputs.use_norm:
                plt.subplot(212)
                self._plot_outliers_with_wave(normval, np.union1d(tidx, ridx),
                                              'Norm (mm)')
            else:
                diff = ''
                if self.inputs.use_differences[0]:
                    diff = 'diff'
                plt.subplot(312)
                self._plot_outliers_with_wave(traval, tidx,
                                              'Translation (mm)' + diff)
                plt.subplot(313)
                self._plot_outliers_with_wave(rotval, ridx,
                                              'Rotation (rad)' + diff)
            plt.savefig(plotfile)
            plt.close(fig)
        motion_outliers = np.union1d(tidx, ridx)
        stats = [{'motion_file': motionfile,
                  'functional_file': imgfile},
                 {'common_outliers': len(np.intersect1d(iidx, motion_outliers)),
                  'intensity_outliers': len(np.setdiff1d(iidx,
                                                         motion_outliers)),
                  'motion_outliers': len(np.setdiff1d(motion_outliers, iidx)),
                  },
                 {'motion': [{'using differences': self.inputs.use_differences[0]},
                             {'mean': np.mean(mc_in, axis=0).tolist(),
                              'min': np.min(mc_in, axis=0).tolist(),
                              'max': np.max(mc_in, axis=0).tolist(),
                              'std': np.std(mc_in, axis=0).tolist()},
                             ]},
                 {'intensity': [{'using differences': self.inputs.use_differences[1]},
                                {'mean': np.mean(gz, axis=0).tolist(),
                                 'min': np.min(gz, axis=0).tolist(),
                                 'max': np.max(gz, axis=0).tolist(),
                                 'std': np.std(gz, axis=0).tolist()},
                                ]},
                 ]
        if self.inputs.use_norm:
            stats.insert(3, {'motion_norm':
                             {'mean': np.mean(normval, axis=0).tolist(),
                              'min': np.min(normval, axis=0).tolist(),
                              'max': np.max(normval, axis=0).tolist(),
                              'std': np.std(normval, axis=0).tolist(),
                              }})
        save_json(statsfile, stats)

    def _run_interface(self, runtime):
        """Execute this module.
        """
        funcfilelist = filename_to_list(self.inputs.realigned_files)
        motparamlist = filename_to_list(self.inputs.realignment_parameters)
        for i, imgf in enumerate(funcfilelist):
            self._detect_outliers_core(imgf, motparamlist[i], i,
                                       cwd=os.getcwd())
        return runtime
class StimCorrInputSpec(BaseInterfaceInputSpec):
    """Input specification for :class:`StimulusCorrelation`."""
    # One motion parameter file per functional run.
    realignment_parameters = InputMultiPath(File(exists=True), mandatory=True,
        desc=('Names of realignment parameters corresponding to the functional '
              'data files'))
    # Global intensity timeseries, e.g. as produced by ArtifactDetect.
    intensity_values = InputMultiPath(File(exists=True), mandatory=True,
        desc='Name of file containing intensity values')
    spm_mat_file = File(exists=True, mandatory=True,
        desc='SPM mat file (use pre-estimate SPM.mat file)')
    concatenated_design = traits.Bool(mandatory=True,
        desc='state if the design matrix contains concatenated sessions')
class StimCorrOutputSpec(TraitedSpec):
    """Output specification for :class:`StimulusCorrelation`."""
    stimcorr_files = OutputMultiPath(File(exists=True),
        desc='List of files containing correlation values')
class StimulusCorrelation(BaseInterface):
    """Determines if stimuli are correlated with motion or intensity
    parameters.

    Currently this class supports an SPM generated design matrix and requires
    intensity parameters. This implies that one must run
    :ref:`ArtifactDetect <nipype.algorithms.rapidart.ArtifactDetect>`
    and :ref:`Level1Design <nipype.interfaces.spm.model.Level1Design>` prior to running this or
    provide an SPM.mat file and intensity parameters through some other means.

    Examples
    --------
    >>> sc = StimulusCorrelation()
    >>> sc.inputs.realignment_parameters = 'functional.par'
    >>> sc.inputs.intensity_values = 'functional.rms'
    >>> sc.inputs.spm_mat_file = 'SPM.mat'
    >>> sc.inputs.concatenated_design = False
    >>> sc.run() # doctest: +SKIP
    """
    input_spec = StimCorrInputSpec
    output_spec = StimCorrOutputSpec

    def _get_output_filenames(self, motionfile, output_dir):
        """Generate output files based on motion filenames

        Parameters
        ----------
        motionfile: file/string
            Filename for motion parameter file
        output_dir: string
            output directory in which the files will be generated
        """
        (_, filename) = os.path.split(motionfile)
        (filename, _) = os.path.splitext(filename)
        corrfile = os.path.join(output_dir, ''.join(('qa.', filename,
                                                     '_stimcorr.txt')))
        return corrfile

    def _stimcorr_core(self, motionfile, intensityfile, designmatrix, cwd=None):
        """
        Core routine for determining stimulus correlation
        """
        if not cwd:
            cwd = os.getcwd()
        # read in motion parameters
        mc_in = np.loadtxt(motionfile)
        g_in = np.loadtxt(intensityfile)
        g_in.shape = g_in.shape[0], 1
        dcol = designmatrix.shape[1]
        mccol = mc_in.shape[1]
        # correlate [design | motion | intensity] column-wise
        concat_matrix = np.hstack((np.hstack((designmatrix, mc_in)), g_in))
        cm = np.corrcoef(concat_matrix, rowvar=0)
        corrfile = self._get_output_filenames(motionfile, cwd)
        # Write output; the context manager guarantees the handle is closed
        # even on error (the original used ``file = open(...)``, which both
        # shadowed the builtin and leaked the handle on failure).
        with open(corrfile, 'w') as fp:
            fp.write("Stats for:\n")
            fp.write("Stimulus correlated motion:\n%s\n" % motionfile)
            for i in range(dcol):
                fp.write("SCM.%d:" % i)
                for v in cm[i, dcol + np.arange(mccol)]:
                    fp.write(" %.2f" % v)
                fp.write('\n')
            fp.write("Stimulus correlated intensity:\n%s\n" % intensityfile)
            for i in range(dcol):
                fp.write("SCI.%d: %.2f\n" % (i, cm[i, -1]))

    def _get_spm_submatrix(self, spmmat, sessidx, rows=None):
        """
        Parameters
        ----------
        spmmat: scipy matlab object
            full SPM.mat file loaded into a scipy object
        sessidx: int
            index to session that needs to be extracted.
        rows: array-like of int, optional
            explicit row indices; defaults to the session's own rows.
        """
        designmatrix = spmmat['SPM'][0][0].xX[0][0].X
        U = spmmat['SPM'][0][0].Sess[0][sessidx].U[0]
        if rows is None:
            rows = spmmat['SPM'][0][0].Sess[0][sessidx].row[0] - 1
        cols = spmmat['SPM'][0][0].Sess[0][sessidx].col[0][range(len(U))] - 1
        outmatrix = designmatrix.take(rows.tolist(), axis=0).take(cols.tolist(),
                                                                  axis=1)
        return outmatrix

    def _run_interface(self, runtime):
        """Execute this module.
        """
        motparamlist = self.inputs.realignment_parameters
        intensityfiles = self.inputs.intensity_values
        spmmat = sio.loadmat(self.inputs.spm_mat_file, struct_as_record=False)
        nrows = []
        for i, motparam in enumerate(motparamlist):
            sessidx = i
            rows = None
            if self.inputs.concatenated_design:
                # All sessions share design index 0; compute this run's rows
                # from the cumulative row counts of previous runs.
                sessidx = 0
                mc_in = np.loadtxt(motparam)
                rows = np.sum(nrows) + np.arange(mc_in.shape[0])
                nrows.append(mc_in.shape[0])
            matrix = self._get_spm_submatrix(spmmat, sessidx, rows)
            self._stimcorr_core(motparam, intensityfiles[i],
                                matrix, os.getcwd())
        return runtime

    def _list_outputs(self):
        """One correlation file per realignment-parameter input."""
        outputs = self._outputs().get()
        files = []
        for i, f in enumerate(self.inputs.realignment_parameters):
            files.insert(i, self._get_output_filenames(f, os.getcwd()))
        if files:
            outputs['stimcorr_files'] = files
        return outputs
| |
from collections import deque, OrderedDict
import datetime
import logging
import re
from six import string_types, iteritems, binary_type, text_type, b
logger = logging.getLogger(__name__)
class NoMatchError(Exception):
    # NOTE(review): no raiser is visible in this chunk; presumably signals a
    # failed type/format match elsewhere in the module -- confirm at call site.
    pass
class unknown(binary_type):
    """Sentinel type used when a column's type could not be resolved.

    Instances are byte strings whose content is the literal name 'unknown'.
    """
    __name__ = 'unknown'

    def __new__(cls):
        # BUGFIX: on Python 3, bytes(<str>) requires an encoding; encode
        # explicitly (a no-op on Python 2, where str.encode('ascii') is str).
        return super(unknown, cls).__new__(cls, cls.__name__.encode('ascii'))

    def __str__(self):
        return self.__name__

    def __eq__(self, other):
        return binary_type(self) == binary_type(other)

    # Defining __eq__ sets __hash__ to None on Python 3, which would make
    # instances unusable as dict keys / set members; restore the parent hash.
    __hash__ = binary_type.__hash__
def test_float(v):
# Fixed-width integer codes are actually strings.
# if v and v[0] == '0' and len(v) > 1:
# return 0
try:
float(v)
return 1
except:
return 0
def test_int(v):
# Fixed-width integer codes are actually strings.
# if v and v[0] == '0' and len(v) > 1:
# return 0
try:
if float(v) == int(float(v)):
return 1
else:
return 0
except:
return 0
def test_string(v):
    """Return 1 when *v* is a text or byte string, 0 otherwise."""
    return 1 if isinstance(v, (string_types, binary_type)) else 0
def test_datetime(v):
    """Return 1 when *v* looks like an ISO-8601 datetime string, else 0."""
    if not isinstance(v, string_types):
        return 0
    # 22 chars is approximate: ISO8601 fractional seconds could legitimately
    # produce a longer string, but this matches the common forms.
    if len(v) > 22 or ('-' not in v and ':' not in v):
        return 0
    # Every character must be a digit or one of the ISO separators.
    bad = [c for c in set(v) if not c.isdigit() and c not in 'T:-Z']
    return 0 if bad else 1
def test_time(v):
    """Return 1 when *v* looks like an ISO time-of-day string, else 0."""
    if not isinstance(v, string_types):
        return 0
    if len(v) > 15 or ':' not in v:
        return 0
    # Every character must be a digit or one of the ISO time separators.
    bad = [c for c in set(v) if not c.isdigit() and c not in 'T:Z.']
    return 0 if bad else 1
def test_date(v):
    """Return 1 when *v* looks like an ISO calendar-date string, else 0."""
    if not isinstance(v, string_types):
        return 0
    # 10 chars fits 'YYYY-MM-DD'; longer strings are not plain dates.
    if len(v) > 10 or '-' not in v:
        return 0
    # Only digits and dashes are permitted.
    bad = [c for c in set(v) if not c.isdigit() and c not in '-']
    return 0 if bad else 1
# Ordered (type, probe) pairs applied to each value in Column.test(); a later
# match overrides an earlier one (the loop there does not break), so
# binary_type effectively acts as the catch-all.
tests = [
    (int, test_int),
    (float, test_float),
    (binary_type, test_string),
]
class Column(object):
    """Accumulates per-value type statistics for one column of a table."""
    position = None       # column index in the source table
    header = None         # column header value, if any
    type_counts = None    # type -> number of values classified as that type
    type_ratios = None    # type -> fraction of values (filled by _resolved_type)
    length = 0            # longest value seen (in bytes)
    count = 0             # total number of values tested
    strings = None        # sample of distinct string values (bounded)

    def __init__(self):
        self.type_counts = {k: 0 for k, v in tests}
        self.type_counts[datetime.datetime] = 0
        self.type_counts[datetime.date] = 0
        self.type_counts[datetime.time] = 0
        self.type_counts[None] = 0
        self.type_counts[text_type] = 0
        # Bounded so pathological columns cannot grow memory without limit.
        self.strings = deque(maxlen=1000)
        self.position = None
        self.header = None
        self.count = 0
        self.length = 0
        self.date_successes = 0
        self.description = None

    def inc_type_count(self, t):
        """Manually bump the count for type *t*."""
        self.type_counts[t] += 1

    def test(self, v):
        """Classify one value, update the counters, and return its type."""
        from dateutil import parser
        self.count += 1
        if v is None:
            self.type_counts[None] += 1
            return None
        try:
            v = '{}'.format(v).encode('ascii')
        except UnicodeEncodeError:
            # Non-ASCII values are counted as text and not analyzed further.
            self.type_counts[text_type] += 1
            return text_type
        self.length = max(self.length, len(v))
        try:
            v = v.strip()
        except AttributeError:
            pass
        # BUGFIX: ``v`` is a byte string here, so the original ``v == ''``
        # never matched on Python 3 and empty values were miscounted as
        # strings; ``not v`` is correct for both str (Py2) and bytes (Py3).
        if not v:
            self.type_counts[None] += 1
            return None
        for test, testf in tests:
            t = testf(v)
            if t > 0:
                # No break: a later matching test overrides an earlier one.
                type_ = test
                if test == binary_type:
                    if v not in self.strings:
                        self.strings.append(v)
                    # Only attempt (expensive) date parsing early on, or once
                    # we know the column actually contains dates.
                    if (self.count < 1000 or self.date_successes != 0) and any((c in b('-/:T')) for c in v):
                        try:
                            maybe_dt = parser.parse(
                                v, default=datetime.datetime.fromtimestamp(0))
                        except (TypeError, ValueError):
                            maybe_dt = None
                        if maybe_dt:
                            # Check which parts of the default the parser
                            # didn't change to find the real type.
                            # HACK The time check will be wrong for the time of
                            # the start of the epoch, 16:00.
                            if maybe_dt.time() == datetime.datetime.fromtimestamp(0).time():
                                type_ = datetime.date
                            elif maybe_dt.date() == datetime.datetime.fromtimestamp(0).date():
                                type_ = datetime.time
                            else:
                                type_ = datetime.datetime
                            self.date_successes += 1
        self.type_counts[type_] += 1
        return type_

    def _resolved_type(self):
        """Return the type for the columns, and a flag to indicate that the
        column has codes."""
        self.type_ratios = {test: (float(self.type_counts[test]) / float(self.count)) if self.count else None
                            for test, testf in tests + [(None, None)]}
        # BUGFIX: when count == 0 every ratio is None and the old unguarded
        # ``> .05`` comparison raised TypeError on Python 3.
        str_ratio = self.type_ratios[binary_type]
        # If it is more than 5% str, it's a str
        if str_ratio is not None and str_ratio > .05:
            return binary_type, False
        if self.type_counts[datetime.datetime] > 0:
            num_type = datetime.datetime
        elif self.type_counts[datetime.date] > 0:
            num_type = datetime.date
        elif self.type_counts[datetime.time] > 0:
            num_type = datetime.time
        elif self.type_counts[float] > 0:
            num_type = float
        elif self.type_counts[int] > 0:
            num_type = int
        elif self.type_counts[binary_type] > 0:
            num_type = binary_type
        elif self.type_counts[text_type] > 0:
            num_type = text_type
        else:
            num_type = unknown
        # A numeric column that also saw strings is assumed to carry codes.
        if self.type_counts[binary_type] > 0 and num_type != binary_type:
            has_codes = True
        else:
            has_codes = False
        return num_type, has_codes

    @property
    def resolved_type(self):
        return self._resolved_type()[0]

    @property
    def resolved_type_name(self):
        try:
            return self.resolved_type.__name__
        except AttributeError:
            return self.resolved_type

    @property
    def has_codes(self):
        return self._resolved_type()[1]
class TypeIntuiter(object):
    """Determine the types of rows in a table.

    Feed a header via :meth:`process_header` and data rows via
    :meth:`process_row` (or use :meth:`run`); each column accumulates
    per-type tallies in a :class:`Column`, which resolve to a final type.
    """
    header = None
    counts = None

    def __init__(self):
        self._columns = OrderedDict()

    def process_header(self, row):
        """Record the header row, creating a Column per cell.

        Returns self so calls can be chained.
        """
        for i, value in enumerate(row):
            # BUG FIX: the original tested ``i not in header`` where header
            # was the row itself — membership of an integer index among the
            # header *values*. That guard was always true for string headers
            # and wrongly skipped columns whose header equalled an index.
            # The intended guard, mirroring process_row, is on self._columns;
            # the header value is recorded either way.
            if i not in self._columns:
                self._columns[i] = Column()
                self._columns[i].position = i
            self._columns[i].header = value
        return self

    def process_row(self, n, row):
        """Test every value of data row ``row`` against its column.

        ``n`` is the row number (currently unused here; kept for callers).
        """
        for i, value in enumerate(row):
            try:
                if i not in self._columns:
                    self._columns[i] = Column()
                    self._columns[i].position = i
                self._columns[i].test(value)
            except Exception as e:
                # This usually doesn't matter, since there are usually plenty
                # of other rows to intuit from, but surface it for debugging.
                print(i, value, e)
                raise

    def run(self, source, total_rows=None):
        """Process all rows of ``source``; when ``total_rows`` indicates a
        large table, sample roughly MIN_SKIP_ROWS rows evenly instead."""
        MIN_SKIP_ROWS = 10000
        if total_rows and total_rows > MIN_SKIP_ROWS:
            skip_rows = int(total_rows / MIN_SKIP_ROWS)
            skip_rows = skip_rows if skip_rows > 1 else None
        else:
            skip_rows = None
        for i, row in enumerate(iter(source)):
            if skip_rows and i % skip_rows != 0:
                continue
            self.process_row(i, row)
        return self

    @property
    def columns(self):
        """Yield the Column accumulators, refreshing their positions."""
        for k, v in iteritems(self._columns):
            v.position = k
            yield v

    def __str__(self):
        from tabulate import tabulate
        results = self.results_table()
        if len(results) > 1:
            o = '\n' + binary_type(tabulate(results[1:], results[0], tablefmt='pipe'))
        else:
            o = ''
        return 'TypeIntuiter ' + o

    @staticmethod
    def normalize_type(typ):
        """Convert a type-name string ('int', 'date', ...) to the type object;
        type objects pass through unchanged. 'unknown' maps to binary_type."""
        if isinstance(typ, string_types):
            import datetime
            # ROBUSTNESS FIX: __builtins__ is a dict when this module is
            # imported but a *module* when run as __main__; the original
            # ``__builtins__.items()`` broke in the latter case.
            builtins_ns = __builtins__ if isinstance(__builtins__, dict) else vars(__builtins__)
            m = dict(list(builtins_ns.items()) + list(datetime.__dict__.items()))
            if typ == 'unknown':
                typ = binary_type
            else:
                typ = m[typ]
        return typ

    @staticmethod
    def promote_type(orig_type, new_type):
        """Given a table with an original type, decide whether a new determination of a new applicable type
        should overide the existing one.

        Accepts type objects or their names; types later in the precedence
        list win. Returns the name string when conversion occurred.
        """
        if not new_type:
            return orig_type
        if not orig_type:
            return new_type
        try:
            orig_type = orig_type.__name__
        except AttributeError:
            pass
        try:
            new_type = new_type.__name__
        except AttributeError:
            pass
        type_precidence = ['unknown', 'int', 'float', 'date', 'time', 'datetime', 'str', 'bytes', 'unicode']
        # TODO This will fail for dates and times.
        if type_precidence.index(new_type) > type_precidence.index(orig_type):
            return new_type
        else:
            return orig_type

    def results_table(self):
        """Build a list-of-lists summary table, header row first."""
        fields = 'position header length resolved_type has_codes count ints floats strs unicode nones datetimes dates times '.split()
        header = list(fields)
        # Shorten a few of the header names
        header[0] = '#'
        header[2] = 'size'
        header[4] = 'codes'
        header[9] = 'uni'
        header[11] = 'dt'
        rows = list()
        rows.append(header)
        for d in self._dump():
            rows.append([d[k] for k in fields])
        return rows

    def _dump(self):
        """Yield one summary dict per column, including a sample of the
        string values seen ('strvals')."""
        for v in self.columns:
            d = {
                'position': v.position,
                'header': v.header,
                'length': v.length,
                'resolved_type': v.resolved_type_name,
                'has_codes': v.has_codes,
                'count': v.count,
                'ints': v.type_counts.get(int, None),
                'floats': v.type_counts.get(float, None),
                'strs': v.type_counts.get(binary_type, None),
                'unicode': v.type_counts.get(text_type, None),
                'nones': v.type_counts.get(None, None),
                'datetimes': v.type_counts.get(datetime.datetime, None),
                'dates': v.type_counts.get(datetime.date, None),
                'times': v.type_counts.get(datetime.time, None),
                'strvals': b(',').join(list(v.strings)[:20])
            }
            yield d
class ClusterHeaders(object):
    """Using Source table headers, cluster the source tables into destination tables."""

    def __init__(self, bundle=None):
        self._bundle = bundle
        self._headers = {}

    def match_headers(self, a, b):
        """Difference score for two header lists: (removed + added) entries
        divided by the number of unchanged entries. Lower means closer."""
        from difflib import ndiff
        from collections import Counter
        edit_counts = Counter(line[0] for line in ndiff(a, b) if line[0] != '?')
        unchanged = edit_counts[' ']
        edits = edit_counts['-'] + edit_counts['+']
        return float(edits) / float(unchanged)

    def match_headers_a(self, a, b):
        """Alternate matcher: print each pair of column names whose
        SequenceMatcher similarity ratio exceeds 0.9."""
        from difflib import SequenceMatcher
        for left_col in a:
            for right_col in b:
                if SequenceMatcher(None, left_col, right_col).ratio() > .9:
                    print(left_col, right_col)
                    break

    def add_header(self, name, headers):
        """Register the header list for source table ``name``."""
        self._headers[name] = headers

    def pairs(self):
        """All unordered name pairs, each emitted once as (smaller, larger)."""
        names = list(self._headers)
        return {(x, y) for x in names for y in names if y > x}

    @classmethod
    def long_substr(cls, data):
        """Longest substring of data[0] common to every string in ``data``."""
        data = list(data)
        best = ''
        if len(data) > 1 and len(data[0]) > 0:
            first = data[0]
            for start in range(len(first)):
                for width in range(len(first) - start + 1):
                    candidate = first[start:start + width]
                    if width > len(best) and cls.is_substr(candidate, data):
                        best = candidate
        return best

    @classmethod
    def is_substr(cls, find, data):
        """True when ``find`` occurs in every element of ``data``; False only
        when both ``data`` and ``find`` are empty."""
        if not data and not find:
            return False
        return all(find in element for element in data)

    def cluster(self):
        """Cluster table names whose headers differ by less than 30%.

        Returns {key: sorted_member_names}, keyed by the longest common
        substring of the member names with underscores stripped.
        """
        scored = []
        for left, right in self.pairs():
            score = round(self.match_headers(self._headers[left], self._headers[right]), 3)
            scored.append((score, left, right))
        clusters = []
        for score, left, right in sorted(scored, key=lambda item: item[0]):
            if score >= .3:
                continue
            for members in clusters:
                if left in members or right in members:
                    members.add(left)
                    members.add(right)
                    break
            else:
                clusters.append({left, right})
        return {self.long_substr(members).strip('_'): sorted(members)
                for members in clusters}
class RowIntuiter(object):
    """Classify the rows of a table as blank, comment, header, or data.

    :meth:`run` builds a regex "picture" of each row (one character per cell:
    blank/string/number), derives a pattern for data rows, and uses it to
    find where the header ends and data begins (and, given tail rows, where
    the data ends).
    """
    N_TEST_ROWS = 150
    # Collapse unicode->bytes and float->int so pictures only need the three
    # cell classes in ``template`` below.
    type_map = {
        text_type: binary_type,
        float: int}
    def __init__(self, debug = False):
        self.comment_lines = []
        self.header_lines = []
        self.start_line = 0
        self.end_line = 0
        self.data_pattern_source = None
        # Fallback row-classification patterns, tried after the derived
        # data pattern in run().
        self.patterns = (
            ('B', re.compile(r'^_+$')),  # Blank
            ('C', re.compile(r'^XX_+$')),  # Comment
            ('C', re.compile(r'^X_+$')),  # Comment
            ('H', re.compile(r'^X+$')),  # Header
            ('H', re.compile(r'^_{,6}X+$')),  # Header, A few starting blanks, the rest are strings.
            ('H', re.compile(r"(?:X_)")),  # Header
        )
        self.test_rows = []
        self.debug = debug
        self._logger = logging.getLogger('rowintuit')
        self._logger.setLevel(logging.DEBUG if self.debug else logging.INFO)
    def picture(self, row):
        """Create a simplified character representation of the data row, which can be pattern matched
        with a regex """
        template = '_Xn'
        types = (type(None), binary_type, int)
        def guess_type(v):
            # Narrowest successful conversion wins; blank maps to NoneType.
            # text_type(v) always succeeds, so a type is always returned.
            v = text_type(v).strip()
            if not bool(v):
                return type(None)
            for t in (float, int, binary_type, text_type):
                try:
                    return type(t(v))
                except:
                    pass
        def p(e):
            # Map one cell to '_' (blank), 'X' (string) or 'n' (number).
            try:
                t = guess_type(e)
                tm = self.type_map.get(t, t)
                return template[types.index(tm)]
            except ValueError:
                raise ValueError("Type '{}'/'{}' not in the types list: {}".format(t, tm, types))
        return ''.join(p(e) for e in row)
    def _data_pattern_source(self, rows, change_limit=5):
        """Build a regex source matching the union of the cell classes seen
        at each position across ``rows``.

        Returns (pattern_source, contributing_row_count, longest_row_len).
        After the first row, a row that would change more than
        ``change_limit`` cells is assumed spurious and is skipped.
        """
        l = max(len(row) for row in rows)  # Length of longest row
        patterns = [set() for _ in range(l)]
        contributors = 0  # Number of rows that contributed to pattern.
        for j, row in enumerate(rows):
            changes = sum(1 for i, c in enumerate(self.picture(row)) if c not in patterns[i])
            # The pattern should stabilize quickly, with new rows not changing many cells. If there is
            # a large change, ignore it, as it may be spurious
            if j > 0 and changes > change_limit:
                continue
            contributors += 1
            for i, c in enumerate(self.picture(row)):
                patterns[i].add(c)
        pattern_source = ''.join("(?:{})".format('|'.join(s)) for s in patterns)
        return pattern_source, contributors, l
    def data_pattern(self, rows):
        """Find a regex matching the table's data rows.

        Slides a window over ``rows`` until one yields a stable pattern
        (>75% of its rows contributing); raises RowIntuitError otherwise.
        Returns (compiled_pattern, pattern_source, average_column_count).
        """
        tests = 50
        test_rows = min(20, len(rows))
        def try_tests(tests, test_rows, rows):
            # Look for the first row where you can generate a data pattern that does
            # not have a large number of changes in subsequent rows.
            for i in range(tests):
                max_changes = len(rows[0]) / 4  # Data row should have fewer than 25% changes compared to next
                test_rows_slice = rows[i: i + test_rows]
                if not test_rows_slice:
                    continue
                pattern_source, contributors, l = self._data_pattern_source(test_rows_slice, max_changes)
                ave_cols = sum(1 for r in test_rows_slice for c in r) / len(test_rows_slice)
                # If more the 75% of the rows contributed to the pattern, consider it good
                if contributors > test_rows * .75:
                    return pattern_source, ave_cols
            return (None, None)
        pattern_source, ave_cols = try_tests(tests, test_rows, rows)
        if not pattern_source:
            from .exceptions import RowIntuitError
            raise RowIntuitError('Failed to find data pattern')
        pattern = re.compile(pattern_source)
        return pattern, pattern_source, ave_cols
    @staticmethod
    def match_picture(picture, patterns):
        """Return the label of the first pattern that matches ``picture``,
        or False when none match."""
        for l, r in patterns:
            if r.search(picture):
                return l
        return False
    def run(self, head_rows, tail_rows=None, n_rows=None):
        """Classify ``head_rows`` (and optionally ``tail_rows``): record
        comment/header line numbers, the first data line, the coalesced
        headers and, when tail rows are given, the last data line.

        Returns self.
        """
        header_rows = []
        found_header = False
        data_pattern_skip_rows = min(30, len(head_rows) - 8)
        try:
            data_pattern, self.data_pattern_source, n_cols = self.data_pattern(head_rows[data_pattern_skip_rows:])
        except Exception as e:
            self._logger.debug("Failed to find data pattern")
            raise
        # NOTE(review): on Python 3, n_cols/8 is a float, which would make
        # this quantifier read e.g. 'X{3,4.5}' and match literally — confirm
        # this module still targets Python 2 integer division here.
        patterns = ([('D', data_pattern),
                     # More than 25% strings in row is header, if it isn't matched as data
                     ('H', re.compile(r'X{{{},{}}}'.format(max(3, n_cols/8),max(3,n_cols/4)))),
                     ] +
                    list(self.patterns))
        if self.debug:
            self._logger.debug("--- Patterns")
            for e in patterns:
                self._logger.debug("    {} {}".format(e[0], e[1].pattern))
        for i, row in enumerate(head_rows):
            picture = self.picture(row)
            label = self.match_picture(picture, patterns)
            try:
                # If a header or data has more than half of the line is a continuous nulls,
                # it's probably a comment.
                if label != 'B' and len(re.search('_+', picture).group(0)) > len(row)/2:
                    label = 'C'
            except AttributeError:
                pass  # re not matched
            if not found_header and label == 'H':
                found_header = True
            if label is False:
                if found_header:
                    label = 'D'
                else:
                    # Could be a really wacky header
                    found_header = True
                    label = 'H'
            if self.debug:
                self._logger.debug("HEAD: {:<5} {} {} {}".format(i, label, picture, row))
            if label == 'C':
                self.comment_lines.append(i)
            elif label == 'H':
                self.header_lines.append(i)
                header_rows.append(row)
            elif label == 'D':
                # First data row: freeze the headers and stop scanning.
                self.start_line = i
                self.headers = self.coalesce_headers(header_rows)
                break
        if tail_rows:
            from itertools import takewhile, islice
            for i, row in enumerate(islice(reversed(tail_rows), 0, 10)):
                picture = self.picture(row)
                label = self.match_picture(picture, patterns)
                self._logger.debug("TAIL: {:<5} {} {} {}".format(i, label, picture, row))
            # Compute the data label for the end line, then reverse them.
            labels = reversed(list(self.match_picture(self.picture(row), patterns) for row in tail_rows))
            # Count the number of lines, from the end, that are either comment or blank
            end_line = len(list(takewhile(lambda x: x == 'C' or x == 'B' or x == 'H', labels)))
            if end_line:
                self.end_line = n_rows-end_line-1
        return self
    @classmethod
    def coalesce_headers(cls, header_lines):
        """Merge multiple header rows into a single list of column names.

        Gaps within a header row are filled from the value to their left,
        then rows are joined column-wise and whitespace-normalized.
        """
        import re
        import six
        header_lines = [list(hl) for hl in header_lines if bool(hl)]
        if len(header_lines) == 0:
            return []
        if len(header_lines) == 1:
            return header_lines[0]
        # If there are gaps in the values of a line, copy them forward, so there
        # is some value in every position
        for hl in header_lines:
            last = None
            for i in range(len(hl)):
                hli = six.text_type(hl[i])
                if not hli.strip():
                    hl[i] = last
                else:
                    last = hli
        headers = [' '.join(text_type(col_val).strip() if col_val else '' for col_val in col_set)
                   for col_set in zip(*header_lines)]
        headers = [re.sub(r'\s+', ' ', h.strip()) for h in headers]
        return headers
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2015 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Testing local model information output methods
"""
from world import world, setup_module, teardown_module
import create_source_steps as source_create
import create_dataset_steps as dataset_create
import create_model_steps as model_create
import compare_predictions_steps as prediction_compare
import inspect_model_steps as inspect_model
class TestLocalModelOutputs(object):
    """Python 2 scenario tests for local model information outputs.

    Each test_scenario* method builds a source/dataset/model pipeline via the
    *_steps helper modules and compares the local model's rendered output
    (rules, distributions, summaries) with an expected fixture file.
    """
    def setup(self):
        """
            Debug information
        """
        print "\n-------------------\nTests in: %s\n" % __name__
    def teardown(self):
        """
            Debug information
        """
        print "\nEnd of tests in: %s\n-------------------\n" % __name__
    def test_scenario1(self):
        """
            Scenario: Successfully creating a model and translate the tree model into a set of IF-THEN rules:
                Given I create a data source uploading a "<data>" file
                And I wait until the source is ready less than <time_1> secs
                And I create a dataset
                And I wait until the dataset is ready less than <time_2> secs
                And I create a model
                And I wait until the model is ready less than <time_3> secs
                And I create a local model
                And I translate the tree into IF_THEN rules
                Then I check the output is like "<expected_file>" expected file

                Examples:
                | data                   | time_1  | time_2 | time_3 | expected_file                                 |
                | data/iris.csv          | 10      | 10     | 10     | data/model/if_then_rules_iris.txt              |
                | data/iris_sp_chars.csv | 10      | 10     | 10     | data/model/if_then_rules_iris_sp_chars.txt     |
                | data/spam.csv          | 20      | 20     | 30     | data/model/if_then_rules_spam.txt              |
                | data/grades.csv        | 10      | 10     | 10     | data/model/if_then_rules_grades.txt            |
                | data/diabetes.csv      | 20      | 20     | 30     | data/model/if_then_rules_diabetes.txt          |
                | data/iris_missing2.csv | 10      | 10     | 10     | data/model/if_then_rules_iris_missing2.txt     |
                | data/tiny_kdd.csv      | 20      | 20     | 30     | data/model/if_then_rules_tiny_kdd.txt          |
        """
        print self.test_scenario1.__doc__
        examples = [
            ['data/iris.csv', '10', '10', '10', 'data/model/if_then_rules_iris.txt'],
            ['data/iris_sp_chars.csv', '10', '10', '10', 'data/model/if_then_rules_iris_sp_chars.txt'],
            ['data/spam.csv', '10', '10', '10', 'data/model/if_then_rules_spam.txt'],
            ['data/grades.csv', '10', '10', '10', 'data/model/if_then_rules_grades.txt'],
            ['data/diabetes.csv', '10', '10', '10', 'data/model/if_then_rules_diabetes.txt'],
            ['data/iris_missing2.csv', '10', '10', '10', 'data/model/if_then_rules_iris_missing2.txt'],
            ['data/tiny_kdd.csv', '10', '10', '10', 'data/model/if_then_rules_tiny_kdd.txt']]
        for example in examples:
            print "\nTesting with:\n", example
            source_create.i_upload_a_file(self, example[0])
            source_create.the_source_is_finished(self, example[1])
            dataset_create.i_create_a_dataset(self)
            dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
            model_create.i_create_a_model(self)
            model_create.the_model_is_finished_in_less_than(self, example[3])
            prediction_compare.i_create_a_local_model(self)
            inspect_model.i_translate_the_tree_into_IF_THEN_rules(self)
            inspect_model.i_check_if_the_output_is_like_expected_file(self, example[4])
    def test_scenario2(self):
        """
            Scenario: Successfully creating a model with missing values and translate the tree model into a set of IF-THEN rules:
                Given I create a data source uploading a "<data>" file
                And I wait until the source is ready less than <time_1> secs
                And I create a dataset
                And I wait until the dataset is ready less than <time_2> secs
                And I create a model
                And I wait until the model is ready less than <time_3> secs
                And I create a local model
                And I translate the tree into IF_THEN rules
                Then I check the output is like "<expected_file>" expected file

                Examples:
                | data                   | time_1  | time_2 | time_3 | expected_file                                          |
                | data/iris_missing2.csv | 10      | 10     | 10     | data/model/if_then_rules_iris_missing2_MISSINGS.txt    |
        """
        print self.test_scenario2.__doc__
        examples = [
            ['data/iris_missing2.csv', '10', '10', '10', 'data/model/if_then_rules_iris_missing2_MISSINGS.txt']]
        for example in examples:
            print "\nTesting with:\n", example
            source_create.i_upload_a_file(self, example[0])
            source_create.the_source_is_finished(self, example[1])
            dataset_create.i_create_a_dataset(self)
            dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
            model_create.i_create_a_model_with_missing_splits(self)
            model_create.the_model_is_finished_in_less_than(self, example[3])
            prediction_compare.i_create_a_local_model(self)
            inspect_model.i_translate_the_tree_into_IF_THEN_rules(self)
            inspect_model.i_check_if_the_output_is_like_expected_file(self, example[4])
    def test_scenario3(self):
        """
            Scenario: Successfully creating a model and translate the tree model into a set of IF-THEN rules:
                Given I create a data source uploading a "<data>" file
                And I wait until the source is ready less than <time_1> secs
                And I update the source with "<options>" waiting less than <time_1> secs
                And I create a dataset
                And I wait until the dataset is ready less than <time_2> secs
                And I create a model
                And I wait until the model is ready less than <time_3> secs
                And I create a local model
                And I translate the tree into IF_THEN rules
                Then I check the output is like "<expected_file>" expected file

                Examples:
                | data          | time_1  | time_2 | time_3 | options | expected_file |
                | data/spam.csv | 20      | 20     | 30     | {"fields": {"000001": {"optype": "text", "term_analysis": {"case_sensitive": true, "stem_words": true, "use_stopwords": false, "language": "en"}}}} | data/model/if_then_rules_spam_textanalysis_1.txt |
                | data/spam.csv | 20      | 20     | 30     | {"fields": {"000001": {"optype": "text", "term_analysis": {"case_sensitive": true, "stem_words": true, "use_stopwords": false}}}} | data/model/if_then_rules_spam_textanalysis_2.txt |
                | data/spam.csv | 20      | 20     | 30     | {"fields": {"000001": {"optype": "text", "term_analysis": {"case_sensitive": false, "stem_words": false, "use_stopwords": false, "language": "en"}}}} | data/model/if_then_rules_spam_textanalysis_3.txt |
                | data/spam.csv | 20      | 20     | 30     | {"fields": {"000001": {"optype": "text", "term_analysis": {"case_sensitive": false, "stem_words": true, "use_stopwords": true, "language": "en"}}}} | data/model/if_then_rules_spam_textanalysis_4.txt |
                | data/spam.csv | 20      | 20     | 30     | {"fields": {"000001": {"optype": "text", "term_analysis": {"token_mode": "full_terms_only", "language": "en"}}}} | data/model/if_then_rules_spam_textanalysis_5.txt |
                | data/spam.csv | 20      | 20     | 30     | {"fields": {"000001": {"optype": "text", "term_analysis": {"case_sensitive": true, "stem_words": true, "use_stopwords": false, "language": "en"}}}} | data/model/if_then_rules_spam_textanalysis_6.txt |
        """
        print self.test_scenario3.__doc__
        examples = [
            ['data/spam.csv', '10', '10', '10', '{"fields": {"000001": {"optype": "text", "term_analysis": {"case_sensitive": true, "stem_words": true, "use_stopwords": false, "language": "en"}}}}','data/model/if_then_rules_spam_textanalysis_1.txt'],
            ['data/spam.csv', '10', '10', '10', '{"fields": {"000001": {"optype": "text", "term_analysis": {"case_sensitive": true, "stem_words": true, "use_stopwords": false}}}}', 'data/model/if_then_rules_spam_textanalysis_2.txt'],
            ['data/spam.csv', '10', '10', '10', '{"fields": {"000001": {"optype": "text", "term_analysis": {"case_sensitive": false, "stem_words": false, "use_stopwords": false, "language": "en"}}}}', 'data/model/if_then_rules_spam_textanalysis_3.txt'],
            ['data/spam.csv', '10', '10', '10', '{"fields": {"000001": {"optype": "text", "term_analysis": {"case_sensitive": false, "stem_words": true, "use_stopwords": true, "language": "en"}}}}', 'data/model/if_then_rules_spam_textanalysis_4.txt'],
            ['data/spam.csv', '10', '10', '10', '{"fields": {"000001": {"optype": "text", "term_analysis": {"token_mode": "full_terms_only", "language": "en"}}}}', 'data/model/if_then_rules_spam_textanalysis_5.txt'],
            ['data/spam.csv', '10', '10', '10', '{"fields": {"000001": {"optype": "text", "term_analysis": {"case_sensitive": true, "stem_words": true, "use_stopwords": false, "language": "en"}}}}', 'data/model/if_then_rules_spam_textanalysis_6.txt']]
        for example in examples:
            print "\nTesting with:\n", example
            source_create.i_upload_a_file(self, example[0])
            source_create.the_source_is_finished(self, example[1])
            source_create.i_update_source_with(self, example[4])
            dataset_create.i_create_a_dataset(self)
            dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
            model_create.i_create_a_model(self)
            model_create.the_model_is_finished_in_less_than(self, example[3])
            prediction_compare.i_create_a_local_model(self)
            inspect_model.i_translate_the_tree_into_IF_THEN_rules(self)
            inspect_model.i_check_if_the_output_is_like_expected_file(self, example[5])
    def test_scenario4(self):
        """
            Scenario: Successfully creating a model and check its data distribution:
                Given I create a data source uploading a "<data>" file
                And I wait until the source is ready less than <time_1> secs
                And I create a dataset
                And I wait until the dataset is ready less than <time_2> secs
                And I create a model
                And I wait until the model is ready less than <time_3> secs
                And I create a local model
                And I translate the tree into IF_THEN rules
                Then I check the data distribution with "<expected_file>" file

                Examples:
                | data                   | time_1  | time_2 | time_3 | expected_file                                 |
                | data/iris.csv          | 10      | 10     | 10     | data/model/data_distribution_iris.txt         |
                | data/iris_sp_chars.csv | 10      | 10     | 10     | data/model/data_distribution_iris_sp_chars.txt|
                | data/spam.csv          | 20      | 20     | 30     | data/model/data_distribution_spam.txt         |
                | data/grades.csv        | 10      | 10     | 10     | data/model/data_distribution_grades.txt       |
                | data/diabetes.csv      | 20      | 20     | 30     | data/model/data_distribution_diabetes.txt     |
                | data/iris_missing2.csv | 10      | 10     | 10     | data/model/data_distribution_iris_missing2.txt|
                | data/tiny_kdd.csv      | 20      | 20     | 30     | data/model/data_distribution_tiny_kdd.txt     |
        """
        print self.test_scenario4.__doc__
        examples = [
            ['data/iris.csv', '10', '10', '10', 'data/model/data_distribution_iris.txt'],
            ['data/iris_sp_chars.csv', '10', '10', '10', 'data/model/data_distribution_iris_sp_chars.txt'],
            ['data/spam.csv', '10', '10', '10', 'data/model/data_distribution_spam.txt'],
            ['data/grades.csv', '10', '10', '10', 'data/model/data_distribution_grades.txt'],
            ['data/diabetes.csv', '10', '10', '10', 'data/model/data_distribution_diabetes.txt'],
            ['data/iris_missing2.csv', '10', '10', '10', 'data/model/data_distribution_iris_missing2.txt'],
            ['data/tiny_kdd.csv', '10', '10', '10', 'data/model/data_distribution_tiny_kdd.txt']]
        for example in examples:
            print "\nTesting with:\n", example
            source_create.i_upload_a_file(self, example[0])
            source_create.the_source_is_finished(self, example[1])
            dataset_create.i_create_a_dataset(self)
            dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
            model_create.i_create_a_model(self)
            model_create.the_model_is_finished_in_less_than(self, example[3])
            prediction_compare.i_create_a_local_model(self)
            inspect_model.i_check_the_data_distribution(self, example[4])
    def test_scenario5(self):
        """
            Scenario: Successfully creating a model and check its predictions distribution:
                Given I create a data source uploading a "<data>" file
                And I wait until the source is ready less than <time_1> secs
                And I create a dataset
                And I wait until the dataset is ready less than <time_2> secs
                And I create a model
                And I wait until the model is ready less than <time_3> secs
                And I create a local model
                And I translate the tree into IF_THEN rules
                Then I check the predictions distribution with "<expected_file>" file

                Examples:
                | data                   | time_1  | time_2 | time_3 | expected_file                                         |
                | data/iris.csv          | 10      | 10     | 10     | data/model/predictions_distribution_iris.txt          |
                | data/iris_sp_chars.csv | 10      | 10     | 10     | data/model/predictions_distribution_iris_sp_chars.txt |
                | data/spam.csv          | 20      | 20     | 30     | data/model/predictions_distribution_spam.txt          |
                | data/grades.csv        | 10      | 10     | 10     | data/model/predictions_distribution_grades.txt        |
                | data/diabetes.csv      | 20      | 20     | 30     | data/model/predictions_distribution_diabetes.txt      |
                | data/iris_missing2.csv | 10      | 10     | 10     | data/model/predictions_distribution_iris_missing2.txt |
                | data/tiny_kdd.csv      | 20      | 20     | 30     | data/model/predictions_distribution_tiny_kdd.txt      |
        """
        print self.test_scenario5.__doc__
        examples = [
            ['data/iris.csv', '10', '10', '10', 'data/model/predictions_distribution_iris.txt'],
            ['data/iris_sp_chars.csv', '10', '10', '10', 'data/model/predictions_distribution_iris_sp_chars.txt'],
            ['data/spam.csv', '10', '10', '10', 'data/model/predictions_distribution_spam.txt'],
            ['data/grades.csv', '10', '10', '10', 'data/model/predictions_distribution_grades.txt'],
            ['data/diabetes.csv', '10', '10', '10', 'data/model/predictions_distribution_diabetes.txt'],
            ['data/iris_missing2.csv', '10', '10', '10', 'data/model/predictions_distribution_iris_missing2.txt'],
            ['data/tiny_kdd.csv', '10', '10', '10', 'data/model/predictions_distribution_tiny_kdd.txt']]
        for example in examples:
            print "\nTesting with:\n", example
            source_create.i_upload_a_file(self, example[0])
            source_create.the_source_is_finished(self, example[1])
            dataset_create.i_create_a_dataset(self)
            dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
            model_create.i_create_a_model(self)
            model_create.the_model_is_finished_in_less_than(self, example[3])
            prediction_compare.i_create_a_local_model(self)
            inspect_model.i_check_the_predictions_distribution(self, example[4])
    def test_scenario6(self):
        """
            Scenario: Successfully creating a model and check its summary information:
                Given I create a data source uploading a "<data>" file
                And I wait until the source is ready less than <time_1> secs
                And I create a dataset
                And I wait until the dataset is ready less than <time_2> secs
                And I create a model
                And I wait until the model is ready less than <time_3> secs
                And I create a local model
                And I translate the tree into IF_THEN rules
                Then I check the model summary with "<expected_file>" file

                Examples:
                | data                   | time_1  | time_2 | time_3 | expected_file                           |
                | data/iris.csv          | 10      | 10     | 10     | data/model/summarize_iris.txt           |
                | data/iris_sp_chars.csv | 10      | 10     | 10     | data/model/summarize_iris_sp_chars.txt  |
                | data/spam.csv          | 20      | 20     | 30     | data/model/summarize_spam.txt           |
                | data/grades.csv        | 10      | 10     | 10     | data/model/summarize_grades.txt         |
                | data/diabetes.csv      | 20      | 20     | 30     | data/model/summarize_diabetes.txt       |
                | data/iris_missing2.csv | 10      | 10     | 10     | data/model/summarize_iris_missing2.txt  |
                | data/tiny_kdd.csv      | 20      | 20     | 30     | data/model/summarize_tiny_kdd.txt       |
        """
        print self.test_scenario6.__doc__
        examples = [
            ['data/iris.csv', '10', '10', '10', 'data/model/summarize_iris.txt'],
            ['data/iris_sp_chars.csv', '10', '10', '10', 'data/model/summarize_iris_sp_chars.txt'],
            ['data/spam.csv', '10', '10', '10', 'data/model/summarize_spam.txt'],
            ['data/grades.csv', '10', '10', '10', 'data/model/summarize_grades.txt'],
            ['data/diabetes.csv', '10', '10', '10', 'data/model/summarize_diabetes.txt'],
            ['data/iris_missing2.csv', '10', '10', '10', 'data/model/summarize_iris_missing2.txt'],
            ['data/tiny_kdd.csv', '10', '10', '10', 'data/model/summarize_tiny_kdd.txt']]
        for example in examples:
            print "\nTesting with:\n", example
            source_create.i_upload_a_file(self, example[0])
            source_create.the_source_is_finished(self, example[1])
            dataset_create.i_create_a_dataset(self)
            dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
            model_create.i_create_a_model(self)
            model_create.the_model_is_finished_in_less_than(self, example[3])
            prediction_compare.i_create_a_local_model(self)
            inspect_model.i_check_the_model_summary_with(self, example[4])
| |
from sympy.core import Basic, S, C
from sympy.core.function import Function, Derivative, ArgumentIndexError
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.piecewise import Piecewise
from sympy.core import Add, Mul
from sympy.core.relational import Eq, Ne
from sympy.utilities.iterables import iff
###############################################################################
######################### REAL and IMAGINARY PARTS ############################
###############################################################################
class re(Function):
    """Returns real part of expression. This function performs only
    elementary analysis and so it will fail to decompose properly
    more complicated expressions. If completely simplified result
    is needed then use Basic.as_real_imag() or perform complex
    expansion on instance of this function.

    >>> from sympy import re, im, I, E
    >>> from sympy.abc import x, y
    >>> re(2*E)
    2*E
    >>> re(2*I + 17)
    17
    >>> re(2*I)
    0
    >>> re(im(x) + x*I + 2)
    2
    """
    nargs = 1
    is_real = True
    @classmethod
    def eval(cls, arg):
        if arg is S.NaN:
            return S.NaN
        elif arg.is_real:
            return arg
        elif arg.is_Function and arg.func == conjugate:
            # re(conjugate(z)) == re(z)
            return re(arg.args[0])
        else:
            # Partition the Add terms:
            #   included — terms left inside re()
            #   reverted — I-coefficients that are not known real:
            #              re(I*w) == -im(w)
            #   excluded — purely real terms, pulled out unchanged
            # A term I*<real> contributes nothing to the real part and is
            # dropped entirely (appended to no list).
            included, reverted, excluded = [], [], []
            arg = Add.make_args(arg)
            for term in arg:
                coeff = term.as_coefficient(S.ImaginaryUnit)
                if coeff is not None:
                    if not coeff.is_real:
                        reverted.append(coeff)
                elif not term.has(S.ImaginaryUnit) and term.is_real:
                    excluded.append(term)
                else:
                    included.append(term)
            # Only rebuild when something was extracted; otherwise returning
            # cls(a) would recurse forever.
            if len(arg) != len(included):
                a, b, c = map(lambda xs: Add(*xs),
                              [included, reverted, excluded])
                return cls(a) - im(b) + c
    def _eval_conjugate(self):
        # re(z) is real, hence its own conjugate.
        return self
    def as_real_imag(self, deep=True):
        # re(z) is already real; its imaginary part is zero.
        return (self, S.Zero)
    def _eval_expand_complex(self, deep=True, **hints):
        # if deep:
        #     return self.args[0].expand(deep, **hints).as_real_imag()[0]
        # else:
        return self.args[0].as_real_imag()[0]
    def _eval_derivative(self, x):
        # d/dx re(f) = re(df/dx): re commutes with differentiation.
        return re(Derivative(self.args[0], x, **{'evaluate': True}))
class im(Function):
    """Returns imaginary part of expression. This function performs
    only elementary analysis and so it will fail to decompose
    properly more complicated expressions. If completely simplified
    result is needed then use Basic.as_real_imag() or perform complex
    expansion on instance of this function.

    >>> from sympy import re, im, E, I
    >>> from sympy.abc import x, y
    >>> im(2*E)
    0
    >>> re(2*I + 17)
    17
    >>> im(x*I)
    re(x)
    >>> im(re(x) + y)
    im(y)
    """
    nargs = 1
    is_real = True
    @classmethod
    def eval(cls, arg):
        if arg is S.NaN:
            return S.NaN
        elif arg.is_real:
            return S.Zero
        elif arg.is_Function and arg.func == conjugate:
            # im(conjugate(z)) == -im(z)
            return -im(arg.args[0])
        else:
            # Partition the Add terms:
            #   included — terms left inside im()
            #   reverted — I-coefficients that are not known real:
            #              im(I*w) == re(w)
            #   excluded — real I-coefficients: im(I*x) == x for real x
            # Purely real terms contribute nothing and are dropped.
            included, reverted, excluded = [], [], []
            arg = Add.make_args(arg)
            for term in arg:
                coeff = term.as_coefficient(S.ImaginaryUnit)
                if coeff is not None:
                    if not coeff.is_real:
                        reverted.append(coeff)
                    else:
                        excluded.append(coeff)
                elif term.has(S.ImaginaryUnit) or not term.is_real:
                    included.append(term)
            # Only rebuild when something was extracted, to avoid infinite
            # recursion through cls(a).
            if len(arg) != len(included):
                a, b, c = map(lambda xs: Add(*xs),
                              [included, reverted, excluded])
                return cls(a) + re(b) + c
    def _eval_conjugate(self):
        # im(z) is real, hence its own conjugate.
        return self
    def as_real_imag(self, deep=True):
        # im(z) is itself real; viewed as a complex quantity its imaginary
        # part is zero.
        return (self, S.Zero)
    def _eval_expand_complex(self, deep=True, **hints):
        # if deep:
        #     return self.args[0].expand(deep, **hints).as_real_imag()[1]
        return self.args[0].as_real_imag()[1]
    def _eval_derivative(self, x):
        # d/dx im(f) = im(df/dx): im commutes with differentiation.
        return im(Derivative(self.args[0], x, **{'evaluate': True}))
###############################################################################
############### SIGN, ABSOLUTE VALUE, ARGUMENT and CONJUGATION ################
###############################################################################
class sign(Function):
    """Return the sign of an expression, that is:
        -1 if expr < 0
         0 if expr == 0
         1 if expr > 0
    """
    nargs = 1
    @classmethod
    def eval(cls, arg):
        if arg is S.NaN:
            return S.NaN
        if arg is S.Zero: return S.Zero
        if arg.is_positive: return S.One
        if arg.is_negative: return S.NegativeOne
        if arg.is_Function:
            # sign is idempotent: sign(sign(x)) == sign(x)
            if arg.func is sign: return arg
        if arg.is_Mul:
            # Extract the known-sign factors of a product, leaving sign()
            # applied only to the factors whose sign is unknown.
            c, args = arg.as_coeff_mul()
            unk = []
            is_neg = c.is_negative
            for ai in args:
                if ai.is_negative is None:
                    # Sign of this factor is undetermined; keep it inside.
                    unk.append(ai)
                elif ai.is_negative:
                    is_neg = not is_neg
            if c is S.One and len(unk) == len(args):
                # Nothing could be extracted; remain unevaluated.
                return None
            return iff(is_neg, S.NegativeOne, S.One) * cls(arg._new_rawargs(*unk))
    is_bounded = True
    def _eval_derivative(self, x):
        # Zero wherever sign is differentiable (the jump at 0 is ignored).
        return S.Zero
    def _eval_conjugate(self):
        # NOTE(review): assumes a real argument; for complex z the conjugate
        # of sign(z) is sign(conjugate(z)) — confirm the intended domain.
        return self
    def _eval_is_zero(self):
        return (self.args[0] is S.Zero)
    def _sage_(self):
        import sage.all as sage
        return sage.sgn(self.args[0]._sage_())
class Abs(Function):
    """Return the absolute value of the argument.
    This is an extension of the built-in function abs() to accept symbolic
    values. If you pass a SymPy expression to the built-in abs(), it will
    pass it automatically to Abs().
    Examples
    >>> from sympy import Abs, Symbol, S
    >>> Abs(-1)
    1
    >>> x = Symbol('x', real=True)
    >>> Abs(-x)
    Abs(x)
    >>> Abs(x**2)
    x**2
    >>> abs(-x) # The Python built-in
    Abs(x)
    Note that the Python built-in will return either an Expr or int depending on
    the argument::
    >>> type(abs(-1))
    <type 'int'>
    >>> type(abs(S.NegativeOne))
    <class 'sympy.core.numbers.One'>
    Abs will always return a sympy object.
    """
    nargs = 1
    # |z| is real and never negative, by definition.
    is_real = True
    is_negative = False
    def fdiff(self, argindex=1):
        # d|x|/dx == sign(x); the complex-argument case is handled in
        # _eval_derivative below.
        if argindex == 1:
            return sign(self.args[0])
        else:
            raise ArgumentIndexError(self, argindex)
    @classmethod
    def eval(cls, arg):
        if arg is S.NaN:
            return S.NaN
        if arg.is_zero: return arg
        if arg.is_positive: return arg
        if arg.is_negative: return -arg
        # Multiplicativity: Abs(c*x) == Abs(c)*Abs(x).
        coeff, terms = arg.as_coeff_mul()
        if coeff is not S.One:
            return cls(coeff) * cls(Mul(*terms))
        if arg.is_real is False:
            # Known non-real argument: |z| = sqrt(z*conjugate(z)).
            return sqrt( (arg * arg.conjugate()).expand() )
        if arg.is_Pow:
            # |x**even| == x**even for real x.
            base, exponent = arg.as_base_exp()
            if exponent.is_even and base.is_real:
                return arg
        return
    def _eval_is_nonzero(self):
        return self._args[0].is_nonzero
    def _eval_is_positive(self):
        # |z| > 0 exactly when z != 0.
        return self.is_nonzero
    def _eval_conjugate(self):
        # |z| is real, so it equals its own conjugate.
        return self
    def _eval_power(self,other):
        # For real argument and even integer power, the Abs can be dropped.
        if self.args[0].is_real and other.is_integer:
            if other.is_even:
                return self.args[0]**other
        return
    def _eval_nseries(self, x, n):
        # Expand the argument and wrap the result in a Piecewise on the
        # sign of the leading direction (degenerate leading term handled
        # by substituting 0).
        direction = self.args[0].leadterm(x)[0]
        s = self.args[0]._eval_nseries(x, n=n)
        when = Eq(direction, 0)
        return Piecewise(
            ((s.subs(direction, 0)), when),
            (sign(direction)*s, True),
        )
    def _sage_(self):
        import sage.all as sage
        return sage.abs_symbolic(self.args[0]._sage_())
    def _eval_derivative(self, x):
        if self.args[0].is_real:
            # Real case: d|f|/dx == f' * sign(f).
            return Derivative(self.args[0], x, **{'evaluate': True}) * sign(self.args[0])
        # Complex case: d|f| == (re(f)*re(f') + im(f)*im(f')) / |f|.
        return (re(self.args[0]) * re(Derivative(self.args[0], x,
            **{'evaluate': True})) + im(self.args[0]) * im(Derivative(self.args[0],
                x, **{'evaluate': True}))) / Abs(self.args[0])
class arg(Function):
    """Returns the argument (in radians) of a complex number."""
    nargs = 1
    # The argument of a complex number is a real, bounded quantity.
    is_real = True
    is_bounded = True

    @classmethod
    def eval(cls, arg):
        # arg(z) == atan2(im(z), re(z)); collapse only when numeric.
        real_part = re(arg)
        imag_part = im(arg)
        result = C.atan2(imag_part, real_part)
        if result.is_number:
            return result

    def _eval_conjugate(self):
        # arg(z) is real, hence self-conjugate.
        return self

    def _eval_derivative(self, t):
        # d/dt atan2(y, x) == (x*y' - y*x') / (x**2 + y**2)
        x = re(self.args[0])
        y = im(self.args[0])
        dy = Derivative(y, t, evaluate=True)
        dx = Derivative(x, t, evaluate=True)
        return (x * dy - y * dx) / (x**2 + y**2)
class conjugate(Function):
    """Changes the sign of the imaginary part of a complex number.
    >>> from sympy import conjugate, I
    >>> conjugate(1 + I)
    1 - I
    """
    nargs = 1

    @classmethod
    def eval(cls, arg):
        # Delegate to the argument's own conjugation rule, if it has one.
        conjugated = arg._eval_conjugate()
        if conjugated is not None:
            return conjugated

    def _eval_conjugate(self):
        # conjugate(conjugate(z)) == z
        return self.args[0]

    def _eval_derivative(self, x):
        # Conjugation commutes with differentiation.
        inner = Derivative(self.args[0], x, evaluate=True)
        return conjugate(inner)
# /cyclic/
# Break an import cycle: sympy.core.basic needs a reference to Abs but
# this module imports from sympy.core, so the attribute is patched onto
# the basic module after both are loaded. The alias is deleted so it
# does not leak into this module's namespace.
from sympy.core import basic as _
_.abs_ = Abs
del _
| |
# Copyright 2013 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import eventlet
import netaddr
from oslo.serialization import jsonutils
import requests
from neutron.common import exceptions as n_exc
from neutron.extensions import providernet
from neutron.openstack.common import log as logging
from neutron.plugins.cisco.common import cisco_constants as c_const
from neutron.plugins.cisco.common import cisco_credentials_v2 as c_cred
from neutron.plugins.cisco.common import cisco_exceptions as c_exc
from neutron.plugins.cisco.common import config as c_conf
from neutron.plugins.cisco.db import network_db_v2
from neutron.plugins.cisco.extensions import n1kv
LOG = logging.getLogger(__name__)
class Client(object):
    """
    Client for the Cisco Nexus1000V Neutron Plugin.
    This client implements functions to communicate with
    Cisco Nexus1000V VSM.
    For every Neutron objects, Cisco Nexus1000V Neutron Plugin
    creates a corresponding object in the controller (Cisco
    Nexus1000V VSM).
    CONCEPTS:
    Following are few concepts used in Nexus1000V VSM:
    port-profiles:
    Policy profiles correspond to port profiles on Nexus1000V VSM.
    Port profiles are the primary mechanism by which network policy is
    defined and applied to switch interfaces in a Nexus 1000V system.
    network-segment:
    Each network-segment represents a broadcast domain.
    network-segment-pool:
    A network-segment-pool contains one or more network-segments.
    logical-network:
    A logical-network contains one or more network-segment-pools.
    bridge-domain:
    A bridge-domain is created when the network-segment is of type VXLAN.
    Each VXLAN <--> VLAN combination can be thought of as a bridge domain.
    ip-pool:
    Each ip-pool represents a subnet on the Nexus1000V VSM.
    vm-network:
    vm-network refers to a network-segment and policy-profile.
    It maintains a list of ports that uses the network-segment and
    policy-profile this vm-network refers to.
    events:
    Events correspond to commands that are logged on Nexus1000V VSM.
    Events are used to poll for a certain resource on Nexus1000V VSM.
    Event type of port_profile: Return all updates/create/deletes
    of port profiles from the VSM.
    Event type of port_profile_update: Return only updates regarding
    policy-profiles.
    Event type of port_profile_delete: Return only deleted policy profiles.
    WORK FLOW:
    For every network profile a corresponding logical-network and
    a network-segment-pool, under this logical-network, will be created.
    For every network created from a given network profile, a
    network-segment will be added to the network-segment-pool corresponding
    to that network profile.
    A port is created on a network and associated with a policy-profile.
    Hence for every unique combination of a network and a policy-profile, a
    unique vm-network will be created and a reference to the port will be
    added. If the same combination of network and policy-profile is used by
    another port, the references to that port will be added to the same
    vm-network.
    """
    # Define paths for the URI where the client connects for HTTP requests.
    port_profiles_path = "/virtual-port-profile"
    network_segment_path = "/network-segment/%s"
    network_segment_pool_path = "/network-segment-pool/%s"
    ip_pool_path = "/ip-pool-template/%s"
    ports_path = "/kvm/vm-network/%s/ports"
    port_path = "/kvm/vm-network/%s/ports/%s"
    vm_networks_path = "/kvm/vm-network"
    vm_network_path = "/kvm/vm-network/%s"
    bridge_domains_path = "/kvm/bridge-domain"
    bridge_domain_path = "/kvm/bridge-domain/%s"
    logical_network_path = "/logical-network/%s"
    events_path = "/kvm/events"
    clusters_path = "/cluster"
    encap_profiles_path = "/encapsulation-profile"
    encap_profile_path = "/encapsulation-profile/%s"
    pool = eventlet.GreenPool(c_conf.CISCO_N1K.http_pool_size)
    def __init__(self, **kwargs):
        """Initialize a new client for the plugin."""
        self.format = 'json'
        self.hosts = self._get_vsm_hosts()
        # NOTE(review): raises IndexError when no VSM credentials are
        # configured -- presumably deployment guarantees at least one;
        # confirm before hardening.
        self.action_prefix = 'http://%s/api/n1k' % self.hosts[0]
        self.timeout = c_conf.CISCO_N1K.http_timeout
    def list_port_profiles(self):
        """
        Fetch all policy profiles from the VSM.
        :returns: JSON string
        """
        return self._get(self.port_profiles_path)
    def create_bridge_domain(self, network, overlay_subtype):
        """
        Create a bridge domain on VSM.
        :param network: network dict
        :param overlay_subtype: string representing subtype of overlay network
        """
        body = {'name': network['id'] + c_const.BRIDGE_DOMAIN_SUFFIX,
                'segmentId': network[providernet.SEGMENTATION_ID],
                'subType': overlay_subtype,
                'tenantId': network['tenant_id']}
        if overlay_subtype == c_const.NETWORK_SUBTYPE_NATIVE_VXLAN:
            # Native VXLAN bridge domains additionally need a multicast group.
            body['groupIp'] = network[n1kv.MULTICAST_IP]
        return self._post(self.bridge_domains_path,
                          body=body)
    def delete_bridge_domain(self, name):
        """
        Delete a bridge domain on VSM.
        :param name: name of the bridge domain to be deleted
        """
        return self._delete(self.bridge_domain_path % name)
    def create_network_segment(self, network, network_profile):
        """
        Create a network segment on the VSM.
        :param network: network dict
        :param network_profile: network profile dict
        """
        body = {'publishName': network['id'],
                'description': network['name'],
                'id': network['id'],
                'tenantId': network['tenant_id'],
                'networkSegmentPool': network_profile['id'], }
        if network[providernet.NETWORK_TYPE] == c_const.NETWORK_TYPE_VLAN:
            body['vlan'] = network[providernet.SEGMENTATION_ID]
        elif network[providernet.NETWORK_TYPE] == c_const.NETWORK_TYPE_OVERLAY:
            # Overlay segments are backed by a bridge domain created
            # separately (see create_bridge_domain).
            body['bridgeDomain'] = (network['id'] +
                                    c_const.BRIDGE_DOMAIN_SUFFIX)
        if network_profile['segment_type'] == c_const.NETWORK_TYPE_TRUNK:
            body['mode'] = c_const.NETWORK_TYPE_TRUNK
            body['segmentType'] = network_profile['sub_type']
            if network_profile['sub_type'] == c_const.NETWORK_TYPE_VLAN:
                body['addSegments'] = network['add_segment_list']
                body['delSegments'] = network['del_segment_list']
            else:
                body['encapProfile'] = (network['id'] +
                                        c_const.ENCAPSULATION_PROFILE_SUFFIX)
        else:
            body['mode'] = 'access'
            body['segmentType'] = network_profile['segment_type']
        return self._post(self.network_segment_path % network['id'],
                          body=body)
    def update_network_segment(self, network_segment_id, body):
        """
        Update a network segment on the VSM.
        Network segment on VSM can be updated to associate it with an ip-pool
        or update its description and segment id.
        :param network_segment_id: UUID representing the network segment
        :param body: dict of arguments to be updated
        """
        return self._post(self.network_segment_path % network_segment_id,
                          body=body)
    def delete_network_segment(self, network_segment_id):
        """
        Delete a network segment on the VSM.
        :param network_segment_id: UUID representing the network segment
        """
        return self._delete(self.network_segment_path % network_segment_id)
    def create_logical_network(self, network_profile, tenant_id):
        """
        Create a logical network on the VSM.
        :param network_profile: network profile dict
        :param tenant_id: UUID representing the tenant
        """
        LOG.debug("Logical network")
        body = {'description': network_profile['name'],
                'tenantId': tenant_id}
        logical_network_name = (network_profile['id'] +
                                c_const.LOGICAL_NETWORK_SUFFIX)
        return self._post(self.logical_network_path % logical_network_name,
                          body=body)
    def delete_logical_network(self, logical_network_name):
        """
        Delete a logical network on VSM.
        :param logical_network_name: string representing name of the logical
                                     network
        """
        return self._delete(
            self.logical_network_path % logical_network_name)
    def create_network_segment_pool(self, network_profile, tenant_id):
        """
        Create a network segment pool on the VSM.
        :param network_profile: network profile dict
        :param tenant_id: UUID representing the tenant
        """
        LOG.debug("network_segment_pool")
        logical_network_name = (network_profile['id'] +
                                c_const.LOGICAL_NETWORK_SUFFIX)
        body = {'name': network_profile['name'],
                'description': network_profile['name'],
                'id': network_profile['id'],
                'logicalNetwork': logical_network_name,
                'tenantId': tenant_id}
        if network_profile['segment_type'] == c_const.NETWORK_TYPE_OVERLAY:
            body['subType'] = network_profile['sub_type']
        return self._post(
            self.network_segment_pool_path % network_profile['id'],
            body=body)
    def update_network_segment_pool(self, network_profile):
        """
        Update a network segment pool on the VSM.
        :param network_profile: network profile dict
        """
        body = {'name': network_profile['name'],
                'description': network_profile['name']}
        return self._post(self.network_segment_pool_path %
                          network_profile['id'], body=body)
    def delete_network_segment_pool(self, network_segment_pool_id):
        """
        Delete a network segment pool on the VSM.
        :param network_segment_pool_id: UUID representing the network
                                        segment pool
        """
        return self._delete(self.network_segment_pool_path %
                            network_segment_pool_id)
    def create_ip_pool(self, subnet):
        """
        Create an ip-pool on the VSM.
        :param subnet: subnet dict
        """
        if subnet['cidr']:
            try:
                ip = netaddr.IPNetwork(subnet['cidr'])
                netmask = str(ip.netmask)
                network_address = str(ip.network)
            except (ValueError, netaddr.AddrFormatError):
                msg = _("Invalid input for CIDR")
                raise n_exc.InvalidInput(error_message=msg)
        else:
            netmask = network_address = ""
        if subnet['allocation_pools']:
            # Only the first allocation pool is propagated to the VSM.
            address_range_start = subnet['allocation_pools'][0]['start']
            address_range_end = subnet['allocation_pools'][0]['end']
        else:
            address_range_start = None
            address_range_end = None
        body = {'addressRangeStart': address_range_start,
                'addressRangeEnd': address_range_end,
                'ipAddressSubnet': netmask,
                'description': subnet['name'],
                'gateway': subnet['gateway_ip'],
                'dhcp': subnet['enable_dhcp'],
                'dnsServersList': subnet['dns_nameservers'],
                'networkAddress': network_address,
                'netSegmentName': subnet['network_id'],
                'id': subnet['id'],
                'tenantId': subnet['tenant_id']}
        return self._post(self.ip_pool_path % subnet['id'],
                          body=body)
    def update_ip_pool(self, subnet):
        """
        Update an ip-pool on the VSM.
        :param subnet: subnet dictionary
        """
        body = {'description': subnet['name'],
                'dhcp': subnet['enable_dhcp'],
                'dnsServersList': subnet['dns_nameservers']}
        return self._post(self.ip_pool_path % subnet['id'],
                          body=body)
    def delete_ip_pool(self, subnet_id):
        """
        Delete an ip-pool on the VSM.
        :param subnet_id: UUID representing the subnet
        """
        return self._delete(self.ip_pool_path % subnet_id)
    def create_vm_network(self,
                          port,
                          vm_network_name,
                          policy_profile):
        """
        Create a VM network on the VSM.
        :param port: port dict
        :param vm_network_name: name of the VM network
        :param policy_profile: policy profile dict
        """
        body = {'name': vm_network_name,
                'networkSegmentId': port['network_id'],
                'networkSegment': port['network_id'],
                'portProfile': policy_profile['name'],
                'portProfileId': policy_profile['id'],
                'tenantId': port['tenant_id'],
                'portId': port['id'],
                'macAddress': port['mac_address'],
                }
        if port.get('fixed_ips'):
            body['ipAddress'] = port['fixed_ips'][0]['ip_address']
            body['subnetId'] = port['fixed_ips'][0]['subnet_id']
        return self._post(self.vm_networks_path,
                          body=body)
    def delete_vm_network(self, vm_network_name):
        """
        Delete a VM network on the VSM.
        :param vm_network_name: name of the VM network
        """
        return self._delete(self.vm_network_path % vm_network_name)
    def create_n1kv_port(self, port, vm_network_name):
        """
        Create a port on the VSM.
        :param port: port dict
        :param vm_network_name: name of the VM network which imports this port
        """
        body = {'id': port['id'],
                'macAddress': port['mac_address']}
        if port.get('fixed_ips'):
            body['ipAddress'] = port['fixed_ips'][0]['ip_address']
            body['subnetId'] = port['fixed_ips'][0]['subnet_id']
        return self._post(self.ports_path % vm_network_name,
                          body=body)
    def update_n1kv_port(self, vm_network_name, port_id, body):
        """
        Update a port on the VSM.
        Update the mac address associated with the port
        :param vm_network_name: name of the VM network which imports this port
        :param port_id: UUID of the port
        :param body: dict of the arguments to be updated
        """
        return self._post(self.port_path % (vm_network_name, port_id),
                          body=body)
    def delete_n1kv_port(self, vm_network_name, port_id):
        """
        Delete a port on the VSM.
        :param vm_network_name: name of the VM network which imports this port
        :param port_id: UUID of the port
        """
        return self._delete(self.port_path % (vm_network_name, port_id))
    def _do_request(self, method, action, body=None,
                    headers=None):
        """
        Perform the HTTP request.
        The response is in either JSON format or plain text. A GET method will
        invoke a JSON response while a PUT/POST/DELETE returns message from the
        VSM in plain text format.
        Exception is raised when VSM replies with an INTERNAL SERVER ERROR HTTP
        status code (500) i.e. an error has occurred on the VSM or SERVICE
        UNAVAILABLE (503) i.e. VSM is not reachable.
        :param method: type of the HTTP request. POST, GET, PUT or DELETE
        :param action: path to which the client makes request
        :param body: dict for arguments which are sent as part of the request
        :param headers: header for the HTTP request
        :returns: JSON or plain text in HTTP response
        """
        action = self.action_prefix + action
        if not headers and self.hosts:
            headers = self._get_auth_header(self.hosts[0])
        headers['Content-Type'] = self._set_content_type('json')
        headers['Accept'] = self._set_content_type('json')
        if body:
            body = jsonutils.dumps(body, indent=2)
            LOG.debug("req: %s", body)
        try:
            # Run the blocking HTTP call on the green pool so other
            # greenthreads are not starved; wait() still blocks this caller.
            resp = self.pool.spawn(requests.request,
                                   method,
                                   url=action,
                                   data=body,
                                   headers=headers,
                                   timeout=self.timeout).wait()
        except Exception as e:
            raise c_exc.VSMConnectionFailed(reason=e)
        LOG.debug("status_code %s", resp.status_code)
        if resp.status_code == requests.codes.OK:
            if 'application/json' in resp.headers['content-type']:
                try:
                    return resp.json()
                except ValueError:
                    # VSM sent an empty or malformed JSON body on success.
                    return {}
            elif 'text/plain' in resp.headers['content-type']:
                LOG.debug("VSM: %s", resp.text)
        else:
            raise c_exc.VSMError(reason=resp.text)
    def _set_content_type(self, format=None):
        """
        Set the mime-type to either 'xml' or 'json'.
        :param format: format to be set.
        :return: mime-type string
        """
        if not format:
            format = self.format
        return "application/%s" % format
    def _delete(self, action, body=None, headers=None):
        return self._do_request("DELETE", action, body=body,
                                headers=headers)
    def _get(self, action, body=None, headers=None):
        return self._do_request("GET", action, body=body,
                                headers=headers)
    def _post(self, action, body=None, headers=None):
        return self._do_request("POST", action, body=body,
                                headers=headers)
    def _put(self, action, body=None, headers=None):
        return self._do_request("PUT", action, body=body,
                                headers=headers)
    def _get_vsm_hosts(self):
        """
        Retrieve a list of VSM ip addresses.
        :return: list of host ip addresses
        """
        return [cr[c_const.CREDENTIAL_NAME] for cr in
                network_db_v2.get_all_n1kv_credentials()]
    def _get_auth_header(self, host_ip):
        """
        Retrieve header with auth info for the VSM.
        :param host_ip: IP address of the VSM
        :return: authorization header dict
        """
        username = c_cred.Store.get_username(host_ip)
        password = c_cred.Store.get_password(host_ip)
        # Use b64encode, not the deprecated encodestring: encodestring
        # inserts a newline every 76 characters, which corrupts the
        # Authorization header for long credentials (rstrip only removed
        # the trailing newline).
        auth = base64.b64encode("%s:%s" % (username, password))
        header = {"Authorization": "Basic %s" % auth}
        return header
    def get_clusters(self):
        """Fetches a list of all vxlan gateway clusters."""
        return self._get(self.clusters_path)
    def create_encapsulation_profile(self, encap):
        """
        Create an encapsulation profile on VSM.
        :param encap: encapsulation dict
        """
        body = {'name': encap['name'],
                'addMappings': encap['add_segment_list'],
                'delMappings': encap['del_segment_list']}
        return self._post(self.encap_profiles_path,
                          body=body)
    def update_encapsulation_profile(self, context, profile_name, body):
        """
        Adds a vlan to bridge-domain mapping to an encapsulation profile.
        :param profile_name: Name of the encapsulation profile
        :param body: mapping dictionary
        """
        return self._post(self.encap_profile_path
                          % profile_name, body=body)
    def delete_encapsulation_profile(self, name):
        """
        Delete an encapsulation profile on VSM.
        :param name: name of the encapsulation profile to be deleted
        """
        return self._delete(self.encap_profile_path % name)
| |
# This is a copy of the Python logging.config.dictconfig module,
# reproduced with permission. It is provided here for backwards
# compatibility for Python versions prior to 2.7.
#
# Copyright 2009-2010 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import logging.handlers
import re
import sys
import types
# Legal Python identifier: leading letter/underscore, then letters,
# digits or underscores (case-insensitive).
IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I)
def valid_ident(s):
    """Return True if *s* is a valid Python identifier, else raise ValueError."""
    if IDENTIFIER.match(s) is None:
        raise ValueError('Not a valid Python identifier: %r' % s)
    return True
#
# This function is defined in logging only in recent versions of Python
#
try:
    from logging import _checkLevel
except ImportError:
    def _checkLevel(level):
        """Normalize *level* to an int, accepting ints or registered names."""
        if isinstance(level, int):
            rv = level
        elif str(level) == level:
            # String level: must be a key of the (Python 2 internal)
            # logging._levelNames mapping, e.g. 'DEBUG' or 'INFO'.
            if level not in logging._levelNames:
                raise ValueError('Unknown level: %r' % level)
            rv = logging._levelNames[level]
        else:
            raise TypeError('Level not an integer or a '
                            'valid string: %r' % level)
        return rv
# The ConvertingXXX classes are wrappers around standard Python containers,
# and they serve to convert any suitable values in the container. The
# conversion converts base dicts, lists and tuples to their wrapped
# equivalents, whereas strings which match a conversion format are converted
# appropriately.
#
# Each wrapper should have a configurator attribute holding the actual
# configurator to use for conversion.
class ConvertingDict(dict):
    """A converting dictionary wrapper.

    Values fetched through the accessors are passed through the attached
    ``configurator``'s ``convert``; converted containers remember their
    parent and key so they can later be re-resolved in place.
    """

    def _post_convert(self, key, original, converted, cache):
        # Shared bookkeeping for the three accessors. When conversion
        # produced a new object, optionally cache it back into the dict
        # and record parent/key on converting containers.
        if original is not converted:
            if cache:
                self[key] = converted
            if type(converted) in (ConvertingDict, ConvertingList,
                                   ConvertingTuple):
                converted.parent = self
                converted.key = key
        return converted

    def __getitem__(self, key):
        original = dict.__getitem__(self, key)
        converted = self.configurator.convert(original)
        return self._post_convert(key, original, converted, True)

    def get(self, key, default=None):
        original = dict.get(self, key, default)
        converted = self.configurator.convert(original)
        return self._post_convert(key, original, converted, True)

    def pop(self, key, default=None):
        # The entry is already removed, so nothing is cached back.
        original = dict.pop(self, key, default)
        converted = self.configurator.convert(original)
        return self._post_convert(key, original, converted, False)
class ConvertingList(list):
    """A converting list wrapper.

    Items fetched via indexing or ``pop`` are run through the attached
    ``configurator``'s ``convert``; converted containers get a back
    reference to this list.
    """

    def __getitem__(self, key):
        item = list.__getitem__(self, key)
        converted = self.configurator.convert(item)
        if item is not converted:
            # Cache the converted value for subsequent accesses.
            self[key] = converted
            if type(converted) in (ConvertingDict, ConvertingList,
                                   ConvertingTuple):
                converted.parent = self
                converted.key = key
        return converted

    def pop(self, idx=-1):
        item = list.pop(self, idx)
        converted = self.configurator.convert(item)
        # Popped items are not cached back; only the parent link is set.
        if converted is not item and type(converted) in (ConvertingDict,
                                                         ConvertingList,
                                                         ConvertingTuple):
            converted.parent = self
        return converted
class ConvertingTuple(tuple):
    """A converting tuple wrapper.

    Tuples are immutable, so converted items are never written back;
    converted containers only get the parent/key back references.
    """

    def __getitem__(self, key):
        item = tuple.__getitem__(self, key)
        converted = self.configurator.convert(item)
        if item is not converted and type(converted) in (
                ConvertingDict, ConvertingList, ConvertingTuple):
            converted.parent = self
            converted.key = key
        return converted
class BaseConfigurator(object):
    """
    The configurator base class which defines some useful defaults.
    """
    # 'prefix://suffix' strings trigger value conversion (see convert()).
    CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')
    WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
    DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
    INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*')
    DIGIT_PATTERN = re.compile(r'^\d+$')
    # Maps a conversion-URL prefix to the method name that handles it.
    value_converters = {
        'ext' : 'ext_convert',
        'cfg' : 'cfg_convert',
    }
    # We might want to use a different one, e.g. importlib
    importer = __import__
    def __init__(self, config):
        self.config = ConvertingDict(config)
        self.config.configurator = self
    def resolve(self, s):
        """
        Resolve strings to objects using standard import and attribute
        syntax.
        """
        name = s.split('.')
        used = name.pop(0)
        try:
            found = self.importer(used)
            for frag in name:
                used += '.' + frag
                try:
                    found = getattr(found, frag)
                except AttributeError:
                    # The attribute may live in a submodule that has not
                    # been imported yet; import it and retry.
                    self.importer(used)
                    found = getattr(found, frag)
            return found
        except ImportError:
            e, tb = sys.exc_info()[1:]
            v = ValueError('Cannot resolve %r: %s' % (s, e))
            # Preserve the original cause/traceback for debugging.
            v.__cause__, v.__traceback__ = e, tb
            raise v
    def ext_convert(self, value):
        """Default converter for the ext:// protocol."""
        return self.resolve(value)
    def cfg_convert(self, value):
        """Default converter for the cfg:// protocol."""
        rest = value
        m = self.WORD_PATTERN.match(rest)
        if m is None:
            raise ValueError("Unable to convert %r" % value)
        else:
            rest = rest[m.end():]
            d = self.config[m.groups()[0]]
            #print d, rest
            # Walk the remaining '.name' and '[index]' accessors.
            while rest:
                m = self.DOT_PATTERN.match(rest)
                if m:
                    d = d[m.groups()[0]]
                else:
                    m = self.INDEX_PATTERN.match(rest)
                    if m:
                        idx = m.groups()[0]
                        if not self.DIGIT_PATTERN.match(idx):
                            # Non-numeric index: plain mapping key.
                            d = d[idx]
                        else:
                            try:
                                n = int(idx) # try as number first (most likely)
                                d = d[n]
                            except TypeError:
                                # Container is a mapping keyed by the string.
                                d = d[idx]
                if m:
                    rest = rest[m.end():]
                else:
                    raise ValueError('Unable to convert '
                                     '%r at %r' % (value, rest))
        #rest should be empty
        return d
    def convert(self, value):
        """
        Convert values to an appropriate type. dicts, lists and tuples are
        replaced by their converting alternatives. Strings are checked to
        see if they have a conversion format and are converted if they do.
        """
        if not isinstance(value, ConvertingDict) and isinstance(value, dict):
            value = ConvertingDict(value)
            value.configurator = self
        elif not isinstance(value, ConvertingList) and isinstance(value, list):
            value = ConvertingList(value)
            value.configurator = self
        elif not isinstance(value, ConvertingTuple) and\
                 isinstance(value, tuple):
            value = ConvertingTuple(value)
            value.configurator = self
        return value
    def configure_custom(self, config):
        """Configure an object with a user-supplied factory."""
        c = config.pop('()')
        # Resolve dotted-path strings; old-style classes are callable, so
        # they are excluded from resolution on Python 2.
        if not hasattr(c, '__call__') and hasattr(types, 'ClassType') and type(c) != types.ClassType:
            c = self.resolve(c)
        props = config.pop('.', None)
        # Check for valid identifiers
        kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
        result = c(**kwargs)
        if props:
            # '.'-keyed dict holds attributes to set on the created object.
            for name, value in props.items():
                setattr(result, name, value)
        return result
    def as_tuple(self, value):
        """Utility function which converts lists to tuples."""
        if isinstance(value, list):
            value = tuple(value)
        return value
class DictConfigurator(BaseConfigurator):
"""
Configure logging using a dictionary-like object to describe the
configuration.
"""
    def configure(self):
        """Do the configuration.

        Validates the config version, then applies either an incremental
        update (handler levels and logger settings only) or a full
        reconfiguration (formatters, filters, handlers, loggers, root),
        all while holding the logging module lock.
        """
        config = self.config
        if 'version' not in config:
            raise ValueError("dictionary doesn't specify a version")
        if config['version'] != 1:
            raise ValueError("Unsupported version: %s" % config['version'])
        incremental = config.pop('incremental', False)
        EMPTY_DICT = {}
        logging._acquireLock()
        try:
            if incremental:
                handlers = config.get('handlers', EMPTY_DICT)
                # incremental handler config only if handler name
                # ties in to logging._handlers (Python 2.7)
                if sys.version_info[:2] == (2, 7):
                    for name in handlers:
                        if name not in logging._handlers:
                            raise ValueError('No handler found with '
                                             'name %r' % name)
                        else:
                            try:
                                handler = logging._handlers[name]
                                handler_config = handlers[name]
                                level = handler_config.get('level', None)
                                if level:
                                    handler.setLevel(_checkLevel(level))
                            except StandardError as e:
                                raise ValueError('Unable to configure handler '
                                                 '%r: %s' % (name, e))
                loggers = config.get('loggers', EMPTY_DICT)
                for name in loggers:
                    try:
                        self.configure_logger(name, loggers[name], True)
                    except StandardError as e:
                        raise ValueError('Unable to configure logger '
                                         '%r: %s' % (name, e))
                root = config.get('root', None)
                if root:
                    try:
                        self.configure_root(root, True)
                    except StandardError as e:
                        raise ValueError('Unable to configure root '
                                         'logger: %s' % e)
            else:
                disable_existing = config.pop('disable_existing_loggers', True)
                # Full reconfiguration: drop all previously registered
                # handlers before rebuilding from the dict.
                logging._handlers.clear()
                del logging._handlerList[:]
                # Do formatters first - they don't refer to anything else
                formatters = config.get('formatters', EMPTY_DICT)
                for name in formatters:
                    try:
                        formatters[name] = self.configure_formatter(
                                                            formatters[name])
                    except StandardError as e:
                        raise ValueError('Unable to configure '
                                         'formatter %r: %s' % (name, e))
                # Next, do filters - they don't refer to anything else, either
                filters = config.get('filters', EMPTY_DICT)
                for name in filters:
                    try:
                        filters[name] = self.configure_filter(filters[name])
                    except StandardError as e:
                        raise ValueError('Unable to configure '
                                         'filter %r: %s' % (name, e))
                # Next, do handlers - they refer to formatters and filters
                # As handlers can refer to other handlers, sort the keys
                # to allow a deterministic order of configuration
                handlers = config.get('handlers', EMPTY_DICT)
                for name in sorted(handlers):
                    try:
                        handler = self.configure_handler(handlers[name])
                        handler.name = name
                        handlers[name] = handler
                    except StandardError as e:
                        raise ValueError('Unable to configure handler '
                                         '%r: %s' % (name, e))
                # Next, do loggers - they refer to handlers and filters
                #we don't want to lose the existing loggers,
                #since other threads may have pointers to them.
                #existing is set to contain all existing loggers,
                #and as we go through the new configuration we
                #remove any which are configured. At the end,
                #what's left in existing is the set of loggers
                #which were in the previous configuration but
                #which are not in the new configuration.
                root = logging.root
                existing = list(root.manager.loggerDict)
                #The list needs to be sorted so that we can
                #avoid disabling child loggers of explicitly
                #named loggers. With a sorted list it is easier
                #to find the child loggers.
                existing.sort()
                #We'll keep the list of existing loggers
                #which are children of named loggers here...
                child_loggers = []
                #now set up the new ones...
                loggers = config.get('loggers', EMPTY_DICT)
                for name in loggers:
                    if name in existing:
                        i = existing.index(name)
                        prefixed = name + "."
                        pflen = len(prefixed)
                        num_existing = len(existing)
                        i = i + 1 # look at the entry after name
                        while (i < num_existing) and\
                              (existing[i][:pflen] == prefixed):
                            child_loggers.append(existing[i])
                            i = i + 1
                        existing.remove(name)
                    try:
                        self.configure_logger(name, loggers[name])
                    except StandardError as e:
                        raise ValueError('Unable to configure logger '
                                         '%r: %s' % (name, e))
                #Disable any old loggers. There's no point deleting
                #them as other threads may continue to hold references
                #and by disabling them, you stop them doing any logging.
                #However, don't disable children of named loggers, as that's
                #probably not what was intended by the user.
                for log in existing:
                    logger = root.manager.loggerDict[log]
                    if log in child_loggers:
                        # Child of an explicitly named logger: reset it so it
                        # delegates to its (re)configured parent.
                        logger.level = logging.NOTSET
                        logger.handlers = []
                        logger.propagate = True
                    elif disable_existing:
                        logger.disabled = True
                # And finally, do the root logger
                root = config.get('root', None)
                if root:
                    try:
                        self.configure_root(root)
                    except StandardError as e:
                        raise ValueError('Unable to configure root '
                                         'logger: %s' % e)
        finally:
            logging._releaseLock()
def configure_formatter(self, config):
"""Configure a formatter from a dictionary."""
if '()' in config:
factory = config['()'] # for use in exception handler
try:
result = self.configure_custom(config)
except TypeError as te:
if "'format'" not in str(te):
raise
#Name of parameter changed from fmt to format.
#Retry with old name.
#This is so that code can be used with older Python versions
#(e.g. by Django)
config['fmt'] = config.pop('format')
config['()'] = factory
result = self.configure_custom(config)
else:
fmt = config.get('format', None)
dfmt = config.get('datefmt', None)
result = logging.Formatter(fmt, dfmt)
return result
def configure_filter(self, config):
"""Configure a filter from a dictionary."""
if '()' in config:
result = self.configure_custom(config)
else:
name = config.get('name', '')
result = logging.Filter(name)
return result
def add_filters(self, filterer, filters):
"""Add filters to a filterer from a list of names."""
for f in filters:
try:
filterer.addFilter(self.config['filters'][f])
except StandardError as e:
raise ValueError('Unable to add filter %r: %s' % (f, e))
    def configure_handler(self, config):
        """Configure a handler from a dictionary.

        Resolves the 'formatter', 'level' and 'filters' references, builds
        the handler either via a custom '()' factory or via the named
        'class', then applies the resolved settings before returning it.
        """
        # Pop non-constructor keys first so they are not passed as kwargs.
        formatter = config.pop('formatter', None)
        if formatter:
            try:
                # Replace the formatter *name* with the configured instance.
                formatter = self.config['formatters'][formatter]
            except StandardError as e:
                raise ValueError('Unable to set formatter '
                                 '%r: %s' % (formatter, e))
        level = config.pop('level', None)
        filters = config.pop('filters', None)
        if '()' in config:
            c = config.pop('()')
            # Resolve dotted-name strings to callables; old-style classes
            # (Python 2 only) lack __call__, hence the ClassType check.
            if not hasattr(c, '__call__') and hasattr(types, 'ClassType') and type(c) != types.ClassType:
                c = self.resolve(c)
            factory = c
        else:
            klass = self.resolve(config.pop('class'))
            #Special case for handler which refers to another handler
            if issubclass(klass, logging.handlers.MemoryHandler) and\
                'target' in config:
                try:
                    config['target'] = self.config['handlers'][config['target']]
                except StandardError as e:
                    raise ValueError('Unable to set target handler '
                                     '%r: %s' % (config['target'], e))
            elif issubclass(klass, logging.handlers.SMTPHandler) and\
                'mailhost' in config:
                config['mailhost'] = self.as_tuple(config['mailhost'])
            elif issubclass(klass, logging.handlers.SysLogHandler) and\
                'address' in config:
                config['address'] = self.as_tuple(config['address'])
            factory = klass
        # Remaining keys that are valid identifiers become constructor kwargs.
        kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
        try:
            result = factory(**kwargs)
        except TypeError as te:
            if "'stream'" not in str(te):
                raise
            #The argument name changed from strm to stream
            #Retry with old name.
            #This is so that code can be used with older Python versions
            #(e.g. by Django)
            kwargs['strm'] = kwargs.pop('stream')
            result = factory(**kwargs)
        if formatter:
            result.setFormatter(formatter)
        if level is not None:
            result.setLevel(_checkLevel(level))
        if filters:
            self.add_filters(result, filters)
        return result
def add_handlers(self, logger, handlers):
"""Add handlers to a logger from a list of names."""
for h in handlers:
try:
logger.addHandler(self.config['handlers'][h])
except StandardError as e:
raise ValueError('Unable to add handler %r: %s' % (h, e))
def common_logger_config(self, logger, config, incremental=False):
"""
Perform configuration which is common to root and non-root loggers.
"""
level = config.get('level', None)
if level is not None:
logger.setLevel(_checkLevel(level))
if not incremental:
#Remove any existing handlers
for h in logger.handlers[:]:
logger.removeHandler(h)
handlers = config.get('handlers', None)
if handlers:
self.add_handlers(logger, handlers)
filters = config.get('filters', None)
if filters:
self.add_filters(logger, filters)
def configure_logger(self, name, config, incremental=False):
"""Configure a non-root logger from a dictionary."""
logger = logging.getLogger(name)
self.common_logger_config(logger, config, incremental)
propagate = config.get('propagate', None)
if propagate is not None:
logger.propagate = propagate
def configure_root(self, config, incremental=False):
"""Configure a root logger from a dictionary."""
root = logging.getLogger()
self.common_logger_config(root, config, incremental)
# Hook allowing applications to substitute their own configurator class.
dictConfigClass = DictConfigurator
def dictConfig(config):
    """Configure logging using a dictionary."""
    dictConfigClass(config).configure()
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Grid Dynamics
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import fixtures
from oslo.config import cfg
from inspect import getargspec
from nova import exception
from nova.openstack.common import uuidutils
from nova import test
from nova.tests import fake_processutils
from nova.tests.virt.libvirt import fake_libvirt_utils
from nova.virt.libvirt import imagebackend
CONF = cfg.CONF
class _ImageTestCase(object):
    """Shared cache()/preallocation tests for the libvirt image backends.

    Mixed into concrete TestCases which must set ``image_class`` and
    ``SIZE`` before this ``setUp`` runs.
    """
    INSTANCES_PATH = '/instances_path'
    def mock_create_image(self, image):
        """Replace image.create_image with a shim that just calls fn(target=base)."""
        def create_image(fn, base, size, *args, **kwargs):
            fn(target=base, *args, **kwargs)
        image.create_image = create_image
    def setUp(self):
        """Set common fixtures: fake instance, paths, and fake libvirt_utils."""
        super(_ImageTestCase, self).setUp()
        self.flags(disable_process_locking=True,
                   instances_path=self.INSTANCES_PATH)
        self.INSTANCE = {'name': 'instance',
                         'uuid': uuidutils.generate_uuid()}
        self.NAME = 'fake.vm'
        self.TEMPLATE = 'template'
        self.OLD_STYLE_INSTANCE_PATH = \
            fake_libvirt_utils.get_instance_path(self.INSTANCE, forceold=True)
        self.PATH = os.path.join(
            fake_libvirt_utils.get_instance_path(self.INSTANCE), self.NAME)
        # TODO(mikal): rename template_dir to base_dir and template_path
        # to cached_image_path. This will be less confusing.
        self.TEMPLATE_DIR = os.path.join(CONF.instances_path, '_base')
        self.TEMPLATE_PATH = os.path.join(self.TEMPLATE_DIR, 'template')
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.imagebackend.libvirt_utils',
            fake_libvirt_utils))
    def test_cache(self):
        """Neither image nor template exists: fetch runs and base dir is made."""
        self.mox.StubOutWithMock(os.path, 'exists')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_DIR).AndReturn(False)
        os.path.exists(self.PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
        fn = self.mox.CreateMockAnything()
        fn(target=self.TEMPLATE_PATH)
        self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
        imagebackend.fileutils.ensure_tree(self.TEMPLATE_DIR)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mock_create_image(image)
        image.cache(fn, self.TEMPLATE)
        self.mox.VerifyAll()
    def test_cache_image_exists(self):
        """Image and template already exist: no fetch call is expected."""
        self.mox.StubOutWithMock(os.path, 'exists')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
        os.path.exists(self.PATH).AndReturn(True)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.cache(None, self.TEMPLATE)
        self.mox.VerifyAll()
    def test_cache_base_dir_exists(self):
        """Base dir exists: fetch still runs but ensure_tree must not be called."""
        self.mox.StubOutWithMock(os.path, 'exists')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
        os.path.exists(self.PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
        fn = self.mox.CreateMockAnything()
        fn(target=self.TEMPLATE_PATH)
        self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mock_create_image(image)
        image.cache(fn, self.TEMPLATE)
        self.mox.VerifyAll()
    def test_cache_template_exists(self):
        """Template cached already: create_image runs without re-fetching."""
        self.mox.StubOutWithMock(os.path, 'exists')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
        os.path.exists(self.PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
        fn = self.mox.CreateMockAnything()
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mock_create_image(image)
        image.cache(fn, self.TEMPLATE)
        self.mox.VerifyAll()
    def test_prealloc_image(self):
        """With preallocate_images=space, fallocate is probed once then used."""
        CONF.set_override('preallocate_images', 'space')
        fake_processutils.fake_execute_clear_log()
        fake_processutils.stub_out_processutils_execute(self.stubs)
        image = self.image_class(self.INSTANCE, self.NAME)
        def fake_fetch(target, *args, **kwargs):
            return
        self.stubs.Set(os.path, 'exists', lambda _: True)
        self.stubs.Set(os, 'access', lambda p, w: True)
        # Call twice to verify testing fallocate is only called once.
        image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
        image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
        self.assertEqual(fake_processutils.fake_execute_get_log(),
            ['fallocate -n -l 1 %s.fallocate_test' % self.PATH,
             'fallocate -n -l %s %s' % (self.SIZE, self.PATH),
             'fallocate -n -l %s %s' % (self.SIZE, self.PATH)])
    def test_prealloc_image_without_write_access(self):
        """No write access to the image: fallocate must not be attempted."""
        CONF.set_override('preallocate_images', 'space')
        fake_processutils.fake_execute_clear_log()
        fake_processutils.stub_out_processutils_execute(self.stubs)
        image = self.image_class(self.INSTANCE, self.NAME)
        def fake_fetch(target, *args, **kwargs):
            return
        self.stubs.Set(image, 'check_image_exists', lambda: True)
        self.stubs.Set(image, '_can_fallocate', lambda: True)
        self.stubs.Set(os.path, 'exists', lambda _: True)
        self.stubs.Set(os, 'access', lambda p, w: False)
        # Testing fallocate is only called when user has write access.
        image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
        self.assertEqual(fake_processutils.fake_execute_get_log(), [])
class RawTestCase(_ImageTestCase, test.NoDBTestCase):
    """Tests for the Raw image backend."""
    SIZE = 1024
    def setUp(self):
        self.image_class = imagebackend.Raw
        super(RawTestCase, self).setUp()
        # correct_format touches the filesystem; neuter it for these tests.
        self.stubs.Set(imagebackend.Raw, 'correct_format', lambda _: None)
    def prepare_mocks(self):
        """Stub the raw-backend helpers used by create_image; return fetch mock."""
        fn = self.mox.CreateMockAnything()
        self.mox.StubOutWithMock(imagebackend.utils.synchronized,
                                 '__call__')
        self.mox.StubOutWithMock(imagebackend.libvirt_utils, 'copy_image')
        self.mox.StubOutWithMock(imagebackend.disk, 'extend')
        return fn
    def test_create_image(self):
        """No size: the template is fetched then copied into place."""
        fn = self.prepare_mocks()
        fn(target=self.TEMPLATE_PATH, image_id=None)
        imagebackend.libvirt_utils.copy_image(self.TEMPLATE_PATH, self.PATH)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, None, image_id=None)
        self.mox.VerifyAll()
    def test_create_image_generated(self):
        """Generated image: fetch writes directly to the final path, no copy."""
        fn = self.prepare_mocks()
        fn(target=self.PATH)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, None)
        self.mox.VerifyAll()
    def test_create_image_extend(self):
        """With a size, the copied image is extended (raw => use_cow=False)."""
        fn = self.prepare_mocks()
        fn(target=self.TEMPLATE_PATH, image_id=None)
        imagebackend.libvirt_utils.copy_image(self.TEMPLATE_PATH, self.PATH)
        imagebackend.disk.extend(self.PATH, self.SIZE, use_cow=False)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, self.SIZE, image_id=None)
        self.mox.VerifyAll()
    def test_correct_format(self):
        """driver_format is read back from qemu-img when the file exists."""
        # Fix: the original created a throwaway CreateMockAnything() here
        # that was immediately shadowed below; the dead assignment is gone.
        self.stubs.UnsetAll()
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(imagebackend.images, 'qemu_img_info')
        os.path.exists(self.PATH).AndReturn(True)
        info = self.mox.CreateMockAnything()
        info.file_format = 'foo'
        imagebackend.images.qemu_img_info(self.PATH).AndReturn(info)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME, path=self.PATH)
        self.assertEqual(image.driver_format, 'foo')
        self.mox.VerifyAll()
class Qcow2TestCase(_ImageTestCase, test.NoDBTestCase):
    """Tests for the Qcow2 (copy-on-write) image backend."""
    SIZE = 1024 * 1024 * 1024
    def setUp(self):
        self.image_class = imagebackend.Qcow2
        super(Qcow2TestCase, self).setUp()
        # Resized backing files are named <template>_<size-in-GB>.
        self.QCOW2_BASE = (self.TEMPLATE_PATH +
                           '_%d' % (self.SIZE / (1024 * 1024 * 1024)))
    def prepare_mocks(self):
        """Stub the qcow2 helpers used by create_image; return the fetch mock."""
        fn = self.mox.CreateMockAnything()
        self.mox.StubOutWithMock(imagebackend.utils.synchronized,
                                 '__call__')
        self.mox.StubOutWithMock(imagebackend.libvirt_utils,
                                 'create_cow_image')
        self.mox.StubOutWithMock(imagebackend.libvirt_utils, 'copy_image')
        self.mox.StubOutWithMock(imagebackend.disk, 'extend')
        return fn
    def test_create_image(self):
        """No size given: just overlay a cow image on the template."""
        fn = self.prepare_mocks()
        fn(target=self.TEMPLATE_PATH)
        imagebackend.libvirt_utils.create_cow_image(self.TEMPLATE_PATH,
                                                    self.PATH)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, None)
        self.mox.VerifyAll()
    def test_create_image_with_size(self):
        """Requested size larger than the template: cow image is extended."""
        fn = self.prepare_mocks()
        fn(target=self.TEMPLATE_PATH)
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(imagebackend.disk, 'get_disk_size')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
        os.path.exists(self.PATH).AndReturn(False)
        imagebackend.disk.get_disk_size(self.TEMPLATE_PATH
                                        ).AndReturn(self.SIZE)
        os.path.exists(self.PATH).AndReturn(False)
        imagebackend.libvirt_utils.create_cow_image(self.TEMPLATE_PATH,
                                                    self.PATH)
        imagebackend.disk.extend(self.PATH, self.SIZE, use_cow=True)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
        self.mox.VerifyAll()
    def test_create_image_too_small(self):
        """Requested size smaller than the template must be rejected."""
        fn = self.prepare_mocks()
        fn(target=self.TEMPLATE_PATH)
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(imagebackend.disk, 'get_disk_size')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
        os.path.exists(self.PATH).AndReturn(False)
        imagebackend.disk.get_disk_size(self.TEMPLATE_PATH
                                        ).AndReturn(self.SIZE)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        self.assertRaises(exception.InstanceTypeDiskTooSmall,
                          image.create_image, fn, self.TEMPLATE_PATH, 1)
        self.mox.VerifyAll()
    def test_generate_resized_backing_files(self):
        """Existing image with a missing resized backing file regenerates it."""
        fn = self.prepare_mocks()
        fn(target=self.TEMPLATE_PATH)
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(imagebackend.disk, 'get_disk_size')
        self.mox.StubOutWithMock(imagebackend.libvirt_utils,
                                 'get_disk_backing_file')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
        os.path.exists(self.PATH).AndReturn(True)
        imagebackend.libvirt_utils.get_disk_backing_file(self.PATH)\
            .AndReturn(self.QCOW2_BASE)
        os.path.exists(self.QCOW2_BASE).AndReturn(False)
        imagebackend.libvirt_utils.copy_image(self.TEMPLATE_PATH,
                                              self.QCOW2_BASE)
        imagebackend.disk.extend(self.QCOW2_BASE, self.SIZE, use_cow=True)
        imagebackend.disk.get_disk_size(self.TEMPLATE_PATH
                                        ).AndReturn(self.SIZE)
        os.path.exists(self.PATH).AndReturn(True)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
        self.mox.VerifyAll()
    def test_qcow2_exists_and_has_no_backing_file(self):
        """Existing flat qcow2 (no backing file) is left untouched."""
        fn = self.prepare_mocks()
        fn(target=self.TEMPLATE_PATH)
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(imagebackend.disk, 'get_disk_size')
        self.mox.StubOutWithMock(imagebackend.libvirt_utils,
                                 'get_disk_backing_file')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
        os.path.exists(self.PATH).AndReturn(True)
        imagebackend.libvirt_utils.get_disk_backing_file(self.PATH)\
            .AndReturn(None)
        imagebackend.disk.get_disk_size(self.TEMPLATE_PATH
                                        ).AndReturn(self.SIZE)
        os.path.exists(self.PATH).AndReturn(True)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
        self.mox.VerifyAll()
class LvmTestCase(_ImageTestCase, test.NoDBTestCase):
    """Tests for the Lvm (logical volume) image backend."""
    VG = 'FakeVG'
    TEMPLATE_SIZE = 512
    SIZE = 1024
    def setUp(self):
        self.image_class = imagebackend.Lvm
        super(LvmTestCase, self).setUp()
        self.flags(libvirt_images_volume_group=self.VG)
        self.LV = '%s_%s' % (self.INSTANCE['name'], self.NAME)
        # LVM has no old-style path; images live under /dev/<vg>/<lv>.
        self.OLD_STYLE_INSTANCE_PATH = None
        self.PATH = os.path.join('/dev', self.VG, self.LV)
        self.disk = imagebackend.disk
        self.utils = imagebackend.utils
        self.libvirt_utils = imagebackend.libvirt_utils
    def prepare_mocks(self):
        """Stub the LVM helpers used by create_image; return the fetch mock."""
        fn = self.mox.CreateMockAnything()
        self.mox.StubOutWithMock(self.disk, 'resize2fs')
        self.mox.StubOutWithMock(self.libvirt_utils, 'create_lvm_image')
        self.mox.StubOutWithMock(self.disk, 'get_disk_size')
        self.mox.StubOutWithMock(self.utils, 'execute')
        return fn
    def _create_image(self, sparse):
        """No size: LV is created at template size and the template converted."""
        fn = self.prepare_mocks()
        fn(target=self.TEMPLATE_PATH)
        self.libvirt_utils.create_lvm_image(self.VG,
                                            self.LV,
                                            self.TEMPLATE_SIZE,
                                            sparse=sparse)
        self.disk.get_disk_size(self.TEMPLATE_PATH
                                ).AndReturn(self.TEMPLATE_SIZE)
        cmd = ('qemu-img', 'convert', '-O', 'raw', self.TEMPLATE_PATH,
               self.PATH)
        self.utils.execute(*cmd, run_as_root=True)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, None)
        self.mox.VerifyAll()
    def _create_image_generated(self, sparse):
        """Generated (no template) image: fetch writes straight into the LV."""
        fn = self.prepare_mocks()
        self.libvirt_utils.create_lvm_image(self.VG, self.LV,
                                            self.SIZE, sparse=sparse)
        fn(target=self.PATH, ephemeral_size=None)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH,
                           self.SIZE, ephemeral_size=None)
        self.mox.VerifyAll()
    def _create_image_resize(self, sparse):
        """Requested size bigger than template: filesystem is resized too."""
        fn = self.prepare_mocks()
        fn(target=self.TEMPLATE_PATH)
        self.libvirt_utils.create_lvm_image(self.VG, self.LV,
                                            self.SIZE, sparse=sparse)
        self.disk.get_disk_size(self.TEMPLATE_PATH
                                ).AndReturn(self.TEMPLATE_SIZE)
        cmd = ('qemu-img', 'convert', '-O', 'raw', self.TEMPLATE_PATH,
               self.PATH)
        self.utils.execute(*cmd, run_as_root=True)
        self.disk.resize2fs(self.PATH, run_as_root=True)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
        self.mox.VerifyAll()
    def test_create_image(self):
        self._create_image(False)
    def test_create_image_sparsed(self):
        self.flags(libvirt_sparse_logical_volumes=True)
        self._create_image(True)
    def test_create_image_generated(self):
        self._create_image_generated(False)
    def test_create_image_generated_sparsed(self):
        self.flags(libvirt_sparse_logical_volumes=True)
        self._create_image_generated(True)
    def test_create_image_resize(self):
        self._create_image_resize(False)
    def test_create_image_resize_sparsed(self):
        self.flags(libvirt_sparse_logical_volumes=True)
        self._create_image_resize(True)
    def test_create_image_negative(self):
        """LV creation failure must clean up the logical volume."""
        fn = self.prepare_mocks()
        fn(target=self.TEMPLATE_PATH)
        self.libvirt_utils.create_lvm_image(self.VG,
                                            self.LV,
                                            self.SIZE,
                                            sparse=False
                                            ).AndRaise(RuntimeError())
        self.disk.get_disk_size(self.TEMPLATE_PATH
                                ).AndReturn(self.TEMPLATE_SIZE)
        self.mox.StubOutWithMock(self.libvirt_utils, 'remove_logical_volumes')
        self.libvirt_utils.remove_logical_volumes(self.PATH)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        self.assertRaises(RuntimeError, image.create_image, fn,
                          self.TEMPLATE_PATH, self.SIZE)
        self.mox.VerifyAll()
    def test_create_image_generated_negative(self):
        """Fetch failure for a generated image must clean up the volume too."""
        fn = self.prepare_mocks()
        fn(target=self.PATH,
           ephemeral_size=None).AndRaise(RuntimeError())
        self.libvirt_utils.create_lvm_image(self.VG,
                                            self.LV,
                                            self.SIZE,
                                            sparse=False)
        self.mox.StubOutWithMock(self.libvirt_utils, 'remove_logical_volumes')
        self.libvirt_utils.remove_logical_volumes(self.PATH)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        self.assertRaises(RuntimeError, image.create_image, fn,
                          self.TEMPLATE_PATH, self.SIZE,
                          ephemeral_size=None)
        self.mox.VerifyAll()
    def test_prealloc_image(self):
        """Preallocation is a no-op for LVM (the block device is allocated)."""
        CONF.set_override('preallocate_images', 'space')
        fake_processutils.fake_execute_clear_log()
        fake_processutils.stub_out_processutils_execute(self.stubs)
        image = self.image_class(self.INSTANCE, self.NAME)
        def fake_fetch(target, *args, **kwargs):
            return
        self.stubs.Set(os.path, 'exists', lambda _: True)
        image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
        self.assertEqual(fake_processutils.fake_execute_get_log(), [])
class RbdTestCase(_ImageTestCase, test.NoDBTestCase):
    """Tests for the Rbd (Ceph RADOS block device) image backend."""
    POOL = "FakePool"
    USER = "FakeUser"
    CONF = "FakeConf"
    SIZE = 1024
    def setUp(self):
        self.image_class = imagebackend.Rbd
        super(RbdTestCase, self).setUp()
        self.flags(libvirt_images_rbd_pool=self.POOL)
        self.flags(rbd_user=self.USER)
        self.flags(libvirt_images_rbd_ceph_conf=self.CONF)
        self.libvirt_utils = imagebackend.libvirt_utils
        self.utils = imagebackend.utils
        # Stand-ins for the rbd/rados modules, which may not be installed.
        self.rbd = self.mox.CreateMockAnything()
        self.rados = self.mox.CreateMockAnything()
    def prepare_mocks(self):
        """Stub the module-level rbd/rados bindings; return the fetch mock."""
        fn = self.mox.CreateMockAnything()
        self.mox.StubOutWithMock(imagebackend, 'rbd')
        self.mox.StubOutWithMock(imagebackend, 'rados')
        return fn
    def test_cache(self):
        """Existence is probed via check_image_exists rather than os.path."""
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(image, 'check_image_exists')
        os.path.exists(self.TEMPLATE_DIR).AndReturn(False)
        image.check_image_exists().AndReturn(False)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
        fn = self.mox.CreateMockAnything()
        fn(target=self.TEMPLATE_PATH)
        self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
        imagebackend.fileutils.ensure_tree(self.TEMPLATE_DIR)
        self.mox.ReplayAll()
        self.mock_create_image(image)
        image.cache(fn, self.TEMPLATE)
        self.mox.VerifyAll()
    def test_cache_base_dir_exists(self):
        """Base dir present: fetch runs but ensure_tree must not be called."""
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(image, 'check_image_exists')
        os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
        image.check_image_exists().AndReturn(False)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
        fn = self.mox.CreateMockAnything()
        fn(target=self.TEMPLATE_PATH)
        self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
        self.mox.ReplayAll()
        self.mock_create_image(image)
        image.cache(fn, self.TEMPLATE)
        self.mox.VerifyAll()
    def test_cache_image_exists(self):
        """Image already in the rbd pool: no fetch call is expected."""
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(image, 'check_image_exists')
        os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
        image.check_image_exists().AndReturn(True)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
        self.mox.ReplayAll()
        image.cache(None, self.TEMPLATE)
        self.mox.VerifyAll()
    def test_cache_template_exists(self):
        """Template cached locally: create_image runs without re-fetching."""
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(image, 'check_image_exists')
        os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
        image.check_image_exists().AndReturn(False)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
        fn = self.mox.CreateMockAnything()
        self.mox.ReplayAll()
        self.mock_create_image(image)
        image.cache(fn, self.TEMPLATE)
        self.mox.VerifyAll()
    def test_create_image(self):
        """Template is imported into the pool via import_rbd_image."""
        fn = self.prepare_mocks()
        fn(rbd=self.rbd, target=self.TEMPLATE_PATH)
        self.rbd.RBD_FEATURE_LAYERING = 1
        self.mox.StubOutWithMock(imagebackend.disk, 'get_disk_size')
        imagebackend.disk.get_disk_size(self.TEMPLATE_PATH
                                        ).AndReturn(self.SIZE)
        rbd_name = "%s/%s" % (self.INSTANCE['name'], self.NAME)
        cmd = ('--pool', self.POOL, self.TEMPLATE_PATH,
               rbd_name, '--new-format', '--id', self.USER,
               '--conf', self.CONF)
        self.libvirt_utils.import_rbd_image(self.TEMPLATE_PATH, *cmd)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, None, rbd=self.rbd)
        self.mox.VerifyAll()
    def test_prealloc_image(self):
        """Preallocation is a no-op for rbd-backed images."""
        CONF.set_override('preallocate_images', 'space')
        fake_processutils.fake_execute_clear_log()
        fake_processutils.stub_out_processutils_execute(self.stubs)
        self.mox.StubOutWithMock(imagebackend, 'rbd')
        self.mox.StubOutWithMock(imagebackend, 'rados')
        image = self.image_class(self.INSTANCE, self.NAME)
        def fake_fetch(target, *args, **kwargs):
            return
        # NOTE(review): fake_resize is defined but never stubbed in anywhere;
        # looks like leftover scaffolding -- confirm before removing.
        def fake_resize(rbd_name, size):
            return
        self.stubs.Set(os.path, 'exists', lambda _: True)
        self.stubs.Set(image, 'check_image_exists', lambda: True)
        image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
        self.assertEqual(fake_processutils.fake_execute_get_log(), [])
    def test_parent_compatible(self):
        """Rbd.libvirt_info must keep the same signature as the base Image."""
        self.assertEqual(getargspec(imagebackend.Image.libvirt_info),
                         getargspec(self.image_class.libvirt_info))
class BackendTestCase(test.NoDBTestCase):
    """Checks that Backend.image() maps image_type/use_cow to the right class."""
    INSTANCE = {'name': 'fake-instance',
                'uuid': uuidutils.generate_uuid()}
    NAME = 'fake-name.suffix'
    def get_image(self, use_cow, image_type):
        """Instantiate the backend image for the given cow setting and type."""
        backend = imagebackend.Backend(use_cow)
        return backend.image(self.INSTANCE, self.NAME, image_type)
    def _test_image(self, image_type, image_not_cow, image_cow):
        """Assert the classes picked for use_cow=False and use_cow=True."""
        raw_backed = self.get_image(False, image_type)
        cow_backed = self.get_image(True, image_type)
        def check(instance, class_object):
            failure = ('Expected %s, but got %s.'
                       % (class_object.__name__, instance.__class__.__name__))
            self.assertIsInstance(instance, class_object, msg=failure)
        check(raw_backed, image_not_cow)
        check(cow_backed, image_cow)
    def test_image_raw(self):
        self._test_image('raw', imagebackend.Raw, imagebackend.Raw)
    def test_image_qcow2(self):
        self._test_image('qcow2', imagebackend.Qcow2, imagebackend.Qcow2)
    def test_image_lvm(self):
        self.flags(libvirt_images_volume_group='FakeVG')
        self._test_image('lvm', imagebackend.Lvm, imagebackend.Lvm)
    def test_image_rbd(self):
        conf = "FakeConf"
        pool = "FakePool"
        self.flags(libvirt_images_rbd_pool=pool)
        self.flags(libvirt_images_rbd_ceph_conf=conf)
        self._test_image('rbd', imagebackend.Rbd, imagebackend.Rbd)
    def test_image_default(self):
        self._test_image('default', imagebackend.Raw, imagebackend.Qcow2)
| |
import threading
from OSC import *
from pymt import *
def binary(n, digits=8):
    '''Return *n* as a list of bits, zero-padded on the left to *digits*.

    Based on: http://www.daniweb.com/code/snippet216539.html
    '''
    bits = bin(n)[2:].rjust(digits, '0')
    return [int(bit) for bit in bits]
class MTMonome(MTButtonMatrix):
    '''
    An pymt implementation of monome, based on MTButtonMatrix and SerialOSC.
    http://github.com/tehn/serialoscp
    http://monome.org/data/app/monomeserial/osc
    '''
    def __init__(self, **kwargs):
        kwargs.setdefault('matrix_size', (8,8))
        kwargs.setdefault('size', (400, 400))
        super(MTMonome, self).__init__(**kwargs)
        self.prefix = '/box'
        self.address = 'localhost'
        self.output_port = 8000
        self.input_port = 8080
        # Client used to send outgoing /press messages.
        try:
            self.osc_client = OSCClient()
            self.osc_client.connect((self.address, self.output_port))
        except OSCClientError as e:
            print '[ERROR] OSCClient', e
        # Server for incoming led/sys messages, run in a background thread.
        try:
            self.osc_server = OSCServer((self.address, self.input_port))
            self.osc_server.addDefaultHandlers()
            self.osc_server.addMsgHandler('default', self.osc_handler)
            self.server_thread = threading.Thread(target=self.osc_server.serve_forever)
            self.server_thread.start()
        except IOError as e:
            print '[ERROR] OSCServer', e
    def on_press(self, state):
        '''
        Send an osc message when a button is touched down and up
        state = (row, column, state)
        /prefix/press [x] [y] [state]
        state: 1 (down) or 0 (up)
        '''
        y = state[0]
        x = state[1]
        state = state[2]
        m = OSCMessage(self.prefix + '/press')
        # Flip the column so widget coordinates match monome coordinates.
        m.append([y, (self.matrix_size[1]-1)-x, state])
        try:
            self.osc_client.send(m)
        except OSCClientError as e:
            print '[ERROR] OSCClient.send', e
    def on_touch_down(self, touch):
        # Emits a /press with state 1 for the tile under the touch.
        if self.collide_point(touch.x, touch.y):
            i,j = self.collide_point(touch.x, touch.y)
            self.dispatch_event('on_value_change', self.matrix)
            self.dispatch_event('on_press', (i,j, 1))
            self.last_tile = (i,j)
    def on_touch_up(self, touch):
        # Emits a /press with state 0 for the tile under the touch.
        if self.collide_point(touch.x, touch.y):
            i,j = self.collide_point(touch.x, touch.y)
            self.dispatch_event('on_value_change', self.matrix)
            self.dispatch_event('on_press', (i,j, 0))
            self.last_tile = (i,j)
    def osc_handler(self, addr, tags, data, client_address):
        '''
        Handle incoming messages
        '''
        print '[OSC]', addr, tags, data, client_address
        if addr == self.prefix + '/led':
            '''
            /prefix/led [x] [y] [state]
            state: 1 (on) or 0 (off)
            '''
            x = data[0]
            y = (self.matrix_size[1]-1)-data[1]
            state = data[2]
            self.matrix[x][y] = state
        elif addr == self.prefix + '/led_row':
            '''
            /prefix/led_row [row] [data]
            row: which row to update
            data: one byte of data (8 led positions)
            '''
            row = (self.matrix_size[1]-1)-data[0]
            data = binary(data[1]) #ex. [1,0,1,0,1,0,0,1]
            for i in range(self.matrix_size[1]):
                self.matrix[i][row] = data[i]
        elif addr == self.prefix + '/led_col':
            '''
            /prefix/led_col [col] [data]
            col: which column to update
            data: one byte of data (8 led positions)
            '''
            column = data[0]
            data = binary(data[1])
            self.matrix[column] = data
        elif addr == self.prefix + '/frame':
            '''
            /prefix/frame [A B C D E F G H]
            update a display, offset by x and y.
            '''
            self.matrix = [binary(y) for y in data]
        elif addr == self.prefix + '/clear':
            '''
            /prefix/clear [state]
            state: 0 (off, default if unspecified) or 1 (on)
            '''
            if len(data):
                self.clear(data[0])
            else:
                self.clear(0)
        #Sys Msgs
        elif addr == '/sys/prefix':
            '''
            /sys/prefix [string]
            change prefix to [string]
            Return:
            /sys/prefix [newprefix]
            '''
            prefix = data[0]
            self.prefix = prefix
            m = OSCMessage('/sys/press')
            m.append(self.prefix)
            try:
                self.osc_client.send(m)
            except OSCClientError as e:
                print '[ERROR] OSCClient.send', e
        elif addr == '/sys/cable':
            '''
            /sys/cable [left|up|right|down]
            changes cable setting for the unit
            '''
            pass
        elif addr == '/sys/offset':
            '''
            /sys/offset x y
            changes offset value for the unit
            '''
            pass
        elif addr == '/sys/intensity':
            '''
            /sys/intensity 0.
            changes unit intensity
            '''
            self.intensity(data[0])
        elif addr == '/sys/test':
            '''
            /sys/test [0|1]
            toggles test mode for the unit (turn on/off all leds)
            '''
            if data[0]:
                import time
                for i in range(9):
                    self.clear(i%2)
                    time.sleep(0.5)
        elif addr == '/sys/report':
            '''
            /sys/report
            Return:
            /sys/prefix
            /sys/cable
            /sys/offset
            '''
            pass
        return
    def clear(self, state):
        # state truthy: light every led; otherwise reset the whole matrix.
        if state:
            self.matrix = [[1 for i in range(self.matrix_size[1])] for j in range(self.matrix_size[0])]
        else:
            self.reset()
    def intensity(self, intensity):
        # Only the alpha channel of the pressed-button color is changed.
        r, g, b, i = self.downcolor
        self.downcolor = (r,g,b,intensity)
    def close(self):
        # Shut down OSC endpoints and wait for the server thread to finish.
        self.osc_client.close()
        self.osc_server.close()
        self.server_thread.join()
class MTScatterMonome(MTScatterWidget):
    '''Scatter container wrapping an MTMonome with a 15px margin on each side.'''
    def __init__(self, **kwargs):
        grid = MTMonome()
        self.monome = grid
        # Default the scatter size to the grid plus a 15px border all around.
        kwargs.setdefault('size', (grid.width + 30, grid.height + 30))
        super(MTScatterMonome, self).__init__(**kwargs)
        grid.pos = (15 + grid.x, 15 + grid.y)
        self.add_widget(grid)
if __name__ == '__main__':
    # Extra CSS classes for the demo window (grid colors and borders).
    additional_css = '''
    .simple {
        border-width: 3;
        draw-border: 1;
        bg-color: rgb(100, 100, 200, 255);
        touch-color: rgba(100, 100, 250, 255);
    }
    .dois {
        border-width: 3;
        draw-border: 1;
        bg-color: rgb(200, 100, 100, 255);
        touch-color: rgba(100, 100, 250, 255);
    }
    '''
    css_add_sheet(additional_css)
    window = MTWindow()
    #monome = MTMonome()
    #window.add_widget(monome)
    smonome = MTScatterMonome(cls=('simple'))
    window.add_widget(smonome)
    try:
        runTouchApp()
    except KeyboardInterrupt:
        # Shut down the OSC client/server threads so the process can exit.
        #monome.close()
        smonome.monome.close()
| |
# encoding=utf8
import datetime
from distutils.version import StrictVersion
import hashlib
import os.path
import random
from seesaw.config import realize, NumberConfigValue
from seesaw.item import ItemInterpolation, ItemValue
from seesaw.task import SimpleTask, LimitConcurrent
from seesaw.tracker import GetItemFromTracker, PrepareStatsForTracker, \
UploadWithTracker, SendDoneToTracker
import shutil
import socket
import subprocess
import sys
import time
import string
import seesaw
from seesaw.externalprocess import WgetDownload
from seesaw.pipeline import Pipeline
from seesaw.project import Project
from seesaw.util import find_executable
# check the seesaw version (older versions lack features this pipeline uses)
if StrictVersion(seesaw.__version__) < StrictVersion("0.8.5"):
    raise Exception("This pipeline needs seesaw version 0.8.5 or higher.")

###########################################################################
# Find a useful Wget+Lua executable.
#
# WGET_LUA will be set to the first path that
# 1. does not crash with --version, and
# 2. prints the required version string
WGET_LUA = find_executable(
    "Wget+Lua",
    ["GNU Wget 1.14.lua.20130523-9a5c", "GNU Wget 1.14.lua.20160530-955376b"],
    [
        "./wget-lua",
        "./wget-lua-warrior",
        "./wget-lua-local",
        "../wget-lua",
        "../../wget-lua",
        "/home/warrior/wget-lua",
        "/usr/bin/wget-lua"
    ]
)
if not WGET_LUA:
    raise Exception("No usable Wget+Lua found.")

###########################################################################
# The version number of this pipeline definition.
#
# Update this each time you make a non-cosmetic change.
# It will be added to the WARC files and reported to the tracker.
VERSION = "20160605.01"
USER_AGENT = 'ArchiveTeam'
TRACKER_ID = 'yuku'
TRACKER_HOST = 'tracker.archiveteam.org'
###########################################################################
# This section defines project-specific tasks.
#
# Simple tasks (tasks that do not need any concurrency) are based on the
# SimpleTask class and have a process(item) method that is called for
# each item.
class CheckIP(SimpleTask):
    """Occasionally verify, via DNS, that we are not behind a firewall/proxy."""

    def __init__(self):
        SimpleTask.__init__(self, "CheckIP")
        self._counter = 0

    def process(self, item):
        # Probe only every 10th item; each probe performs six DNS lookups.
        if self._counter <= 0:
            item.log_output('Checking IP address.')
            hosts = ('twitter.com', 'facebook.com', 'youtube.com',
                     'microsoft.com', 'icanhas.cheezburger.com',
                     'archiveteam.org')
            ip_set = {socket.gethostbyname(host) for host in hosts}
            # A transparent proxy typically resolves everything to one IP.
            if len(ip_set) != 6:
                item.log_output('Got IP addresses: {0}'.format(ip_set))
                item.log_output(
                    'Are you behind a firewall/proxy? That is a big no-no!')
                raise Exception(
                    'Are you behind a firewall/proxy? That is a big no-no!')
        # Check only occasionally
        if self._counter <= 0:
            self._counter = 10
        else:
            self._counter -= 1
class PrepareDirectories(SimpleTask):
    """Create a clean per-item work directory and an empty WARC file."""

    def __init__(self, warc_prefix):
        SimpleTask.__init__(self, "PrepareDirectories")
        self.warc_prefix = warc_prefix

    def process(self, item):
        escaped_item_name = item["item_name"]
        # Characters that are unsafe in file names become underscores.
        for ch in (':', '/', '~'):
            escaped_item_name = escaped_item_name.replace(ch, '_')
        dirname = "/".join((item["data_dir"], escaped_item_name))
        # Remove leftovers from any earlier attempt at this item.
        if os.path.isdir(dirname):
            shutil.rmtree(dirname)
        os.makedirs(dirname)
        item["item_dir"] = dirname
        item["warc_file_base"] = "%s-%s-%s" % (self.warc_prefix,
                                               escaped_item_name,
                                               time.strftime("%Y%m%d-%H%M%S"))
        # Pre-create the (empty) compressed WARC wget will append to.
        open("%(item_dir)s/%(warc_file_base)s.warc.gz" % item, "w").close()
class MoveFiles(SimpleTask):
    """Move the finished WARC into the data dir and remove the work dir."""

    def __init__(self):
        SimpleTask.__init__(self, "MoveFiles")

    def process(self, item):
        # An uncompressed .warc indicates wget was built without zlib.
        uncompressed = "%(item_dir)s/%(warc_file_base)s.warc" % item
        if os.path.exists(uncompressed):
            raise Exception('Please compile wget with zlib support!')
        source = "%(item_dir)s/%(warc_file_base)s.warc.gz" % item
        destination = "%(data_dir)s/%(warc_file_base)s.warc.gz" % item
        os.rename(source, destination)
        shutil.rmtree("%(item_dir)s" % item)
def get_hash(filename):
    """Return the SHA-1 hex digest of the contents of *filename*.

    Reads the file in 1 MiB chunks so arbitrarily large files never have
    to be held in memory at once (the old code did in_file.read() whole).
    """
    digest = hashlib.sha1()
    with open(filename, 'rb') as in_file:
        for chunk in iter(lambda: in_file.read(1 << 20), b''):
            digest.update(chunk)
    return digest.hexdigest()
# Accountability hashes of this pipeline definition and the Lua script;
# they are reported to the tracker via stats_id_function below.
CWD = os.getcwd()
PIPELINE_SHA1 = get_hash(os.path.join(CWD, 'pipeline.py'))
LUA_SHA1 = get_hash(os.path.join(CWD, 'yuku.lua'))
def stats_id_function(item):
    """Return accountability metadata (script hashes, Python version)
    that is attached to the stats sent to the tracker."""
    # NEW for 2014! Some accountability hashes and stats.
    return {
        'pipeline_hash': PIPELINE_SHA1,
        'lua_hash': LUA_SHA1,
        'python_version': sys.version,
    }
class WgetArgs(object):
    """Build the wget-lua command line for one tracker item.

    realize() also parses the item name and stores its components back on
    the item so later pipeline tasks (env vars, stats) can use them.
    """

    def realize(self, item):
        wget_args = [
            WGET_LUA,
            "-U", USER_AGENT,
            "-nv",
            "--lua-script", "yuku.lua",
            "-o", ItemInterpolation("%(item_dir)s/wget.log"),
            "--no-check-certificate",
            "--output-document", ItemInterpolation("%(item_dir)s/wget.tmp"),
            "--truncate-output",
            "-e", "robots=off",
            "--rotate-dns",
            "--recursive", "--level=inf",
            "--no-parent",
            "--no-cookies",
            "--page-requisites",
            "--timeout", "30",
            "--tries", "inf",
            "--domains", "yuku.com",
            "--span-hosts",
            "--waitretry", "30",
            "--warc-file", ItemInterpolation("%(item_dir)s/%(warc_file_base)s"),
            "--warc-header", "operator: Archive Team",
            "--warc-header", "yuku-dld-script-version: " + VERSION,
            "--warc-header", ItemInterpolation("yuku-user: %(item_name)s"),
        ]

        # Example item: yuku:10threads:deltasforest29697:17
        item_name = item['item_name']
        assert ':' in item_name
        item_name, item_type, item_value, item_thread = item_name.split(':', 3)

        item['item_type'] = item_type
        item['item_value'] = item_value
        item['item_thread'] = item_thread

        assert item_type in ('thread', '10threads')

        if item_type == 'thread':
            # A single forum thread.
            wget_args.append('http://%s.yuku.com/topic/%s/'%(item_value, item_thread))
        elif item_type == '10threads':
            # Ten consecutive thread IDs: the given prefix plus digits 0-9.
            suffixes = string.digits
            for suffix in suffixes:
                wget_args.append('http://%s.yuku.com/topic/%s%s/'%(item_value, item_thread, suffix))
        else:
            raise Exception('Unknown item')

        # 'bind_address' may be injected into the module globals by the
        # warrior runtime configuration.
        if 'bind_address' in globals():
            wget_args.extend(['--bind-address', globals()['bind_address']])
            print('')
            print('*** Wget will bind address at {0} ***'.format(
                globals()['bind_address']))
            print('')

        return realize(wget_args, item)
###########################################################################
# Initialize the project.
#
# This will be shown in the warrior management panel. The logo should not
# be too big. The deadline is optional.
project = Project(
    title="yuku",
    project_html="""
    <img class="project-logo" alt="Project logo" src="http://archiveteam.org/images/5/58/Yuku-beta-logo.gif" height="50px" title=""/>
    <h2>yuku.com<span class="links"><a href="http://yuku.com/">Website</a> &middot; <a href="http://tracker.archiveteam.org/yuku/">Leaderboard</a></span></h2>
    <p>Yuku is currently very unstable and hosting a lot of forums.</p>
    """
)
# NOTE(review): 'downloader' is not defined anywhere in this file; it is
# presumably injected into the module globals by the seesaw warrior runtime
# before the pipeline file is executed -- confirm against the runner.
pipeline = Pipeline(
    CheckIP(),
    GetItemFromTracker("http://%s/%s" % (TRACKER_HOST, TRACKER_ID), downloader,
        VERSION),
    PrepareDirectories(warc_prefix="yuku"),
    WgetDownload(
        WgetArgs(),
        max_tries=2,
        # wget exit codes 4 (network) and 8 (server errors) are acceptable.
        accept_on_exit_code=[0, 4, 8],
        env={
            "item_dir": ItemValue("item_dir"),
            "item_thread": ItemValue("item_thread"),
            "item_value": ItemValue("item_value"),
            "item_type": ItemValue("item_type"),
        }
    ),
    PrepareStatsForTracker(
        defaults={"downloader": downloader, "version": VERSION},
        file_groups={
            "data": [
                ItemInterpolation("%(item_dir)s/%(warc_file_base)s.warc.gz")
            ]
        },
        id_function=stats_id_function,
    ),
    MoveFiles(),
    # Cap the number of simultaneous rsync uploads across all pipelines.
    LimitConcurrent(NumberConfigValue(min=1, max=4, default="1",
        name="shared:rsync_threads", title="Rsync threads",
        description="The maximum number of concurrent uploads."),
        UploadWithTracker(
            "http://%s/%s" % (TRACKER_HOST, TRACKER_ID),
            downloader=downloader,
            version=VERSION,
            files=[
                ItemInterpolation("%(data_dir)s/%(warc_file_base)s.warc.gz")
            ],
            rsync_target_source_path=ItemInterpolation("%(data_dir)s/"),
            rsync_extra_args=[
                "--recursive",
                "--partial",
                "--partial-dir", ".rsync-tmp",
            ]
        ),
    ),
    SendDoneToTracker(
        tracker_url="http://%s/%s" % (TRACKER_HOST, TRACKER_ID),
        stats=ItemValue("stats")
    )
)
| |
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from qtpy import QtGui
from qtconsole.qstringhelpers import qstring_length
from ipython_genutils.py3compat import PY3, string_types
from pygments.formatters.html import HtmlFormatter
from pygments.lexer import RegexLexer, _TokenType, Text, Error
from pygments.lexers import PythonLexer, Python3Lexer
from pygments.styles import get_style_by_name
def get_tokens_unprocessed(self, text, stack=('root',)):
    """ Split ``text`` into (tokentype, text) pairs.

        Monkeypatched to store the final stack on the object itself.

        The `text` parameter this gets passed is only the current line, so to
        highlight things like multiline strings correctly, we need to retrieve
        the state from the previous line (this is done in PygmentsHighlighter,
        below), and use it to continue processing the current line.
    """
    pos = 0
    tokendefs = self._tokens
    # Resume from the state stack the previous line left behind, if any.
    if hasattr(self, '_saved_state_stack'):
        statestack = list(self._saved_state_stack)
    else:
        statestack = list(stack)
    statetokens = tokendefs[statestack[-1]]
    while 1:
        for rexmatch, action, new_state in statetokens:
            m = rexmatch(text, pos)
            if m:
                if action is not None:
                    if type(action) is _TokenType:
                        yield pos, action, m.group()
                    else:
                        # Action is a callback producing its own tokens.
                        for item in action(self, m):
                            yield item
                pos = m.end()
                if new_state is not None:
                    # state transition
                    if isinstance(new_state, tuple):
                        for state in new_state:
                            if state == '#pop':
                                statestack.pop()
                            elif state == '#push':
                                statestack.append(statestack[-1])
                            else:
                                statestack.append(state)
                    elif isinstance(new_state, int):
                        # pop
                        del statestack[new_state:]
                    elif new_state == '#push':
                        statestack.append(statestack[-1])
                    else:
                        assert False, "wrong state def: %r" % new_state
                    statetokens = tokendefs[statestack[-1]]
                break
        else:
            # No rule matched at this position.
            try:
                if text[pos] == '\n':
                    # at EOL, reset state to "root"
                    pos += 1
                    statestack = ['root']
                    statetokens = tokendefs['root']
                    yield pos, Text, u'\n'
                    continue
                yield pos, Error, text[pos]
                pos += 1
            except IndexError:
                break
    # Persist the final stack so the next line can continue from it.
    self._saved_state_stack = list(statestack)
# Monkeypatch! Replace RegexLexer's tokenizer with the state-saving
# version defined above, for all lexers derived from it.
RegexLexer.get_tokens_unprocessed = get_tokens_unprocessed
class PygmentsBlockUserData(QtGui.QTextBlockUserData):
    """ Storage for the user data associated with each line.
    """

    # Lexer state stack in effect at the end of the line.
    syntax_stack = ('root',)

    def __init__(self, **kwds):
        # Every keyword argument becomes an attribute on this instance.
        for name in kwds:
            setattr(self, name, kwds[name])
        QtGui.QTextBlockUserData.__init__(self)

    def __repr__(self):
        shown = ', '.join('%s=%r' % (attr, getattr(self, attr))
                          for attr in ['syntax_stack'])
        return 'PygmentsBlockUserData(%s)' % shown
class PygmentsHighlighter(QtGui.QSyntaxHighlighter):
    """ Syntax highlighter that uses Pygments for parsing. """

    #---------------------------------------------------------------------------
    # 'QSyntaxHighlighter' interface
    #---------------------------------------------------------------------------

    def __init__(self, parent, lexer=None):
        super(PygmentsHighlighter, self).__init__(parent)

        self._document = self.document()
        self._formatter = HtmlFormatter(nowrap=True)
        self.set_style('default')
        if lexer is not None:
            self._lexer = lexer
        else:
            if PY3:
                self._lexer = Python3Lexer()
            else:
                self._lexer = PythonLexer()

    def highlightBlock(self, string):
        """ Highlight a block of text.
        """
        # Restore the lexer state saved by the previous line (see the
        # monkeypatched RegexLexer.get_tokens_unprocessed above) so that
        # multiline constructs highlight correctly.
        prev_data = self.currentBlock().previous().userData()
        if prev_data is not None:
            self._lexer._saved_state_stack = prev_data.syntax_stack
        elif hasattr(self._lexer, '_saved_state_stack'):
            del self._lexer._saved_state_stack

        # Lex the text using Pygments
        index = 0
        for token, text in self._lexer.get_tokens(string):
            length = qstring_length(text)
            self.setFormat(index, length, self._get_format(token))
            index += length

        if hasattr(self._lexer, '_saved_state_stack'):
            data = PygmentsBlockUserData(
                syntax_stack=self._lexer._saved_state_stack)
            self.currentBlock().setUserData(data)
            # Clean up for the next go-round.
            del self._lexer._saved_state_stack

    #---------------------------------------------------------------------------
    # 'PygmentsHighlighter' interface
    #---------------------------------------------------------------------------

    def set_style(self, style):
        """ Sets the style to the specified Pygments style.
        """
        if isinstance(style, string_types):
            style = get_style_by_name(style)
        self._style = style
        self._clear_caches()

    def set_style_sheet(self, stylesheet):
        """ Sets a CSS stylesheet. The classes in the stylesheet should
        correspond to those generated by:

            pygmentize -S <style> -f html

        Note that 'set_style' and 'set_style_sheet' completely override each
        other, i.e. they cannot be used in conjunction.
        """
        self._document.setDefaultStyleSheet(stylesheet)
        # _style is None signals _get_format to use the document stylesheet.
        self._style = None
        self._clear_caches()

    #---------------------------------------------------------------------------
    # Protected interface
    #---------------------------------------------------------------------------

    def _clear_caches(self):
        """ Clear caches for brushes and formats.
        """
        self._brushes = {}
        self._formats = {}

    def _get_format(self, token):
        """ Returns a QTextCharFormat for token or None.
        """
        if token in self._formats:
            return self._formats[token]

        if self._style is None:
            result = self._get_format_from_document(token, self._document)
        else:
            result = self._get_format_from_style(token, self._style)

        self._formats[token] = result
        return result

    def _get_format_from_document(self, token, document):
        """ Returns a QTextCharFormat for token by rendering a dummy token
            through the HTML formatter and reading back the resulting format.
        """
        code, html = next(self._formatter._format_lines([(token, u'dummy')]))
        self._document.setHtml(html)
        return QtGui.QTextCursor(self._document).charFormat()

    def _get_format_from_style(self, token, style):
        """ Returns a QTextCharFormat for token by reading a Pygments style.
        """
        result = QtGui.QTextCharFormat()
        # Translate each Pygments style key into the Qt equivalent.
        for key, value in style.style_for_token(token).items():
            if value:
                if key == 'color':
                    result.setForeground(self._get_brush(value))
                elif key == 'bgcolor':
                    result.setBackground(self._get_brush(value))
                elif key == 'bold':
                    result.setFontWeight(QtGui.QFont.Bold)
                elif key == 'italic':
                    result.setFontItalic(True)
                elif key == 'underline':
                    result.setUnderlineStyle(
                        QtGui.QTextCharFormat.SingleUnderline)
                elif key == 'sans':
                    result.setFontStyleHint(QtGui.QFont.SansSerif)
                elif key == 'roman':
                    result.setFontStyleHint(QtGui.QFont.Times)
                elif key == 'mono':
                    result.setFontStyleHint(QtGui.QFont.TypeWriter)
        return result

    def _get_brush(self, color):
        """ Returns a brush for the color.
        """
        result = self._brushes.get(color)
        if result is None:
            qcolor = self._get_color(color)
            result = QtGui.QBrush(qcolor)
            self._brushes[color] = result
        return result

    def _get_color(self, color):
        """ Returns a QColor built from a Pygments color string.
        """
        # Pygments colors are 'RRGGBB' hex strings (no leading '#').
        qcolor = QtGui.QColor()
        qcolor.setRgb(int(color[:2], base=16),
                      int(color[2:4], base=16),
                      int(color[4:6], base=16))
        return qcolor
| |
"""A web front end for the Lytspel converter."""
from collections import OrderedDict
from glob import glob
import os
from os import path
import logging
import re
import string
from time import time
from typing import Match, NamedTuple, Optional, Sequence, Tuple
# The following import is for mypy only
from typing import Dict #pylint: disable=unused-import
from uuid import uuid4
from flask import (Flask, flash, make_response, Markup, render_template, request, redirect,
send_file, send_from_directory)
from flask.logging import create_logger
import misaka
from werkzeug.utils import secure_filename
from werkzeug.wrappers import Response
from .conv import Converter
from .util import readfile
##### Types #####
PageData = NamedTuple('PageData', [('title', str), ('content', Markup)])
SamplePara = NamedTuple('SamplePara', [('tradspell', Markup), ('lytspel', Markup)])
SamplePageData = NamedTuple('SamplePageData', [('title', str), ('paras', Sequence[SamplePara])])
##### Helpers for building immutable values #####
def replace_first_line(text: str, first_line: str) -> str:
    """Return a copy of 'text' whose first line is replaced by 'first_line'."""
    body = text.splitlines()
    body.pop(0)  # IndexError on empty input, matching the old index assignment
    return '\n'.join([first_line] + body)
def split_at(text: str, sep: str) -> Tuple[str, str]:
    """Split 'text' at the specified separator.

    Returns a 2-tuple: the part before the separator and the rest of the
    string (starting with the separator).

    Raises a ValueError if 'text' does not contain 'sep'.
    """
    before, found, after = text.partition(sep)
    if not found:
        raise ValueError('"{}" not found in string'.format(sep))
    return before, found + after
def hexify_string(text: str) -> str:
    """Replace each character in a string by the corresponding HTML character entity."""
    entities = ['&#x{:x};'.format(ord(ch)) for ch in text]
    return ''.join(entities)
def text_to_id(text: str) -> str:
    """Convert a text (section title or similar) into an ID.

    Outer whitespace and punctuation are stripped, letters lowercased, and
    each inner whitespace run becomes a single hyphen.
    """
    no_punct = str.maketrans('', '', string.punctuation)
    words = text.strip().lower().translate(no_punct).split()
    return '-'.join(words)
def add_section_anchor(matchobj: Match) -> str:
    """Add an anchor (ID) derived from the section title to a header tag.

    Note: repeated sections with identical titles would get duplicate IDs.
    """
    anchor = ' id="{}">'.format(text_to_id(matchobj.group(1)))
    return matchobj.group(0).replace('>', anchor, 1)
def protect_mailto(matchobj: Match) -> str:
    """Spam-protect a mailto link by hex-encoding its target (and its
    visible text too, if that text is itself an email address)."""
    target = hexify_string(matchobj.group(1))
    visible = matchobj.group(2)
    if '@' in visible:
        visible = hexify_string(visible)
    return '<a href="mailto:{}">{}</a>'.format(target, visible)
def markdown_markup(text: str,
                    move_headers_up: bool = False,
                    protect_email_addresses: bool = False) -> Markup:
    """Parse a text as Markdown and style it for Bootstrap.

    If 'move_headers_up' is True, all headers are moved one level up
    (H2 becomes H1, H3 becomes H2 and so on).

    If 'protect_email_addresses' is True, mailto links in the generated
    HTML are obfuscated to hinder email-address harvesting.
    """
    if move_headers_up:
        text = text.replace('## ', '# ')
    rendered = misaka.smartypants(misaka.html(text))
    rendered = rendered.replace('&#39;', '&rsquo;')  # smartypants sometimes gets this wrong
    rendered = rendered.replace('&reg;', '(r)')      # revert unwanted replacement
    rendered = rendered.replace('<blockquote>', '<blockquote class="alert alert-secondary">')
    rendered = rendered.replace('<pre>', '<pre class="alert alert-primary">')
    # Add anchors to section headers
    rendered = re.sub('<h2>(.*?)</h2>', add_section_anchor, rendered)
    if protect_email_addresses:
        rendered = re.sub('<a href="mailto:([^"]+)">([^<]+)</a>', protect_mailto, rendered)
    return Markup(rendered)
def extract_title_from_text(text: str) -> str:
    """Extract and return the title line from a text written in Markdown.

    Returns the first line of the original text, minus any header markup
    ('#') and spaces at the start of the line.
    """
    first = text.partition('\n')[0]
    return first.lstrip('# ')
def build_page_dict() -> 'OrderedDict[str, PageData]':
    """Build a dictionary from URLs to pre-rendered PageData.

    Insertion order matters: it determines the navbar order, and the
    README is consumed piecewise via chained split_at() calls.
    """
    # pylint: disable=too-many-locals
    result = OrderedDict()  # type: OrderedDict[str, PageData]
    readme = readfile('README.md')
    idea, rest = split_at(readme, '## The Rules of Lytspel')
    # Replace title
    title = 'Idea and Motivation'
    idea = replace_first_line(idea, '# ' + title)
    result['idea'] = PageData(title, markdown_markup(idea))
    # We render this file now so it will be inserted into the navbar in the desired place
    overview = readfile('docs/lytspel-on-two-pages.md')
    overview = replace_first_line(overview, '# Lytspel in Brief')
    result['overview'] = PageData('Brief Overview', markdown_markup(overview))
    # Returning to the README
    rules, rest = split_at(rest, '## International')
    result['rules'] = PageData('Complete Ruleset', markdown_markup(rules, move_headers_up=True))
    intl, rest = split_at(rest, '## Limitations')
    title = extract_title_from_text(intl)
    # Add link to earlier page
    intl = intl.replace('have already been motivated', '[have already been motivated](/rules)', 1)
    result['intl'] = PageData(title, markdown_markup(intl, move_headers_up=True))
    limitations = split_at(rest, '## Other')[0]
    result['limitations'] = PageData('Dictionary Limitations',
                                     markdown_markup(limitations, move_headers_up=True))
    cli = readfile('INSTALL-USE.md')
    cli = replace_first_line(cli, '# Command-Line Program')
    result['cli'] = PageData('Command-Line Version', markdown_markup(cli))
    # Add files from webfiles directory in desired order
    for basename in ('privacy', 'contact'):
        text = readfile('webfiles/{}.md'.format(basename))
        title = extract_title_from_text(text)
        result[basename] = PageData(title, markdown_markup(text, protect_email_addresses=True))
    return result
def build_sample_dict() -> 'Dict[str, SamplePageData]':
    """Build a dictionary from URLs to the contents of sample pages."""
    result = OrderedDict()  # type: Dict[str, SamplePageData]
    conv = Converter()
    for filename in glob('samples/*.md'):
        basename = path.splitext(path.basename(filename))[0]
        if basename == 'NOTE':
            continue  # Skip (not a sample)
        paras = []
        title = None
        source = readfile(filename)
        # Split the source into paragraphs and convert each one.
        for source_para in re.split('\n\n', source):
            converted = conv.convert_para(source_para)
            paras.append(SamplePara(markdown_markup(source_para),
                                    markdown_markup(converted)))
            if not title:
                # Page title comes from the converted first paragraph.
                title = extract_title_from_text(converted)
        result[basename] = SamplePageData(title, paras)
    return result
def format_nav_item(url: str, title: str) -> str:
    """Format a single entry for the navigation bar (navbar)."""
    template = '<li class="nav-item"><a class="nav-link" href="{}">{}</a></li>'
    return template.format(url, title)
def render_nav_items(page_dict: 'OrderedDict[str, PageData]',
                     sample_dict: 'Dict[str, SamplePageData]') -> Markup:
    """Pre-render navbar items for quick insertion into all pages."""
    items = [format_nav_item('/', 'Converter')]
    items += [format_nav_item('/' + url, data.title)
              for url, data in page_dict.items()]
    # Separator introducing the sample pages.
    items.append('<li class="nav-item">'
                 '<span class="navbar-text navbar-sep border-top border-2 border-info">'
                 'Samples:</span></li>')
    items += [format_nav_item('/sample/' + url, sample_dict[url].title)
              for url in sorted(sample_dict.keys())]
    return Markup('\n'.join(items))
##### Constants and immutable values #####

# A mapping from allowed file extensions to their MIME types.
# Note: keep this in sync with the "accept" list in templates/startpage.html.
ALLOWED_EXTENSIONS = {
    'epub': 'application/epub+zip',
    'htm': 'text/html',
    'html': 'text/html',
    'markdown': 'text/markdown',
    'md': 'text/markdown',
    'rst': 'text/x-rst',
    'txt': 'text/plain',
    'xht': 'application/xhtml+xml',
    'xhtml': 'application/xhtml+xml',
    'xml': 'application/xml'
}

HOME = path.expanduser('~')

# Upload size limit, enforced manually in convert_file (not via Flask).
MAX_FILE_SIZE_IN_MB = 10
MAX_FILE_SIZE_IN_B = MAX_FILE_SIZE_IN_MB * 1024 * 1024

# How long (in seconds) HTML pages (except the dynamic start page) should be cached)
HTML_MAX_AGE = 3*60*60  # 3 hours

MULTIPLE_SLASHES_RE = re.compile('//+')

# Pre-rendered page content and navbar, built once at import time.
PAGE_DICT = build_page_dict()
SAMPLE_DICT = build_sample_dict()
INTRO_TEXT = markdown_markup(readfile('webfiles/intro.md'))
NAV_ITEMS = render_nav_items(PAGE_DICT, SAMPLE_DICT)

SERVER_SOFTWARE = os.environ.get('SERVER_SOFTWARE', 'flask')

##### App config #####

app = Flask(__name__)  # pylint: disable=invalid-name
app.config.from_pyfile('web.cfg')
LOG = create_logger(app)

# Set suitable default values
app.config.setdefault('UPLOAD_FOLDER', HOME + '/webdata/uploads')

# Configure logging: under gunicorn, reuse its handlers and log level.
if 'gunicorn' in SERVER_SOFTWARE:
    gunicorn_logger = logging.getLogger('gunicorn.error')  # pylint: disable=invalid-name
    LOG.handlers = gunicorn_logger.handlers
    LOG.setLevel(gunicorn_logger.level)
LOG.info('App ready to serve under %s', SERVER_SOFTWARE)
@app.before_request
def normalize_url() -> Optional[Response]:
    """Convert all request URLs to lower-case and strip spurious slashes."""
    orig_path = request.path
    new_path = orig_path
    first_char = new_path[:1]
    rest = new_path[1:]
    if first_char == '/' and (rest.startswith('/') or rest.endswith('/')):
        # Drop slashes at the end and all but the very first at the start
        new_path = first_char + rest.strip('/')
    if '//' in new_path:
        # Collapse repeated slashes into a single one
        new_path = MULTIPLE_SLASHES_RE.sub('/', new_path)
    if not new_path.islower():
        # We never use upper-case letters in URLs
        new_path = new_path.lower()
    if new_path == orig_path:
        return None  # let request pass as is
    log_web_event('URL normalization: Redirecting from %s to %s', orig_path, new_path)
    return redirect(new_path)
@app.errorhandler(404)
def page_not_found(err):
    """Send requests for non-existing pages back to the start page."""
    # pylint: disable=unused-argument
    log_web_event('%s: Redirecting to start page', request.path)
    return redirect('/')
##### App endpoints #####
@app.route("/", methods=['GET', 'POST'])
def startpage() -> str:
    """Main entry point."""
    tradspell = ''
    lytspel = ''
    if request.method == 'POST':
        # Enforce maximum length to prevent DOS attacks
        tradspell = request.form['tradspell'][:21000]
        if tradspell:
            lytspel = Converter().convert_para(tradspell, False)
            log_web_event('/: Converted %d to %d characters',
                          len(tradspell), len(lytspel))
    else:
        log_web_event()
    return render_template('startpage.html',
                           nav_items=NAV_ITEMS, form=request.form, intro=INTRO_TEXT,
                           tradspell=tradspell, lytspel=lytspel)
@app.route("/file", methods=['GET', 'POST'])
def convert_file() -> Response:
    """Convert an uploaded document and offer the result for download.

    POST: validate the upload, save it under a random name, convert it,
    and send the converted file back as an attachment.
    GET: redirect to the start page.
    """
    # pylint: disable=too-many-return-statements
    if request.method == 'POST':
        # Check if the post request has the file part
        if 'file' not in request.files:
            return redirect_with_error('No selected file')
        file = request.files['file']
        # If user does not select a file, browser submits an empty part without filename
        if file.filename == '':
            return redirect_with_error('No selected file')
        if not allowed_file(file.filename):
            return redirect_with_error(
                'Unsupported file type (please select a text, HTML, or epub document)')
        # Determine file extension and name for output file
        source_name = secure_filename(file.filename)
        root, ext = path.splitext(source_name)
        target_name = '{}-lytspel{}'.format(root, ext)
        # Save file locally using a random name (otherwise there might be collisions)
        uid = str(uuid4())
        upload_folder = app.config['UPLOAD_FOLDER']
        in_file_name = '{}-in{}'.format(uid, ext)
        in_file_path = path.join(upload_folder, in_file_name)
        out_file_name = '{}-out{}'.format(uid, ext)
        out_file_path = path.join(upload_folder, out_file_name)
        file.save(in_file_path)
        # Delete rather than converting files that are too large and return an error message
        # (we don't use Flask's MAX_CONTENT_LENGTH setting since that aborts connections
        # in a user-unfriendly way)
        if os.stat(in_file_path).st_size > MAX_FILE_SIZE_IN_B:
            os.remove(in_file_path)
            return redirect_with_error(
                'File too large (at most {} MB are allowed)'.format(MAX_FILE_SIZE_IN_MB))
        # Convert file and offer it for download
        conv = Converter()
        try:
            conv.convert_file(in_file_path, out_file_path)
        except Exception as err:  # pylint: disable=broad-except
            # Deliberately broad: any conversion failure becomes a user-visible flash.
            return redirect_with_error('Could not convert file: {}'.format(err))
        norm_ext = ext[1:].lower()
        log_web_event('/file: Converted %s file with %d bytes to one with %d bytes',
                      norm_ext, path.getsize(in_file_path), path.getsize(out_file_path))
        return send_from_directory(
            upload_folder, out_file_name, as_attachment=True, attachment_filename=target_name,
            mimetype=ALLOWED_EXTENSIONS.get(norm_ext), cache_timeout=0, add_etags=False)
    # GET: redirect to start view
    log_web_event('/file GET: Redirecting to start page')
    return redirect('/')
@app.route("/favicon.ico", methods=['GET'])
def favicon() -> Response:
    """Serve /favicon.ico for old browsers by redirecting into /static."""
    log_web_event('/favicon.ico: Redirecting to /static/favicon.ico')
    return redirect('/static/favicon.ico')
@app.route("/lytspel-on-two-pages.pdf", methods=['GET'])
def two_page_pdf() -> Response:
    """Serve the two-page overview PDF document."""
    log_web_event()
    return send_file('../docs/lytspel-on-two-pages.pdf', mimetype='application/pdf')
@app.route("/robots.txt", methods=['GET'])
def robots_txt() -> Response:
    """Serve the site's robots.txt file."""
    log_web_event()
    return send_file('../webfiles/robots.txt', mimetype='text/plain')
@app.route("/<localpath>", methods=['GET'])
def doc_page(localpath: str) -> Response:
    """Show a page from the documentation, or fall back to the start page."""
    page_data = PAGE_DICT.get(localpath)
    if not page_data:
        log_web_event('%s: Redirecting to start page', request.path)
        return redirect('/')  # Redirect to start page
    log_web_event()
    return cacheable(render_template(
        'base.html', nav_items=NAV_ITEMS, content=page_data.content, title=page_data.title))
@app.route("/sample/<localpath>", methods=['GET'])
def sample_page(localpath: str) -> Response:
    """Show a text sample, or fall back to the start page."""
    page_data = SAMPLE_DICT.get(localpath)
    if not page_data:
        log_web_event('%s: Redirecting to start page', request.path)
        return redirect('/')  # Redirect to start page
    log_web_event()
    return cacheable(render_template(
        'sample.html', nav_items=NAV_ITEMS, title=page_data.title, paras=page_data.paras))
##### Helper functions #####
def log_web_event(msg: Optional[str] = None, *args) -> None:
    """Log an info message in the context of a web request.

    Any 'args' are merged into 'msg' using the string formatting operator.
    Additionally, the user agent making the request will be added to the
    logged message.

    If 'msg' is omitted, a default message noticing that the current request
    path was fetched will be logged.
    """
    # pylint: disable=keyword-arg-before-vararg
    # FIX: annotation was 'str = None'; None is a legal default, so Optional.
    if not msg:
        msg = '%s fetched'
        args = (request.path,)
    msg += ' (user agent: %s)'
    agent = request.user_agent
    agent_string = '{}/{} {}'.format(
        agent.platform or '-', agent.browser or '-', agent.version or '-')  # type: ignore
    if agent_string == '-/- -':
        # Log the raw User-Agent header instead (if sent)
        agent_string = '"{}"'.format(agent.string or '')
    args = (*args, agent_string)
    LOG.info(msg, *args)
def redirect_with_error(msg: str, url: str = '/') -> Response:
    """Redirect to a specific URL, flashing an error message."""
    log_web_event('Error redirect to %s with flash message: %s', url, msg)
    flash(msg)
    return redirect(url)
def allowed_file(filename: str) -> bool:
    """Check whether an uploaded file has one of the supported extensions."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
def cacheable(resp_body: str, cache_timeout: int = HTML_MAX_AGE) -> Response:
    """Add Cache-Control headers to a response, indicating that it may be cached."""
    response = make_response(resp_body)
    response.cache_control.public = True
    response.cache_control.max_age = cache_timeout
    # Also set an absolute expiry for HTTP/1.0-style caches.
    response.expires = int(time() + cache_timeout)
    return response
if __name__ == '__main__':
    # Run Flask's built-in development server when invoked directly.
    app.run()
| |
"""Monkeypatching and mocking functionality."""
import os
import re
import sys
import warnings
from contextlib import contextmanager
from typing import Any
from typing import Generator
from typing import List
from typing import MutableMapping
from typing import Optional
from typing import overload
from typing import Tuple
from typing import TypeVar
from typing import Union
from _pytest.compat import final
from _pytest.fixtures import fixture
from _pytest.warning_types import PytestWarning
# Extracts the module name from Python's "No module named X" ImportError text.
RE_IMPORT_ERROR_NAME = re.compile(r"^No module named (.*)$")

# Generic key/value type variables used by the mapping helpers.
K = TypeVar("K")
V = TypeVar("V")
@fixture
def monkeypatch() -> Generator["MonkeyPatch", None, None]:
    """A convenient fixture for monkey-patching.

    The fixture provides these methods to modify objects, dictionaries or
    os.environ::

        monkeypatch.setattr(obj, name, value, raising=True)
        monkeypatch.delattr(obj, name, raising=True)
        monkeypatch.setitem(mapping, name, value)
        monkeypatch.delitem(obj, name, raising=True)
        monkeypatch.setenv(name, value, prepend=False)
        monkeypatch.delenv(name, raising=True)
        monkeypatch.syspath_prepend(path)
        monkeypatch.chdir(path)

    All modifications will be undone after the requesting test function or
    fixture has finished. The ``raising`` parameter determines if a KeyError
    or AttributeError will be raised if the set/deletion operation has no target.
    """
    patcher = MonkeyPatch()
    yield patcher
    # Revert every modification once the requesting test is done.
    patcher.undo()
def resolve(name: str) -> object:
    """Resolve a dotted *name* to a module or attribute.

    Simplified from zope.dottedname: walks the parts, importing submodules
    on demand when plain attribute access fails.
    """
    parts = name.split(".")
    prefix = parts[0]
    found = __import__(prefix)
    for part in parts[1:]:
        prefix += "." + part
        try:
            found = getattr(found, part)
        except AttributeError:
            pass
        else:
            continue
        # getattr failed: the segment may be a not-yet-imported submodule.
        # We use explicit un-nesting of the handling block in order
        # to avoid nested exceptions.
        try:
            __import__(prefix)
        except ImportError as ex:
            expected = str(ex).split()[-1]
            if expected == prefix:
                raise
            else:
                raise ImportError(f"import error in {prefix}: {ex}") from ex
        found = annotated_getattr(found, part, prefix)
    return found
def annotated_getattr(obj: object, name: str, ann: str) -> object:
    """getattr() with a richer error message.

    On failure the raised AttributeError includes *ann*, the dotted path
    that locates *obj*, to make import-path errors easier to diagnose.
    """
    try:
        return getattr(obj, name)
    except AttributeError as exc:
        message = "{!r} object at {} has no attribute {!r}".format(
            type(obj).__name__, ann, name
        )
        raise AttributeError(message) from exc
def derive_importpath(import_path: str, raising: bool) -> Tuple[str, object]:
    """Split a dotted import path into (attribute name, resolved holder).

    When *raising* is true, verify eagerly that the attribute exists on
    the resolved module/object so callers fail fast.
    """
    is_valid = isinstance(import_path, str) and "." in import_path
    if not is_valid:
        raise TypeError(f"must be absolute import path string, not {import_path!r}")
    module, attr = import_path.rsplit(".", 1)
    target = resolve(module)
    if raising:
        annotated_getattr(target, attr, ann=module)
    return attr, target
class Notset:
    """Sentinel type: distinguishes "no value given" from ``None``."""
    def __repr__(self) -> str:
        return "<notset>"
# Shared sentinel instance used by MonkeyPatch to mark missing attrs/keys.
notset = Notset()
@final
class MonkeyPatch:
    """Helper to conveniently monkeypatch attributes/items/environment
    variables/syspath.

    Returned by the :fixture:`monkeypatch` fixture.

    :versionchanged:: 6.2
        Can now also be used directly as `pytest.MonkeyPatch()`, for when
        the fixture is not available. In this case, use
        :meth:`with MonkeyPatch.context() as mp: <context>` or remember to call
        :meth:`undo` explicitly.
    """
    def __init__(self) -> None:
        # Undo stacks: each entry records the previous value, or ``notset``
        # when the attribute/key did not exist, so undo() can restore it.
        self._setattr: List[Tuple[object, str, object]] = []
        self._setitem: List[Tuple[MutableMapping[Any, Any], object, object]] = []
        self._cwd: Optional[str] = None
        self._savesyspath: Optional[List[str]] = None
    @classmethod
    @contextmanager
    def context(cls) -> Generator["MonkeyPatch", None, None]:
        """Context manager that returns a new :class:`MonkeyPatch` object
        which undoes any patching done inside the ``with`` block upon exit.

        Example:

        .. code-block:: python

            import functools


            def test_partial(monkeypatch):
                with monkeypatch.context() as m:
                    m.setattr(functools, "partial", 3)

        Useful in situations where it is desired to undo some patches before the test ends,
        such as mocking ``stdlib`` functions that might break pytest itself if mocked (for examples
        of this see `#3290 <https://github.com/pytest-dev/pytest/issues/3290>`_).
        """
        m = cls()
        try:
            yield m
        finally:
            m.undo()
    @overload
    def setattr(
        self,
        target: str,
        name: object,
        value: Notset = ...,
        raising: bool = ...,
    ) -> None:
        ...
    @overload
    def setattr(
        self,
        target: object,
        name: str,
        value: object,
        raising: bool = ...,
    ) -> None:
        ...
    def setattr(
        self,
        target: Union[str, object],
        name: Union[object, str],
        value: object = notset,
        raising: bool = True,
    ) -> None:
        """Set attribute value on target, memorizing the old value.

        For convenience you can specify a string as ``target`` which
        will be interpreted as a dotted import path, with the last part
        being the attribute name. For example,
        ``monkeypatch.setattr("os.getcwd", lambda: "/")``
        would set the ``getcwd`` function of the ``os`` module.

        Raises AttributeError if the attribute does not exist, unless
        ``raising`` is set to False.
        """
        __tracebackhide__ = True
        import inspect
        if isinstance(value, Notset):
            # Two-argument form: target is "module.attr", name holds the value.
            if not isinstance(target, str):
                raise TypeError(
                    "use setattr(target, name, value) or "
                    "setattr(target, value) with target being a dotted "
                    "import string"
                )
            value = name
            name, target = derive_importpath(target, raising)
        else:
            if not isinstance(name, str):
                raise TypeError(
                    "use setattr(target, name, value) with name being a string or "
                    "setattr(target, value) with target being a dotted "
                    "import string"
                )
        oldval = getattr(target, name, notset)
        if raising and oldval is notset:
            raise AttributeError(f"{target!r} has no attribute {name!r}")
        # avoid class descriptors like staticmethod/classmethod
        if inspect.isclass(target):
            oldval = target.__dict__.get(name, notset)
        self._setattr.append((target, name, oldval))
        setattr(target, name, value)
    def delattr(
        self,
        target: Union[object, str],
        name: Union[str, Notset] = notset,
        raising: bool = True,
    ) -> None:
        """Delete attribute ``name`` from ``target``.

        If no ``name`` is specified and ``target`` is a string
        it will be interpreted as a dotted import path with the
        last part being the attribute name.

        Raises AttributeError if the attribute does not exist, unless
        ``raising`` is set to False.
        """
        __tracebackhide__ = True
        import inspect
        if isinstance(name, Notset):
            # Single-argument form: target is a dotted import path string.
            if not isinstance(target, str):
                raise TypeError(
                    "use delattr(target, name) or "
                    "delattr(target) with target being a dotted "
                    "import string"
                )
            name, target = derive_importpath(target, raising)
        if not hasattr(target, name):
            if raising:
                raise AttributeError(name)
        else:
            oldval = getattr(target, name, notset)
            # Avoid class descriptors like staticmethod/classmethod.
            if inspect.isclass(target):
                oldval = target.__dict__.get(name, notset)
            self._setattr.append((target, name, oldval))
            delattr(target, name)
    def setitem(self, dic: MutableMapping[K, V], name: K, value: V) -> None:
        """Set dictionary entry ``name`` to value."""
        self._setitem.append((dic, name, dic.get(name, notset)))
        dic[name] = value
    def delitem(self, dic: MutableMapping[K, V], name: K, raising: bool = True) -> None:
        """Delete ``name`` from dict.

        Raises ``KeyError`` if it doesn't exist, unless ``raising`` is set to
        False.
        """
        if name not in dic:
            if raising:
                raise KeyError(name)
        else:
            self._setitem.append((dic, name, dic.get(name, notset)))
            del dic[name]
    def setenv(self, name: str, value: str, prepend: Optional[str] = None) -> None:
        """Set environment variable ``name`` to ``value``.

        If ``prepend`` is a character, read the current environment variable
        value and prepend the ``value`` adjoined with the ``prepend``
        character.
        """
        if not isinstance(value, str):
            # Non-str values worked historically; warn and coerce for now.
            warnings.warn(  # type: ignore[unreachable]
                PytestWarning(
                    "Value of environment variable {name} type should be str, but got "
                    "{value!r} (type: {type}); converted to str implicitly".format(
                        name=name, value=value, type=type(value).__name__
                    )
                ),
                stacklevel=2,
            )
            value = str(value)
        if prepend and name in os.environ:
            value = value + prepend + os.environ[name]
        self.setitem(os.environ, name, value)
    def delenv(self, name: str, raising: bool = True) -> None:
        """Delete ``name`` from the environment.

        Raises ``KeyError`` if it does not exist, unless ``raising`` is set to
        False.
        """
        environ: MutableMapping[str, str] = os.environ
        self.delitem(environ, name, raising=raising)
    def syspath_prepend(self, path) -> None:
        """Prepend ``path`` to ``sys.path`` list of import locations."""
        from pkg_resources import fixup_namespace_packages
        # Save sys.path only once; undo() restores this snapshot wholesale.
        if self._savesyspath is None:
            self._savesyspath = sys.path[:]
        sys.path.insert(0, str(path))
        # https://github.com/pypa/setuptools/blob/d8b901bc/docs/pkg_resources.txt#L162-L171
        fixup_namespace_packages(str(path))
        # A call to syspathinsert() usually means that the caller wants to
        # import some dynamically created files, thus with python3 we
        # invalidate its import caches.
        # This is especially important when any namespace package is in use,
        # since then the mtime based FileFinder cache (that gets created in
        # this case already) gets not invalidated when writing the new files
        # quickly afterwards.
        from importlib import invalidate_caches
        invalidate_caches()
    def chdir(self, path: Union[str, "os.PathLike[str]"]) -> None:
        """Change the current working directory to the specified path.

        Path can be a string or a path object.
        """
        # Remember only the first cwd; undo() returns there directly.
        if self._cwd is None:
            self._cwd = os.getcwd()
        os.chdir(path)
    def undo(self) -> None:
        """Undo previous changes.

        This call consumes the undo stack. Calling it a second time has no
        effect unless you do more monkeypatching after the undo call.

        There is generally no need to call `undo()`, since it is
        called automatically during tear-down.

        Note that the same `monkeypatch` fixture is used across a
        single test function invocation. If `monkeypatch` is used both by
        the test function itself and one of the test fixtures,
        calling `undo()` will undo all of the changes made in
        both functions.
        """
        # Restore attributes in reverse order of patching.
        for obj, name, value in reversed(self._setattr):
            if value is not notset:
                setattr(obj, name, value)
            else:
                delattr(obj, name)
        self._setattr[:] = []
        for dictionary, key, value in reversed(self._setitem):
            if value is notset:
                try:
                    del dictionary[key]
                except KeyError:
                    pass  # Was already deleted, so we have the desired state.
            else:
                dictionary[key] = value
        self._setitem[:] = []
        if self._savesyspath is not None:
            sys.path[:] = self._savesyspath
            self._savesyspath = None
        if self._cwd is not None:
            os.chdir(self._cwd)
            self._cwd = None
| |
#
# Module for starting a process object using os.fork() or CreateProcess()
#
# multiprocessing/forking.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
from __future__ import absolute_import
import os
import sys
import signal
import warnings
from pickle import load, HIGHEST_PROTOCOL
from billiard import util
from billiard import process
from billiard.five import int_types
from .compat import _winapi as win32
from .reduction import dump
__all__ = ['Popen', 'assert_spawning', 'exit',
'duplicate', 'close']
try:
    WindowsError = WindowsError # noqa
except NameError:
    # Non-Windows platforms: provide a stand-in so ``except WindowsError``
    # clauses elsewhere in this module are always valid.
    class WindowsError(Exception): # noqa
        pass
# Warning text emitted by _Django_old_layout_hack__save (takes the dir via %r).
W_OLD_DJANGO_LAYOUT = """\
Will add directory %r to path! This is necessary to accommodate \
pre-Django 1.4 layouts using setup_environ.
You can skip this warning by adding a DJANGO_SETTINGS_MODULE=settings \
environment variable.
"""
#
# Choose whether to do a fork or spawn (fork+exec) on Unix.
# This affects how some shared resources should be created.
#
_forking_is_enabled = sys.platform != 'win32'
#
# Check that the current thread is spawning a child process
#
def assert_spawning(self):
    """Raise unless the current thread is busy spawning a child process.

    Objects that must only travel to children via inheritance call this
    from their __reduce__ hooks to reject ordinary pickling.
    """
    if Popen.thread_is_spawning():
        return
    raise RuntimeError(
        '%s objects should only be shared between processes'
        ' through inheritance' % type(self).__name__
    )
#
# Unix
#
if sys.platform != 'win32':
try:
import thread
except ImportError:
import _thread as thread # noqa
import select
WINEXE = False
WINSERVICE = False
exit = os._exit
duplicate = os.dup
close = os.close
_select = util._eintr_retry(select.select)
#
# We define a Popen class similar to the one from subprocess, but
# whose constructor takes a process object as its argument.
#
    class Popen(object):
        '''
        Start the code of a process object in a child, via os.fork()
        (or fork+exec when forking is disabled).
        '''
        # Thread-local flag used by thread_is_spawning()/duplicate_for_child().
        _tls = thread._local()
        def __init__(self, process_obj):
            # register reducers
            from billiard import connection # noqa
            _Django_old_layout_hack__save()
            # Flush stdio so the child does not inherit buffered output twice.
            sys.stdout.flush()
            sys.stderr.flush()
            self.returncode = None
            # Pipe whose read end becomes the "child exited" sentinel.
            r, w = os.pipe()
            self.sentinel = r
            if _forking_is_enabled:
                self.pid = os.fork()
                if self.pid == 0:
                    # Child: run the process object's code and exit.
                    os.close(r)
                    if 'random' in sys.modules:
                        # Re-seed so forked children do not share RNG state.
                        import random
                        random.seed()
                    code = process_obj._bootstrap()
                    os._exit(code)
            else:
                # fork+exec: the child re-executes the interpreter and reads
                # its pickled state over a second pipe.
                from_parent_fd, to_child_fd = os.pipe()
                cmd = get_command_line() + [str(from_parent_fd)]
                self.pid = os.fork()
                if self.pid == 0:
                    os.close(r)
                    os.close(to_child_fd)
                    os.execv(sys.executable, cmd)
                # send information to child
                prep_data = get_preparation_data(process_obj._name)
                os.close(from_parent_fd)
                to_child = os.fdopen(to_child_fd, 'wb')
                Popen._tls.process_handle = self.pid
                try:
                    dump(prep_data, to_child, HIGHEST_PROTOCOL)
                    dump(process_obj, to_child, HIGHEST_PROTOCOL)
                finally:
                    del(Popen._tls.process_handle)
                to_child.close()
            # `w` will be closed when the child exits, at which point `r`
            # will become ready for reading (using e.g. select()).
            os.close(w)
            util.Finalize(self, os.close, (r,))
        def poll(self, flag=os.WNOHANG):
            # Reap the child if possible and cache its exit code.
            if self.returncode is None:
                try:
                    pid, sts = os.waitpid(self.pid, flag)
                except os.error:
                    # Child process not yet created. See #1731717
                    # e.errno == errno.ECHILD == 10
                    return None
                if pid == self.pid:
                    if os.WIFSIGNALED(sts):
                        self.returncode = -os.WTERMSIG(sts)
                    else:
                        assert os.WIFEXITED(sts)
                        self.returncode = os.WEXITSTATUS(sts)
            return self.returncode
        def wait(self, timeout=None):
            # Wait for exit, optionally bounded by select() on the sentinel.
            if self.returncode is None:
                if timeout is not None:
                    r = _select([self.sentinel], [], [], timeout)[0]
                    if not r:
                        return None
                # This shouldn't block if select() returned successfully.
                return self.poll(os.WNOHANG if timeout == 0.0 else 0)
            return self.returncode
        def terminate(self):
            if self.returncode is None:
                try:
                    os.kill(self.pid, signal.SIGTERM)
                except OSError:
                    # Ignore races where the child already exited.
                    if self.wait(timeout=0.1) is None:
                        raise
        @staticmethod
        def thread_is_spawning():
            # With plain fork there is no pickling phase, hence never spawning.
            if _forking_is_enabled:
                return False
            else:
                return getattr(Popen._tls, 'process_handle', None) is not None
        @staticmethod
        def duplicate_for_child(handle):
            # File descriptors are inherited across fork unchanged.
            return handle
#
# Windows
#
else:
try:
import thread
except ImportError:
import _thread as thread # noqa
import msvcrt
try:
import _subprocess
except ImportError:
import _winapi as _subprocess # noqa
#
#
#
TERMINATE = 0x10000
WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False))
WINSERVICE = sys.executable.lower().endswith("pythonservice.exe")
exit = win32.ExitProcess
close = win32.CloseHandle
#
#
#
    def duplicate(handle, target_process=None, inheritable=False):
        '''Duplicate ``handle`` into ``target_process`` (default: this one).'''
        if target_process is None:
            target_process = _subprocess.GetCurrentProcess()
        h = _subprocess.DuplicateHandle(
            _subprocess.GetCurrentProcess(), handle, target_process,
            0, inheritable, _subprocess.DUPLICATE_SAME_ACCESS
        )
        # Before Python 3.3 DuplicateHandle returns a PyHANDLE wrapper;
        # detach it so callers always get a raw integer handle.
        if sys.version_info[0] < 3 or (
                sys.version_info[0] == 3 and sys.version_info[1] < 3):
            h = h.Detach()
        return h
#
# We define a Popen class similar to the one from subprocess, but
# whose constructor takes a process object as its argument.
#
    class Popen(object):
        '''
        Start a subprocess to run the code of a process object
        '''
        # Thread-local flag used by thread_is_spawning()/duplicate_for_child().
        _tls = thread._local()
        def __init__(self, process_obj):
            _Django_old_layout_hack__save()
            # create pipe for communication with child
            rfd, wfd = os.pipe()
            # get handle for read end of the pipe and make it inheritable
            rhandle = duplicate(msvcrt.get_osfhandle(rfd), inheritable=True)
            os.close(rfd)
            # start process
            cmd = get_command_line() + [rhandle]
            cmd = ' '.join('"%s"' % x for x in cmd)
            hp, ht, pid, tid = _subprocess.CreateProcess(
                _python_exe, cmd, None, None, 1, 0, None, None, None
            )
            # Close the thread handle and our copy of the read end; the
            # handles may be raw ints (_winapi) or objects (_subprocess).
            close(ht) if isinstance(ht, int_types) else ht.Close()
            (close(rhandle) if isinstance(rhandle, int_types)
             else rhandle.Close())
            # set attributes of self
            self.pid = pid
            self.returncode = None
            self._handle = hp
            self.sentinel = int(hp)
            # send information to child
            prep_data = get_preparation_data(process_obj._name)
            to_child = os.fdopen(wfd, 'wb')
            Popen._tls.process_handle = int(hp)
            try:
                dump(prep_data, to_child, HIGHEST_PROTOCOL)
                dump(process_obj, to_child, HIGHEST_PROTOCOL)
            finally:
                del Popen._tls.process_handle
            to_child.close()
        @staticmethod
        def thread_is_spawning():
            return getattr(Popen._tls, 'process_handle', None) is not None
        @staticmethod
        def duplicate_for_child(handle):
            # Duplicate the handle into the child currently being spawned.
            return duplicate(handle, Popen._tls.process_handle)
        def wait(self, timeout=None):
            if self.returncode is None:
                if timeout is None:
                    msecs = _subprocess.INFINITE
                else:
                    msecs = max(0, int(timeout * 1000 + 0.5))
                res = _subprocess.WaitForSingleObject(int(self._handle), msecs)
                if res == _subprocess.WAIT_OBJECT_0:
                    code = _subprocess.GetExitCodeProcess(self._handle)
                    # Exit code TERMINATE means terminate() killed the child.
                    if code == TERMINATE:
                        code = -signal.SIGTERM
                    self.returncode = code
            return self.returncode
        def poll(self):
            return self.wait(timeout=0)
        def terminate(self):
            if self.returncode is None:
                try:
                    _subprocess.TerminateProcess(int(self._handle), TERMINATE)
                except WindowsError:
                    # Ignore races where the child already exited.
                    if self.wait(timeout=0.1) is None:
                        raise
#
#
#
if WINSERVICE:
    # pythonservice.exe cannot be used to spawn workers; fall back to the
    # python.exe of the same installation.
    _python_exe = os.path.join(sys.exec_prefix, 'python.exe')
else:
    _python_exe = sys.executable
def set_executable(exe):
    '''Set the Python interpreter used to spawn child processes.'''
    global _python_exe
    _python_exe = exe
def is_forking(argv):
    '''
    Return whether commandline indicates we are forking
    '''
    if len(argv) < 2 or argv[1] != '--billiard-fork':
        return False
    assert len(argv) == 3
    # Mark the environment so child code can detect it was forked by us.
    os.environ["FORKED_BY_MULTIPROCESSING"] = "1"
    return True
def freeze_support():
    '''
    Run code for process object if this is not the main process
    '''
    # When the command line marks a forked child, run its payload and exit
    # instead of continuing with the (frozen) main program.
    if is_forking(sys.argv):
        main()
        sys.exit()
def get_command_line():
    '''
    Returns prefix of command line used for spawning a child process
    '''
    # Spawning before bootstrap is complete would fork the child recursively;
    # refuse with the standard multiprocessing guidance.
    if process.current_process()._identity == () and is_forking(sys.argv):
        raise RuntimeError('''
        Attempt to start a new process before the current process
        has finished its bootstrapping phase.
        This probably means that you have forgotten to use the proper
        idiom in the main module:
            if __name__ == '__main__':
                freeze_support()
                ...
        The "freeze_support()" line can be omitted if the program
        is not going to be frozen to produce a Windows executable.''')
    if getattr(sys, 'frozen', False):
        # Frozen executables re-run themselves with the fork marker.
        return [sys.executable, '--billiard-fork']
    else:
        prog = 'from billiard.forking import main; main()'
        return [_python_exe, '-c', prog, '--billiard-fork']
def _Django_old_layout_hack__save():
    # Record the project directory of a pre-Django-1.4 layout in the
    # environment so forked children can re-add it to sys.path.
    if 'DJANGO_PROJECT_DIR' not in os.environ:
        try:
            settings_name = os.environ['DJANGO_SETTINGS_MODULE']
        except KeyError:
            return  # not using Django.
        conf_settings = sys.modules.get('django.conf.settings')
        configured = conf_settings and conf_settings.configured
        try:
            project_name, _ = settings_name.split('.', 1)
        except ValueError:
            return  # not modified by setup_environ
        project = __import__(project_name)
        try:
            project_dir = os.path.normpath(_module_parent_dir(project))
        except AttributeError:
            return  # dynamically generated module (no __file__)
        if configured:
            # Only warn when settings are actually in use.
            warnings.warn(UserWarning(
                W_OLD_DJANGO_LAYOUT % os.path.realpath(project_dir)
            ))
        os.environ['DJANGO_PROJECT_DIR'] = project_dir
def _Django_old_layout_hack__load():
try:
sys.path.append(os.environ['DJANGO_PROJECT_DIR'])
except KeyError:
pass
def _module_parent_dir(mod):
    """Return the directory that contains the module's directory/file."""
    parent, _basename = os.path.split(_module_dir(mod))
    if not parent or parent == os.curdir:
        # Relative path with no directory part: resolve against the cwd.
        parent = os.getcwd()
    return parent
def _module_dir(mod):
if '__init__.py' in mod.__file__:
return os.path.dirname(mod.__file__)
return mod.__file__
def main():
    '''
    Run code specified by data received over pipe
    '''
    global _forking_is_enabled
    _Django_old_layout_hack__load()
    assert is_forking(sys.argv)
    _forking_is_enabled = False
    # Last argv element is the handle/fd of the pipe from the parent.
    handle = int(sys.argv[-1])
    if sys.platform == 'win32':
        fd = msvcrt.open_osfhandle(handle, os.O_RDONLY)
    else:
        fd = handle
    from_parent = os.fdopen(fd, 'rb')
    process.current_process()._inheriting = True
    # First pickle: preparation data (sys.path, logging config, ...).
    preparation_data = load(from_parent)
    prepare(preparation_data)
    # Huge hack to make logging before Process.run work.
    try:
        os.environ["MP_MAIN_FILE"] = sys.modules["__main__"].__file__
    except KeyError:
        pass
    except AttributeError:
        pass
    loglevel = os.environ.get("_MP_FORK_LOGLEVEL_")
    logfile = os.environ.get("_MP_FORK_LOGFILE_") or None
    format = os.environ.get("_MP_FORK_LOGFORMAT_")
    if loglevel:
        from billiard import util
        import logging
        logger = util.get_logger()
        logger.setLevel(int(loglevel))
        if not logger.handlers:
            logger._rudimentary_setup = True
            logfile = logfile or sys.__stderr__
            if hasattr(logfile, "write"):
                handler = logging.StreamHandler(logfile)
            else:
                handler = logging.FileHandler(logfile)
            formatter = logging.Formatter(
                format or util.DEFAULT_LOGGING_FORMAT,
            )
            handler.setFormatter(formatter)
            logger.addHandler(handler)
    # Second pickle: the process object itself; run it and propagate its
    # exit code.
    self = load(from_parent)
    process.current_process()._inheriting = False
    from_parent.close()
    exitcode = self._bootstrap()
    exit(exitcode)
def get_preparation_data(name):
    '''
    Return info about parent needed by child to unpickle process object
    '''
    from billiard.util import _logger, _log_to_stderr
    d = dict(
        name=name,
        sys_path=sys.path,
        sys_argv=sys.argv,
        log_to_stderr=_log_to_stderr,
        orig_dir=process.ORIGINAL_DIR,
        authkey=process.current_process().authkey,
    )
    if _logger is not None:
        d['log_level'] = _logger.getEffectiveLevel()
    # Frozen executables / Windows services have no importable __main__.
    if not WINEXE and not WINSERVICE:
        main_path = getattr(sys.modules['__main__'], '__file__', None)
        if not main_path and sys.argv[0] not in ('', '-c'):
            main_path = sys.argv[0]
        if main_path is not None:
            # Resolve relative paths against the parent's original cwd.
            if (not os.path.isabs(main_path) and
                    process.ORIGINAL_DIR is not None):
                main_path = os.path.join(process.ORIGINAL_DIR, main_path)
            d['main_path'] = os.path.normpath(main_path)
    return d
#
# Prepare current process
#
# Keeps the original __main__ modules alive after they are replaced below.
old_main_modules = []
def prepare(data):
    '''
    Try to get current process ready to unpickle process object
    '''
    old_main_modules.append(sys.modules['__main__'])
    if 'name' in data:
        process.current_process().name = data['name']
    if 'authkey' in data:
        process.current_process()._authkey = data['authkey']
    if 'log_to_stderr' in data and data['log_to_stderr']:
        util.log_to_stderr()
    if 'log_level' in data:
        util.get_logger().setLevel(data['log_level'])
    if 'sys_path' in data:
        sys.path = data['sys_path']
    if 'sys_argv' in data:
        sys.argv = data['sys_argv']
    if 'dir' in data:
        os.chdir(data['dir'])
    if 'orig_dir' in data:
        process.ORIGINAL_DIR = data['orig_dir']
    if 'main_path' in data:
        # Re-import the parent's main module so the child can unpickle
        # objects defined there.
        main_path = data['main_path']
        main_name = os.path.splitext(os.path.basename(main_path))[0]
        if main_name == '__init__':
            main_name = os.path.basename(os.path.dirname(main_path))
        if main_name == '__main__':
            main_module = sys.modules['__main__']
            main_module.__file__ = main_path
        elif main_name != 'ipython':
            # Main modules not actually called __main__.py may
            # contain additional code that should still be executed
            import imp
            if main_path is None:
                dirs = None
            elif os.path.basename(main_path).startswith('__init__.py'):
                dirs = [os.path.dirname(os.path.dirname(main_path))]
            else:
                dirs = [os.path.dirname(main_path)]
            assert main_name not in sys.modules, main_name
            file, path_name, etc = imp.find_module(main_name, dirs)
            try:
                # We would like to do "imp.load_module('__main__', ...)"
                # here. However, that would cause 'if __name__ ==
                # "__main__"' clauses to be executed.
                main_module = imp.load_module(
                    '__parents_main__', file, path_name, etc
                )
            finally:
                if file:
                    file.close()
            sys.modules['__main__'] = main_module
            main_module.__name__ = '__main__'
            # Try to make the potentially picklable objects in
            # sys.modules['__main__'] realize they are in the main
            # module -- somewhat ugly.
            for obj in list(main_module.__dict__.values()):
                try:
                    if obj.__module__ == '__parents_main__':
                        obj.__module__ = '__main__'
                except Exception:
                    pass
| |
from bz2 import BZ2File
import gzip
from io import BytesIO
import numpy as np
import os
import shutil
from tempfile import NamedTemporaryFile
from sklearn.externals.six import b
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_in
import sklearn
from sklearn.datasets import (load_svmlight_file, load_svmlight_files,
dump_svmlight_file)
# Directory of this test module; svmlight fixture files live in ./data.
currdir = os.path.dirname(os.path.abspath(__file__))
datafile = os.path.join(currdir, "data", "svmlight_classification.txt")
multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt")
invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt")
invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt")
def test_load_svmlight_file():
    """Basic parse of the classification fixture: shape, values, labels."""
    X, y = load_svmlight_file(datafile)
    # test X's shape
    assert_equal(X.indptr.shape[0], 7)
    assert_equal(X.shape[0], 6)
    assert_equal(X.shape[1], 21)
    assert_equal(y.shape[0], 6)
    # test X's non-zero values
    for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5),
                      (1, 5, 1.0), (1, 12, -3),
                      (2, 20, 27)):
        assert_equal(X[i, j], val)
    # tests X's zero values
    assert_equal(X[0, 3], 0)
    assert_equal(X[0, 5], 0)
    assert_equal(X[1, 8], 0)
    assert_equal(X[1, 16], 0)
    assert_equal(X[2, 18], 0)
    # test can change X's values
    X[0, 2] *= 2
    assert_equal(X[0, 2], 5)
    # test y
    assert_array_equal(y, [1, 2, 3, 4, 1, 2])
def test_load_svmlight_file_fd():
    """Loading via an open file descriptor matches loading via a path."""
    X_path, y_path = load_svmlight_file(datafile)
    descriptor = os.open(datafile, os.O_RDONLY)
    try:
        X_fd, y_fd = load_svmlight_file(descriptor)
        assert_array_equal(X_path.data, X_fd.data)
        assert_array_equal(y_path, y_fd)
    finally:
        os.close(descriptor)
def test_load_svmlight_file_multilabel():
    """Multilabel targets are parsed into per-sample label tuples."""
    X, y = load_svmlight_file(multifile, multilabel=True)
    expected_labels = [(0, 1), (2,), (), (1, 2)]
    assert_equal(y, expected_labels)
def test_load_svmlight_files():
    """Loading the same file repeatedly yields identical data and dtypes."""
    X_train, y_train, X_test, y_test = load_svmlight_files(
        [datafile] * 2, dtype=np.float32)
    assert_array_equal(X_train.toarray(), X_test.toarray())
    assert_array_equal(y_train, y_test)
    for matrix in (X_train, X_test):
        assert_equal(matrix.dtype, np.float32)
    X1, y1, X2, y2, X3, y3 = load_svmlight_files(
        [datafile] * 3, dtype=np.float64)
    assert_equal(X1.dtype, X2.dtype)
    assert_equal(X2.dtype, X3.dtype)
    assert_equal(X3.dtype, np.float64)
def test_load_svmlight_file_n_features():
    """n_features can widen the matrix but must not be below the file's."""
    X, y = load_svmlight_file(datafile, n_features=22)
    # test X'shape
    assert_equal(X.indptr.shape[0], 7)
    assert_equal(X.shape[0], 6)
    assert_equal(X.shape[1], 22)
    # test X's non-zero values
    for i, j, val in ((0, 2, 2.5), (0, 10, -5.2),
                      (1, 5, 1.0), (1, 12, -3)):
        assert_equal(X[i, j], val)
    # 21 features in file
    assert_raises(ValueError, load_svmlight_file, datafile, n_features=20)
def test_load_compressed():
    """Round-trip through gzip- and bzip2-compressed copies of the fixture."""
    X, y = load_svmlight_file(datafile)

    with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
        tmp.close()  # necessary under windows
        # Close the compressed writer deterministically: relying on GC to
        # flush it leaks the handle and loses data on non-refcounting
        # interpreters (original passed gzip.open(...) inline, never closed).
        with open(datafile, "rb") as f:
            with gzip.open(tmp.name, "wb") as fout:
                shutil.copyfileobj(f, fout)
        Xgz, ygz = load_svmlight_file(tmp.name)
        # because we "close" it manually and write to it,
        # we need to remove it manually.
        os.remove(tmp.name)
    assert_array_equal(X.toarray(), Xgz.toarray())
    assert_array_equal(y, ygz)

    with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp:
        tmp.close()  # necessary under windows
        with open(datafile, "rb") as f:
            with BZ2File(tmp.name, "wb") as fout:
                shutil.copyfileobj(f, fout)
        Xbz, ybz = load_svmlight_file(tmp.name)
        # because we "close" it manually and write to it,
        # we need to remove it manually.
        os.remove(tmp.name)
    assert_array_equal(X.toarray(), Xbz.toarray())
    assert_array_equal(y, ybz)
@raises(ValueError)
def test_load_invalid_file():
    """A malformed svmlight file raises ValueError."""
    load_svmlight_file(invalidfile)
@raises(ValueError)
def test_load_invalid_order_file():
    """Out-of-order feature indices raise ValueError."""
    load_svmlight_file(invalidfile2)
@raises(ValueError)
def test_load_zero_based():
    """zero_based=False must reject data containing a 0 feature index."""
    f = BytesIO(b("-1 4:1.\n1 0:1\n"))
    load_svmlight_file(f, zero_based=False)
def test_load_zero_based_auto():
    """zero_based='auto' infers indexing jointly across all loaded files."""
    one_based = b("-1 1:1 2:2 3:3\n")
    zero_based = b("-1 0:0 1:1\n")
    # Alone, the one-based data is detected as one-based: 3 features.
    X, y = load_svmlight_file(BytesIO(one_based), zero_based="auto")
    assert_equal(X.shape, (1, 3))
    # Together with zero-based data, both are treated as zero-based.
    X1, y1, X2, y2 = load_svmlight_files(
        [BytesIO(one_based), BytesIO(zero_based)], zero_based="auto")
    assert_equal(X1.shape, (1, 4))
    assert_equal(X2.shape, (1, 4))
def test_load_with_qid():
    """qid attributes are ignored or returned depending on query_id flag."""
    # load svmfile with qid attribute
    data = b("""
    3 qid:1 1:0.53 2:0.12
    2 qid:1 1:0.13 2:0.1
    7 qid:2 1:0.87 2:0.12""")
    X, y = load_svmlight_file(BytesIO(data), query_id=False)
    assert_array_equal(y, [3, 2, 7])
    assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
    res1 = load_svmlight_files([BytesIO(data)], query_id=True)
    res2 = load_svmlight_file(BytesIO(data), query_id=True)
    for X, y, qid in (res1, res2):
        assert_array_equal(y, [3, 2, 7])
        assert_array_equal(qid, [1, 1, 2])
        assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
@raises(ValueError)
def test_load_invalid_file2():
    """One invalid file in a multi-file load fails the whole call."""
    load_svmlight_files([datafile, invalidfile, datafile])
@raises(TypeError)
def test_not_a_filename():
    """A float is neither a path nor a file descriptor."""
    # in python 3 integers are valid file opening arguments (taken as unix
    # file descriptors)
    load_svmlight_file(.42)
@raises(IOError)
def test_invalid_filename():
    """A nonexistent path raises IOError."""
    load_svmlight_file("trou pic nic douille")
def test_dump():
    """dump/load round-trip over dense, sparse and sliced inputs, all
    dtypes and both index bases; also checks the version/base comments."""
    Xs, y = load_svmlight_file(datafile)
    Xd = Xs.toarray()
    # slicing a csr_matrix can unsort its .indices, so test that we sort
    # those correctly
    Xsliced = Xs[np.arange(Xs.shape[0])]
    for X in (Xs, Xd, Xsliced):
        for zero_based in (True, False):
            for dtype in [np.float32, np.float64, np.int32]:
                f = BytesIO()
                # we need to pass a comment to get the version info in;
                # LibSVM doesn't grok comments so they're not put in by
                # default anymore.
                dump_svmlight_file(X.astype(dtype), y, f, comment="test",
                                   zero_based=zero_based)
                f.seek(0)
                comment = f.readline()
                try:
                    comment = str(comment, "utf-8")
                except TypeError:  # fails in Python 2.x
                    pass
                assert_in("scikit-learn %s" % sklearn.__version__, comment)
                comment = f.readline()
                try:
                    comment = str(comment, "utf-8")
                except TypeError:  # fails in Python 2.x
                    pass
                assert_in(["one", "zero"][zero_based] + "-based", comment)
                X2, y2 = load_svmlight_file(f, dtype=dtype,
                                            zero_based=zero_based)
                assert_equal(X2.dtype, dtype)
                assert_array_equal(X2.sorted_indices().indices, X2.indices)
                if dtype == np.float32:
                    assert_array_almost_equal(
                        # allow a rounding error at the last decimal place
                        Xd.astype(dtype), X2.toarray(), 4)
                else:
                    assert_array_almost_equal(
                        # allow a rounding error at the last decimal place
                        Xd.astype(dtype), X2.toarray(), 15)
                assert_array_equal(y, y2)
def test_dump_concise():
    """Dumped values use the most concise decimal representation."""
    one = 1
    two = 2.1
    three = 3.01
    exact = 1.000000000000001
    # loses the last decimal place
    almost = 1.0000000000000001
    X = [[one, two, three, exact, almost],
         [1e9, 2e18, 3e27, 0, 0],
         [0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0]]
    y = [one, two, three, exact, almost]
    f = BytesIO()
    dump_svmlight_file(X, y, f)
    f.seek(0)
    # make sure it's using the most concise format possible
    assert_equal(f.readline(),
                 b("1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n"))
    assert_equal(f.readline(), b("2.1 0:1000000000 1:2e+18 2:3e+27\n"))
    assert_equal(f.readline(), b("3.01 \n"))
    assert_equal(f.readline(), b("1.000000000000001 \n"))
    assert_equal(f.readline(), b("1 \n"))
    f.seek(0)
    # make sure it's correct too :)
    X2, y2 = load_svmlight_file(f)
    assert_array_almost_equal(X, X2.toarray())
    assert_array_equal(y, y2)
def test_dump_comment():
    """Comments round-trip; raw UTF-8 bytes and NUL bytes are rejected."""
    X, y = load_svmlight_file(datafile)
    X = X.toarray()
    f = BytesIO()
    ascii_comment = "This is a comment\nspanning multiple lines."
    dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
    f.seek(0)
    X2, y2 = load_svmlight_file(f, zero_based=False)
    assert_array_almost_equal(X, X2.toarray())
    assert_array_equal(y, y2)
    # XXX we have to update this to support Python 3.x
    utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc")
    f = BytesIO()
    assert_raises(UnicodeDecodeError,
                  dump_svmlight_file, X, y, f, comment=utf8_comment)
    unicode_comment = utf8_comment.decode("utf-8")
    f = BytesIO()
    dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
    f.seek(0)
    X2, y2 = load_svmlight_file(f, zero_based=False)
    assert_array_almost_equal(X, X2.toarray())
    assert_array_equal(y, y2)
    f = BytesIO()
    assert_raises(ValueError,
                  dump_svmlight_file, X, y, f, comment="I've got a \0.")
def test_dump_invalid():
    """dump_svmlight_file rejects mis-shaped or mismatched label arrays."""
    X, y = load_svmlight_file(datafile)
    # 2-D label array is invalid
    two_d_labels = [y]
    assert_raises(ValueError, dump_svmlight_file, X, two_d_labels, BytesIO())
    # label count must match the number of samples
    assert_raises(ValueError, dump_svmlight_file, X, y[:-1], BytesIO())
def test_dump_query_id():
    """Round-trip of a file dumped with per-sample query_id values."""
    X, y = load_svmlight_file(datafile)
    X = X.toarray()
    query_id = np.arange(X.shape[0]) // 2
    buf = BytesIO()
    dump_svmlight_file(X, y, buf, query_id=query_id, zero_based=True)
    buf.seek(0)
    X1, y1, query_id1 = load_svmlight_file(buf, query_id=True,
                                           zero_based=True)
    assert_array_almost_equal(X, X1.toarray())
    assert_array_almost_equal(y, y1)
    assert_array_almost_equal(query_id, query_id1)
| |
"""
Management of addresses and names in hosts file
===============================================
The ``/etc/hosts`` file can be managed to contain definitions for specific hosts:
.. code-block:: yaml
salt-master:
host.present:
- ip: 192.168.0.42
Or, using the ``names`` directive, you can assign several names to the same IP.
(Do not list multiple names as a single space-separated value.)
.. code-block:: yaml
server1:
host.present:
- ip: 192.168.0.42
- names:
- server1
- florida
.. note::
Changing the ``names`` in ``host.present`` does not cause an
update to remove the old entry.
.. code-block:: yaml
server1:
host.present:
- ip:
- 192.168.0.42
- 192.168.0.43
- 192.168.0.44
- names:
- server1
You can replace all existing names for a particular IP address:
.. code-block:: yaml
127.0.1.1:
host.only:
- hostnames:
- foo.example.com
- foo
Or delete all existing names for an address:
.. code-block:: yaml
203.0.113.25:
host.only:
- hostnames: []
You can also include comments:
.. code-block:: yaml
server1:
host.present:
- ip: 192.168.0.42
- names:
- server1
- florida
- comment: A very important comment
"""
import logging
import salt.utils.validate.net
log = logging.getLogger(__name__)
def present(name, ip, comment="", clean=False):  # pylint: disable=C0103
    """
    Ensures that the named host is present with the given ip
    name
        The host to assign an ip to
    ip
        The ip addr(s) to apply to the host. Can be a single IP or a list of IP
        addresses.
    comment
        A comment to include for the host entry
        .. versionadded:: 3001
    clean
        Remove any entries which don't match those configured in the ``ip``
        option. Default is ``False``.
        .. versionadded:: 2018.3.4
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}
    # Normalize to a list so single-IP and multi-IP invocations share one path.
    if not isinstance(ip, list):
        ip = [ip]
    # Snapshot of the current hosts file, keyed by address.
    all_hosts = __salt__["hosts.list_hosts"]()
    comments = []
    # Work queues built during the sweep, applied afterwards:
    # (addr, name) pairs to add/remove, (addr, comment) pairs to annotate.
    to_add = set()
    to_remove = set()
    update_comment = set()
    # First check for IPs not currently in the hosts file
    to_add.update([(addr, name) for addr in ip if addr not in all_hosts])
    if comment:
        update_comment.update([(addr, comment) for addr in ip if addr not in all_hosts])
    # Now sweep through the hosts file and look for entries matching either the
    # IP address(es) or hostname.
    for addr, host_info in all_hosts.items():
        if addr not in ip:
            if "aliases" in host_info and name in host_info["aliases"]:
                # Found match for hostname, but the corresponding IP is not in
                # our list, so we need to remove it.
                if clean:
                    to_remove.add((addr, name))
                else:
                    # Without ``clean`` we only warn; silently removing an
                    # entry the user did not mention would be surprising.
                    ret.setdefault("warnings", []).append(
                        "Host {0} present for IP address {1}. To get rid of "
                        "this warning, either run this state with 'clean' "
                        "set to True to remove {0} from {1}, or add {1} to "
                        "the 'ip' argument.".format(name, addr)
                    )
        else:
            if "aliases" in host_info and name in host_info["aliases"]:
                # Entry already exists; at most the comment needs updating.
                if (
                    comment
                    and "comment" in host_info
                    and host_info["comment"] != comment
                ):
                    update_comment.add((addr, comment))
                elif comment and "comment" not in host_info:
                    update_comment.add((addr, comment))
                else:
                    # No changes needed for this IP address and hostname
                    comments.append("Host {} ({}) already present".format(name, addr))
            else:
                # IP address listed in hosts file, but hostname is not present.
                # We will need to add it.
                if salt.utils.validate.net.ip_addr(addr):
                    to_add.add((addr, name))
                    if comment:
                        update_comment.add((addr, comment))
                else:
                    ret["result"] = False
                    comments.append("Invalid IP Address for {} ({})".format(name, addr))
    # Apply queued additions. NOTE: the loop deliberately rebinds ``name``;
    # the parameter is not used again after this point except via ``comments``.
    for addr, name in to_add:
        if __opts__["test"]:
            # Test mode: report the pending change without touching the file.
            ret["result"] = None
            comments.append("Host {} ({}) would be added".format(name, addr))
        else:
            if __salt__["hosts.add_host"](addr, name):
                comments.append("Added host {} ({})".format(name, addr))
            else:
                ret["result"] = False
                comments.append("Failed to add host {} ({})".format(name, addr))
                # Skip recording a change for the failed addition.
                continue
        ret["changes"].setdefault("added", {}).setdefault(addr, []).append(name)
    # Apply queued comment updates (``comment`` is likewise rebound here).
    for addr, comment in update_comment:
        if __opts__["test"]:
            comments.append("Comment for {} ({}) would be added".format(addr, comment))
        else:
            if __salt__["hosts.set_comment"](addr, comment):
                comments.append("Set comment for host {} ({})".format(addr, comment))
            else:
                ret["result"] = False
                comments.append(
                    "Failed to add comment for host {} ({})".format(addr, comment)
                )
                continue
        ret["changes"].setdefault("comment_added", {}).setdefault(addr, []).append(
            comment
        )
    # Apply queued removals (only populated when ``clean`` is set).
    for addr, name in to_remove:
        if __opts__["test"]:
            ret["result"] = None
            comments.append("Host {} ({}) would be removed".format(name, addr))
        else:
            if __salt__["hosts.rm_host"](addr, name):
                comments.append("Removed host {} ({})".format(name, addr))
            else:
                ret["result"] = False
                comments.append("Failed to remove host {} ({})".format(name, addr))
                continue
        ret["changes"].setdefault("removed", {}).setdefault(addr, []).append(name)
    ret["comment"] = "\n".join(comments)
    return ret
def absent(name, ip):  # pylint: disable=C0103
    """
    Ensure that the named host is absent
    name
        The host to remove
    ip
        The ip addr(s) of the host to remove
    """
    ret = {"name": name, "changes": {}, "result": None, "comment": ""}
    # Accept a single address or a list of addresses.
    if not isinstance(ip, list):
        ip = [ip]
    messages = []
    for addr in ip:
        # Nothing to do when the pair is not in the hosts file.
        if not __salt__["hosts.has_pair"](addr, name):
            ret["result"] = True
            messages.append("Host {} ({}) already absent".format(name, addr))
            continue
        # Test mode: only report the pending removal.
        if __opts__["test"]:
            messages.append("Host {} ({}) needs to be removed".format(name, addr))
            continue
        if __salt__["hosts.rm_host"](addr, name):
            ret["changes"] = {"host": name}
            ret["result"] = True
            messages.append("Removed host {} ({})".format(name, addr))
        else:
            ret["result"] = False
            messages.append("Failed to remove host")
    ret["comment"] = "\n".join(messages)
    return ret
def only(name, hostnames):
    """
    Ensure that only the given hostnames are associated with the
    given IP address.
    .. versionadded:: 2016.3.0
    name
        The IP address to associate with the given hostnames.
    hostnames
        Either a single hostname or a list of hostnames to associate
        with the given IP address in the given order. Any other
        hostname associated with the IP address is removed. If no
        hostnames are specified, all hostnames associated with the
        given IP address are removed.
    """
    ret = {"name": name, "changes": {}, "result": None, "comment": ""}
    # Accept a bare string as a one-element list.
    if isinstance(hostnames, str):
        hostnames = [hostnames]
    # Compare current vs. desired alias lists as space-joined strings.
    old = " ".join(__salt__["hosts.get_alias"](name))
    new = " ".join(entry.strip() for entry in hostnames)
    if old == new:
        ret["comment"] = f'IP address {name} already set to "{new}"'
        ret["result"] = True
        return ret
    if __opts__["test"]:
        ret["comment"] = f'Would change {name} from "{old}" to "{new}"'
        return ret
    ret["result"] = __salt__["hosts.set_host"](name, new)
    if not ret["result"]:
        ret["comment"] = (
            f'hosts.set_host failed to change {name} from "{old}" to "{new}"'
        )
        return ret
    ret["comment"] = f'successfully changed {name} from "{old}" to "{new}"'
    ret["changes"] = {name: {"old": old, "new": new}}
    return ret
| |
from models import Collection, Field, Deck, Decks_Cards, Card, Cards_Fields, Users_Collections
from django.forms.extras.widgets import SelectDateWidget
from . import services
from django import forms
from django.forms.utils import ErrorList
from django.db import transaction
import logging
import datetime
import json
import tempfile
import os
class CollectionForm(forms.ModelForm):
    """ModelForm for creating/editing a Collection; also processes the hidden
    ``deck_order`` field (JSON) that records how decks are sorted."""
    class Meta:
        model = Collection
        fields = ['title', 'card_template', 'published'] #'private'
    def __init__(self, *args, **kwargs):
        # Optional ``query_set`` kwarg restricts the card_template choices.
        # It must be popped before calling the parent constructor, which does
        # not recognize it.
        card_template_query_set = None
        if 'query_set' in kwargs:
            card_template_query_set = kwargs.get('query_set', None)
            del kwargs['query_set']
        super(CollectionForm, self).__init__(*args, **kwargs)
        if card_template_query_set is not None:
            self.fields['card_template'].queryset = card_template_query_set
    def clean_deck_order(self):
        """
        Cleans and validates the JSON POSTed in the deck_order field.
        This field describes how decks should be sorted in the collection.
        Errors are manually added to the errorlist because this is a custom field.
        """
        field = 'deck_order'
        deck_data = []
        errstr = ''
        errors = ErrorList()
        if field in self.data:
            # Expected payload: {"data": [{"deck_id": ..., "sort_order": ...}, ...]}
            # NOTE(review): json.loads raises ValueError on malformed JSON instead
            # of producing a form error — confirm callers always POST valid JSON.
            deck_order = json.loads(self.data[field])
            if 'data' in deck_order:
                deck_data = deck_order['data']
            for d in deck_data:
                if ('deck_id' in d and 'sort_order' in d):
                    try:
                        int(d['sort_order'])
                    except ValueError:
                        errstr = "deck %s has invalid sort value: %s" % (d['deck_id'], d['sort_order'])
                        errors.append(errstr)
                else:
                    errstr = "deck_id and sort_order required"
                    errors.append(errstr)
                    break
        if errors:
            self._errors.setdefault(field, errors)
            raise forms.ValidationError("Deck order field has errors")
        self.cleaned_data['deck_order'] = deck_data
    def clean(self):
        """Overrides the form clean() so that it also cleans the hidden deck_order field."""
        self.clean_deck_order()
        return super(CollectionForm, self).clean()
    @transaction.atomic
    def save_deck_order(self, deck_order):
        """Saves the new ordering."""
        # All deck updates commit atomically; a bad deck_id rolls back the batch.
        for d in deck_order:
            deck = Deck.objects.get(pk=d['deck_id'])
            deck.sort_order = d['sort_order']
            deck.save()
    def save(self):
        """Overrides the form save() so that the deck ordering is saved as well."""
        if self.cleaned_data['deck_order']:
            self.save_deck_order(self.cleaned_data['deck_order'])
        return super(CollectionForm, self).save()
class CollectionShareForm(forms.Form):
    """Form for sharing a collection; ``expired_in`` defaults to one year
    from the time the form is rendered."""
    #role = forms.ChoiceField(choices=Users_Collections.ROLES, initial=Users_Collections.OBSERVER)
    # BUGFIX: ``initial`` must be a callable. A plain
    # ``datetime.datetime.now() + timedelta(days=365)`` expression is
    # evaluated once at import time, freezing the default expiration date at
    # whenever the server process started. Django calls a callable ``initial``
    # each time the form is instantiated.
    expired_in = forms.DateField(
        widget=SelectDateWidget(),
        initial=lambda: datetime.datetime.now() + datetime.timedelta(days=365),
    )
class FieldForm(forms.ModelForm):
    """ModelForm for editing a card template Field (type, display flag, label)."""
    class Meta:
        model = Field
        fields = ['field_type', 'display', 'label']
class DeckForm(forms.ModelForm):
    """ModelForm for creating/renaming a Deck (title only)."""
    class Meta:
        model = Deck
        fields = ['title']
class DeckImportForm(forms.Form):
    """Upload form for importing a deck from a file; the file is optional."""
    # NOTE: the field name intentionally matches the POSTed key ("file").
    file = forms.FileField(required=False)
class CardEditForm(forms.Form):
    """Form for creating or editing a Card together with its field values.

    Text fields are handled as ordinary form fields named
    ``field_<field_id>``; image ('I') and audio ('A') fields arrive as
    uploaded files and are validated/saved separately.
    """
    card_color = forms.ChoiceField(choices=Card.COLOR_CHOICES, initial=Card.DEFAULT_COLOR, required=True)
    card = None           # Card instance being edited (set in __init__)
    deck = None           # Deck the card belongs to (set in __init__)
    card_fields = []      # template fields for this form (overridden per instance)
    field_prefix = 'field_'
    def __init__(self, *args, **kwargs):
        """Initializes the form, adding one CharField per card template field."""
        self.card_fields = kwargs.pop('card_fields', []) # custom: defines fields in this form
        super(CardEditForm, self).__init__(*args, **kwargs)
        # add the fields to the form
        for card_field in self.card_fields:
            field_name = self.field_prefix + str(card_field.id)
            self.fields[field_name] = forms.CharField(required=False)
            if card_field.field_type in ('I', 'A') and field_name in self.data:
                # REMOVE from data because media fields are handled separately
                # from normal (text) fields via self.files.
                del self.data[field_name]
        # initialize model objects
        self.deck = Deck.objects.get(id=self.data['deck_id'])
        if self.data['card_id']:
            self.card = Card.objects.get(id=self.data['card_id'])
        else:
            # No card_id means a brand new card is being created in this deck.
            self.card = services.create_card_in_deck(self.deck)
    def is_valid(self):
        """Validates uploaded files before running standard form validation."""
        self._check_file_errors()
        return super(CardEditForm, self).is_valid()
    def save(self):
        """Saves the card with all of its fields and attributes."""
        self._save_card_fields()
        self._save_card_files()
        self._save_card_data()
    def get_card(self):
        """Returns Card instance."""
        return self.card
    def get_deck(self):
        """Returns Deck instance."""
        return self.deck
    def _save_card_fields(self):
        """Persists the text field values (``field_<id>`` entries in self.data)."""
        field_list = []
        for field_name, field_value in self.data.items():
            is_field = field_name.startswith(self.field_prefix)
            if not is_field:
                continue
            field_id = field_name.replace(self.field_prefix, '')
            if not field_id.isdigit():
                continue
            field_list.append({"field_id": int(field_id), "value": field_value})
        if len(field_list) > 0:
            services.update_card_fields(self.card, field_list)
    def _save_card_data(self):
        """Persists card-level attributes (currently only the color)."""
        self.card.color = self.cleaned_data['card_color']
        self.card.save()
    def _save_card_files(self):
        """Stores uploaded media files and records their paths as field values."""
        field_list = []
        for field_name, field_value in self.files.items():
            is_field = field_name.startswith(self.field_prefix)
            if not is_field:
                continue
            field_id = field_name.replace(self.field_prefix, '')
            if not field_id.isdigit():
                continue
            # Skip empty uploads.
            if self.files[field_name].size > 0:
                path = services.handle_uploaded_media_file(self.files[field_name], self._get_field_type(field_name))
                field_list.append({"field_id": int(field_id), "value": path})
        if len(field_list) > 0:
            services.update_card_fields(self.card, field_list)
    def _get_field_type(self, find_field_name):
        """Returns the template field type for a form field name, or None."""
        for card_field in self.card_fields:
            field_name = self.field_prefix + str(card_field.id)
            if find_field_name == field_name:
                return card_field.field_type
        return None
    def _check_file_errors(self):
        """Validates uploaded image/audio files, recording per-field errors."""
        for field_name in self.files:
            field_type = self._get_field_type(field_name)
            if "I" == field_type:
                is_valid_type, errstr = services.valid_image_file_type(self.files[field_name])
                if not is_valid_type:
                    self.errors[field_name] = "Invalid image file. Must be a valid image type (i.e jpg, png, gif, etc). Error: %s" % errstr
            elif "A" == field_type:
                # The audio validator needs a real file path, so spool the
                # upload into a temporary file first.
                with tempfile.NamedTemporaryFile(mode='r+', suffix='.mp3') as tf:
                    file_contents = self.files[field_name].read()
                    tf.write(file_contents)
                    tf.seek(0)
                    is_valid_type, errstr = services.valid_audio_file_type(tf.name)
                    # BUGFIX: ``errstr`` was previously reset to "" here,
                    # which blanked the validator's detail out of the
                    # user-facing error message below.
                    if not is_valid_type:
                        self.errors[field_name] = "Invalid audio file. Must be a valid mp3. Error: %s" % errstr
| |
"""
The MIT License (MIT)
Copyright (c) 2016 Ilhan Polat
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import numpy as np
__all__ = ['haroldsvd', 'haroldker', 'pair_complex_numbers', 'e_i',
'matrix_slice']
def haroldsvd(A, also_rank=False, rank_tol=None):
    """
    This is a wrapper/container function of both the SVD decomposition
    and the rank computation. Since the regular rank computation is
    implemented via SVD it doesn't make too much sense to recompute
    the SVD if we already have the rank information. Thus instead of
    typing two commands back to back for both the SVD and rank, we
    return both. To reduce the clutter, the rank information is suppressed
    by default.

    numpy svd is a bit strange because it compresses and loses the
    S matrix structure. From the manual, it is advised to use
    u.dot(np.diag(s).dot(v)) for recovering the original matrix. But
    that won't work for rectangular matrices. Hence it recreates the
    rectangular S matrix of U,S,V triplet.

    Parameters
    ----------
    A : (m,n) array_like
        Matrix to be decomposed
    also_rank : bool, optional
        Whether the rank of the matrix should also be reported or not.
        The returned rank is computed via the definition taken from the
        official numpy.linalg.matrix_rank and appended here.
    rank_tol : {None,float} optional
        The tolerance used for deciding the numerical rank. The default
        is set to None and uses the default definition of matrix_rank()
        from numpy.

    Returns
    -------
    U,S,V : {(m,m),(m,n),(n,n)} array_like
        Decomposed-form matrices
    r : integer
        If the boolean "also_rank" is true, this variable is the numerical
        rank of the matrix D
    """
    try:
        A = np.atleast_2d(np.array(A))
    except TypeError:
        raise TypeError('Incompatible argument, use either list of lists'
                        'or native numpy arrays for svd.')
    except ValueError:
        raise ValueError('The argument cannot be cast as an array.')
    p, m = A.shape
    u, s, v = np.linalg.svd(A, full_matrices=True)
    # Rebuild the rectangular (p x m) singular-value matrix in one vectorized
    # call instead of a Python-level loop; len(s) == min(p, m) exactly matches
    # the main diagonal length of a rectangular array.
    diags = np.zeros((p, m))
    np.fill_diagonal(diags, s)
    if also_rank:  # Copy the official rank computation
        if rank_tol is None:
            rank_tol = s.max() * max(p, m) * np.spacing(1.)
        # count_nonzero is the vectorized equivalent of sum(s > rank_tol).
        r = int(np.count_nonzero(s > rank_tol))
        return u, diags, v, r
    return u, diags, v
def haroldker(N, side='right'):
    """
    This function is a straightforward basis computation for the right/left
    nullspace for rank deficient or fat/tall matrices.

    It returns the conjugate-transposed trailing rows of the right factor of
    the singular value decomposition whenever applicable. Otherwise returns
    a zero vector such that it has the same number of rows as the columns
    of the argument, hence the dot product makes sense.

    The basis columns have unity 2-norm except for the trivial zeros.

    Parameters
    ----------
    N : (m,n) array_like
        Matrix for which the nullspace basis to be computed
    side : {'right','left'} string
        The switch for the right or left nullspace computation.

    Returns
    -------
    Nn : (n,dim) array_like
        Basis for the nullspace. dim is the dimension of the nullspace. If
        the nullspace is trivial then dim is 1 for consistent 2D array output
    """
    if side not in ('left', 'right'):
        raise ValueError('side keyword only takes "left,right" as arguments')
    try:
        A = np.atleast_2d(np.array(N))
    except TypeError:
        raise TypeError('Incompatible argument, use either list of lists'
                        'or native numpy arrays for svd.')
    except ValueError:
        raise ValueError('The argument cannot be cast as an array.')
    if side == 'left':
        # The left nullspace of A equals the right nullspace of A^H.
        A = A.conj().T
    m, n = A.shape
    if A.size <= 1:
        # don't bother
        return np.array([[0]])
    V, r = haroldsvd(A, also_rank=True)[2:]
    if r == min(m, n) and m >= n:
        # If full rank and not fat, return trivial zero
        return np.zeros((A.shape[1], 1))
    else:
        # BUGFIX: numpy's svd returns V^H, whose ROWS are the right singular
        # vectors. The nullspace basis is therefore the conjugate transpose of
        # the trailing rows. The previous ``V[:, r:]`` sliced COLUMNS of V^H,
        # which is not a nullspace basis in general (it only coincided for
        # symmetric V^H).
        return V[r:, :].conj().T
def pair_complex_numbers(a, tol=1e-9, realness_tol=1e-9,
                         positives_first=False, reals_first=True):
    """
    Given an array-like somearray, it first tests and clears out small
    imaginary parts via `numpy.real_if_close`. Then pairs complex numbers
    together as consecutive entries. A real array is returned as is.

    Parameters
    ----------
    a : array_like
        Array like object needs to be paired
    tol: float
        The sensitivity threshold for the real and complex parts to be
        assumed as equal.
    realness_tol: float
        The sensitivity threshold for the complex parts to be assumed
        as zero.
    positives_first: bool
        The boolean that defines whether the positive complex part
        should come first for the conjugate pairs
    reals_first: bool
        The boolean that defines whether the real numbers are at the
        beginning or the end of the resulting array.

    Returns
    -------
    paired_array : ndarray
        The resulting paired array

    Raises
    ------
    ValueError
        If the entries cannot be cast to complex, the input is a matrix,
        there is an odd number of complex entries, or pairing fails.
    """
    try:
        array_r_j = np.array(a, dtype='complex').flatten()
    except Exception:
        # Narrowed from a bare ``except:`` so that system-exiting exceptions
        # (KeyboardInterrupt, SystemExit) are no longer swallowed.
        raise ValueError('Something in the argument array prevents me to '
                         'convert the entries to complex numbers.')
    # is there anything to pair?
    if array_r_j.size == 0:
        return np.array([], dtype='complex')
    # is the array 1D or more?
    if array_r_j.ndim > 1 and np.min(array_r_j.shape) > 1:
        raise ValueError('Currently, I can\'t deal with matrices, so I '
                         'need 1D arrays.')

    # A shortcut for splitting a complex array into real and imag parts
    def return_imre(arr):
        return np.real(arr), np.imag(arr)

    # a close to realness function that operates element-wise.
    # NOTE: ``complex`` replaces ``np.complex_`` which was removed in NumPy 2.
    real_if_close_array = np.vectorize(
        lambda x: np.real_if_close(x, realness_tol), otypes=[complex],
        doc='Elementwise numpy.real_if_close')
    array_r_j = real_if_close_array(array_r_j)
    array_r, array_j = return_imre(array_r_j)
    # are there any complex numbers to begin with or all reals?
    # if not kick the argument back as a real array
    imagness = np.abs(array_j) >= realness_tol
    # perform the imaginary entry separation once
    array_j_ent = array_r_j[imagness]
    num_j_ent = array_j_ent.size
    if num_j_ent == 0:
        # If no complex entries exist sort and return unstable first
        return np.sort(array_r)
    elif num_j_ent % 2 != 0:
        # Check to make sure there are even number of complex numbers
        # Otherwise stop with "odd number --> no pair" error.
        raise ValueError('There are odd number of complex numbers to '
                         'be paired!')
    else:
        # Still doesn't distinguish whether they are pairable or not.
        sorted_array_r_j = np.sort_complex(array_j_ent)
        sorted_array_r, sorted_array_j = return_imre(sorted_array_r_j)
        # Since the entries are now sorted and appear as pairs,
        # when summed with the next element the result should
        # be very small
        if any(np.abs(sorted_array_r[:-1:2] - sorted_array_r[1::2]) > tol):
            # if any difference is bigger than the tolerance
            raise ValueError('Pairing failed for the real parts.')
        # Now we have sorted the real parts and they appear in pairs.
        # Next, we have to get rid of the repeated imaginary, if any,
        # parts in the --... , ++... pattern due to sorting. Note
        # that the non-repeated imaginary parts now have the pattern
        # -,+,-,+,-,... and so on. So we can check whether sign is
        # not alternating for the existence of the repeatedness.

        def repeat_sign_test(myarr, mylen):
            # Since we separated the zero imaginary parts now any sign
            # info is either -1 or 1. Hence we can test whether -1,1
            # pattern is present. Otherwise we count how many -1s occured
            # double it for the repeated region. Then repeat until we
            # we exhaust the array with a generator.
            x = 0
            myarr_sign = np.sign(myarr).astype(int)
            while x < mylen:
                if np.array_equal(myarr_sign[x:x+2], [-1, 1]):
                    x += 2
                elif np.array_equal(myarr_sign[x:x+2], [1, -1]):
                    myarr[x:x+2] *= -1
                    x += 2
                else:
                    # Scan forward until the sign flips to find how long
                    # the repeated-negative run is; yield (start, length).
                    still_neg = True
                    xl = x+2
                    while still_neg:
                        if myarr_sign[xl] == 1:
                            still_neg = False
                        else:
                            xl += 1
                    yield x, xl - x
                    x += 2*(xl - x)

        for ind, l in repeat_sign_test(sorted_array_j, num_j_ent):
            # Interleave the repeated negatives with their mirrored
            # positives so conjugates become adjacent.
            indices = np.dstack(
                (range(ind, ind+l), range(ind+2*l-1, ind+l-1, -1))
            )[0].reshape(1, -1)
            sorted_array_j[ind:ind+2*l] = sorted_array_j[indices]
        if any(np.abs(sorted_array_j[:-1:2] + sorted_array_j[1::2]) > tol):
            # if any difference is bigger than the tolerance
            raise ValueError('Pairing failed for the complex parts.')
        # Finally we have a properly sorted pairs of complex numbers
        # We can now combine the real and complex parts depending on
        # the choice of positives_first keyword argument

        # Force entries to be the same for each of the pairs.
        sorted_array_j = np.repeat(sorted_array_j[::2], 2)
        paired_cmplx_part = np.repeat(sorted_array_r[::2], 2).astype(complex)
        if positives_first:
            sorted_array_j[::2] *= -1
        else:
            sorted_array_j[1::2] *= -1
        paired_cmplx_part += sorted_array_j*1j
        if reals_first:
            return np.r_[np.sort(array_r_j[~imagness]), paired_cmplx_part]
        else:
            return np.r_[paired_cmplx_part, np.sort(array_r_j[~imagness])]
def e_i(width, nth=0, output='c'):
    """
    Returns the ``nth`` column(s) of the identity matrix with shape
    ``(width,width)``. Slicing is permitted with the ``nth`` parameter.
    The output is returned without slicing an intermediate identity matrix
    hence can be used without allocating the whole array.

    Parameters
    ----------
    width : int
        The size of the identity matrix from which the columns are taken
    nth : 1D index array
        A sequence/index expression that selects the requested columns/rows
        of the identity matrix. The index starts with zero denoting the first
        column.
    output : str
        This switches the shape of the output; if ``'r'`` is given then
        the rows are returned. The default is ``'c'`` which returns columns

    Returns
    -------
    E : ndarray
        The resulting row/column subset of the identity matrix

    Examples
    --------
    >>> e_i(7, 5, output='r')  # The 5th row of 7x7 identity matrix
    array([[ 0.,  0.,  0.,  0.,  0.,  1.,  0.]])
    >>> e_i(5, [0, 4, 4, 4, 1])  # Sequences can also be used
    array([[ 1.,  0.,  0.,  0.,  0.],
           [ 0.,  0.,  0.,  0.,  1.],
           [ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  1.,  1.,  1.,  0.]])
    >>> e_i(5,np.s_[1:3])  # or NumPy index expressions
    array([[ 0.,  0.],
           [ 1.,  0.],
           [ 0.,  1.],
           [ 0.,  0.],
           [ 0.,  0.]])
    >>> e_i(5,slice(1,5,2),output='r')  # or Python slice objects
    array([[ 0.,  1.,  0.,  0.,  0.],
           [ 0.,  0.,  0.,  1.,  0.]])
    """
    # Indexing an arange with ``nth`` normalizes ints, sequences, slices and
    # index expressions into a flat array of column indices.
    col_inds = np.atleast_1d(np.arange(width)[nth])
    m = col_inds.size
    # Vectorized fancy-index assignment replaces the former Python loop:
    # each output column/row k gets a single 1 at position col_inds[k].
    if output == 'c':
        E = np.zeros((width, m))
        E[col_inds, np.arange(m)] = 1.
    else:
        E = np.zeros((m, width))
        E[np.arange(m), col_inds] = 1.
    return E
def matrix_slice(M, corner_shape, corner='nw'):
    """
    Takes a two dimensional array ``M`` and slices into four parts dictated
    by the ``corner_shape`` and the corner string ``corner``. ::

            m   n
        p [ A | B ]
          [-------]
        q [ C | D ]

    If the given corner and the shape is the whole array then the remaining
    arrays are returned as empty arrays, ``numpy.array([])``.

    Parameters
    ----------
    M : ndarray
        2D input matrix
    corner_shape : tuple
        An integer valued 2-tuple for the shape of the corner
    corner : str
        Defines which corner should be used to start slicing. Possible
        options are the compass abbreviations: ``'nw', 'ne', 'sw', 'se'``.
        The default is the north-west corner.

    Returns
    -------
    A : ndarray
        Upper left corner slice
    B : ndarray
        Upper right corner slice
    C : ndarray
        Lower left corner slice
    D : ndarray
        Lower right corner slice

    Examples
    --------
    >>> A = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    >>> matrix_slice(A,(1,1))
    (array([[1]]),
     array([[2, 3]]),
     array([[4],
            [7]]),
     array([[5, 6],
            [8, 9]])
    )
    >>> matrix_slice(A, (2,2), 'sw')
    (array([[1, 2]]),
     array([[3]]),
     array([[4, 5],
            [7, 8]]),
     array([[6],
            [9]])
    )
    >>> matrix_slice(A, (0, 0))  # empty A
    (array([], shape=(0, 0), dtype=int32),
     array([], shape=(0, 3), dtype=int32),
     array([], shape=(3, 0), dtype=int32),
     array([[1, 2, 3],
            [4, 5, 6],
            [7, 8, 9]]))
    """
    if corner not in ('ne', 'nw', 'se', 'sw'):
        raise ValueError('The corner string needs to be one of'
                         '"ne, nw, se, sw".')
    x, y = M.shape
    z, w = corner_shape
    # Translate the requested corner block into the (row, col) split point.
    if corner == 'nw':
        p, m = z, w
    elif corner == 'ne':
        # BUGFIX: previously ``p, m = x, y - w`` which ignored the requested
        # row count ``z`` and made the NE corner span every row of M.
        p, m = z, y - w
    elif corner == 'sw':
        p, m = x - z, w
    else:
        p, m = x - z, y - w
    return M[:p, :m], M[:p, m:], M[p:, :m], M[p:, m:]
| |
# -*- coding: utf-8 -*-
#
# This source file is part of the FabSim software toolkit, which is distributed under the BSD 3-Clause license.
# Please refer to LICENSE for detailed information regarding the licensing.
#
# This file contains FabSim definitions specific to FabBioMD.
from ..fab import *
# Add local script, blackbox and template path.
# Registers the FabBioMD plugin directories with FabSim's search paths so
# that "BioMD" scripts and templates resolve at job-submission time.
add_local_paths("BioMD")
@task
def namd(config, **args):
    """Submit a NAMD job to the remote queue.
    The job results will be stored with a name pattern as defined in the environment,
    e.g. cylinder-abcd1234-legion-256
    config : config directory to use to define geometry, e.g. config=cylinder
    Keyword arguments:
    cores : number of compute cores to request
    images : number of images to take
    steering : steering session i.d.
    wall_time : wall-time job limit
    memory : memory per node
    """
    # Default to 32 cores when the caller supplied none (or a falsy value).
    if not args.get('cores'):
        args["cores"] = 32
    update_environment(args)
    with_config(config)
    execute(put_configs, config)
    # Queue the job with the standard NAMD submission settings.
    job_spec = dict(script='namd',
                    wall_time='1:00:00',
                    memory='2G',
                    job_type='parallel',
                    job_class='micro')
    job(job_spec, args)
@task
def bac_namd_archerlike(config, **args):
    """Submit ensemble NAMD equilibration-simulation jobs to the ARCHER or similar machines.
    The job results will be stored with a name pattern as defined in the environment,
    e.g. cylinder-abcd1234-legion-256
    config : config directory to use to define geometry, e.g. config=cylinder
    Keyword arguments:
    cores : number of compute cores to request
    stages : this is usually 11 for equilibration (WT case) and 4 for simulation
    wall_time : wall-time job limit
    memory : memory per node
    """
    # Default core count sized for ARCHER-class ensemble runs.
    if not args.get('cores'):
        args["cores"] = 2400
    update_environment(args)
    with_config(config)
    execute(put_configs, config)
    # Single submission covering all 25 replicas.
    job_spec = dict(script=env.bac_ensemble_namd_script,
                    stages_eq=11,
                    stages_sim=1,
                    replicas=25,
                    wall_time='24:00:00',
                    memory='2G')
    job(job_spec, args)
@task
def bac_namd_hartreelike(config,**args):
    """Submits ensemble NAMD equilibration-simulation jobs to HARTREE or similar machines.
    The job results will be stored with a name pattern as defined in the environment,
    e.g. cylinder-abcd1234-legion-256
    config : config directory to use to define geometry, e.g. config=cylinder
    Keyword arguments:
    cores : number of compute cores to request
    stages : this is usually 11 for equilibration (WT case) and 4 for simulation
    wall_time : wall-time job limit
    memory : memory per node
    mem : memory of nodes requested for Bluewonder Phase 1. By default is 32000,
    but higher memory nodes can be requested by using other values. For eg. use 64000 for >64 GB memory nodes.
    """
    # Default core count for HARTREE-class machines.
    if not args.get('cores'):
        args["cores"] = 384
    update_environment(args)
    # Fall back to 25 replicas when the caller did not specify any
    # (Python 2 print statement — this module targets Python 2).
    if not env.get('replicas'):
        env.update(dict(replicas=25))
        print "WARNING: replicas argument not specified. Setting a default value of", env.replicas
        # sys.exit()
    with_config(config)
    execute(put_configs,config)
    # Unlike the ARCHER variant, one job is submitted per replica (1-based).
    for ri in xrange(1,int(env.replicas)+1):
        job(dict(script=env.bac_ensemble_namd_script,
            stages_eq=11, stages_sim=1, wall_time='6:00', memory='2G', mem=25000, replicas=env.replicas, replica_index=ri),args)
@task
def bac_ties_archerlike(config,**args):
    """Creates appropriate directory structure for TIES calculation given that it is already restructured using dir_structure function of FabSim.
    """
    if not args.get('cores'):
        args["cores"] = 6240
    update_environment(args)
    # Workaround to ensure env.cores is set before we calculate cores_per_lambda.
    if not env.get('replicas'):
        env.replicas=5
    # Default alchemical lambda windows for TIES free-energy runs.
    if not env.get('lambda_list'):
        env.update(dict(lambda_list= '0.00 0.05 0.10 0.20 0.30 0.40 0.50 0.60 0.70 0.80 0.90 0.95 1.00'))
        print "WARNING: lambda_list argument not specified. Setting a default value of", env.lambda_list
    with_config(config)
    execute(put_configs,config)
    # Partition the requested cores evenly across lambda windows and replicas.
    # NOTE: Python 2 integer division — any remainder cores are dropped.
    env.cores_per_lambda = int(env.cores) / len(env.lambda_list.split(" "))
    env.cores_per_replica_per_lambda = int(env.cores_per_lambda) / int(env.replicas)
    job(dict(script=env.bac_ties_script, stages_eq=11, stages_sim=1, wall_time='12:00:00', memory='2G'),args)
    # for i in env.lambda_list.split(" "):
    #     run("rsync -avz --exclude 'LAMBDA_*' %s/ %s/LAMBDA_%.2f/" % (env.job_config_path, env.job_config_path, float(i)))
    #     job(dict(script=env.bac_ties_script,cores=960, stages_eq=11, stages_sim=1, replicas=10, lambda_list=env.lambda_list, lambda_index='%.2f' % float(i), wall_time='12:00:00', memory='2G'),args)
@task
def bac_namd_supermuclike(config,**args):
    """Submit ensemble NAMD equilibration-simulation jobs to the SuperMUC or similar machines.
    The job results will be stored with a name pattern as defined in the environment,
    e.g. cylinder-abcd1234-legion-256
    config : config directory to use to define geometry, e.g. config=cylinder
    Keyword arguments:
    cores : number of compute cores to request
    stages : this is usually 11 for equilibration (WT case) and 4 for simulation
    wall_time : wall-time job limit
    memory : memory per node
    """
    if not args.get('cores'):
        args["cores"] = 7000
    update_environment(args)
    if not env.get('replicas'):
        env.replicas=25
    # Derive node counts from the requested cores (FabSim helper mutates env).
    calc_nodes()
    # One extra node is requested beyond the computed count — presumably for
    # the coordinating process; TODO confirm against the submission template.
    env.nodes_new = "%s" % (int(env.nodes)+1)
    # NOTE: Python 2 integer division — any remainder cores are dropped.
    env.cores_per_replica = int(env.cores) / int(env.replicas)
    if not env.get('nodes_per_replica'):
        env.update(dict(nodes_per_replica = int(env.cores_per_replica) / int(env.corespernode)))
    with_config(config)
    # SuperMUC jobs need the redis header template copied into the config dir.
    local("cp %s/redis_header.txt %s" % (env.local_templates_path[-1], env.job_config_path_local))
    execute(put_configs,config)
    job(dict(script=env.bac_ensemble_namd_script,
        stages_eq=11, stages_sim=1, wall_time='06:00:00',memory='2G', job_type='MPICH', job_class='general', island_count='1', nodes_new=env.nodes_new),args)
@task
def bac_ties_supermuclike(config,**args):
    """Creates appropriate directory structure for TIES calculation given that it is already restructured using dir_structure function of FabSim.
    """
    # Workaround to ensure env.cores is set before we calculate cores_per_lambda.
    if not args.get('cores'):
        args["cores"] = 18200
    update_environment(args)
    if not env.get('replicas'):
        env.replicas=5
    # Default alchemical lambda windows for TIES free-energy runs.
    if not env.get('lambda_list'):
        env.update(dict(lambda_list= '0.00 0.05 0.10 0.20 0.30 0.40 0.50 0.60 0.70 0.80 0.90 0.95 1.00'))
        print "WARNING: lambda_list argument not specified. Setting a default value of", env.lambda_list
    # Partition cores across lambda windows and replicas.
    # NOTE: Python 2 integer division — any remainder cores are dropped.
    env.cores_per_lambda = int(env.cores) / len(env.lambda_list.split(" "))
    env.cores_per_replica_per_lambda = int(env.cores_per_lambda) / int(env.replicas)
    if not env.get('nodes_per_replica_per_lambda'):
        env.update(dict(nodes_per_replica_per_lambda = int(env.cores_per_replica_per_lambda) / int(env.corespernode)))
    # Derive node counts from the requested cores (FabSim helper mutates env).
    calc_nodes()
    # One extra node beyond the computed count, as in bac_namd_supermuclike.
    env.nodes_new = "%s" % (int(env.nodes)+1)
    with_config(config)
    # SuperMUC jobs need the redis header template copied into the config dir.
    local("cp %s/redis_header.txt %s" % (env.local_templates_path[-1], env.job_config_path_local))
    execute(put_configs,config)
    job(dict(script=env.bac_ties_script, stages_eq=11, stages_sim=1, wall_time='06:00:00', memory='2G', job_type='MPICH', job_class='general', island_count='1', nodes_new=env.nodes_new),args)
@task
def bac_nmode_archerlike(config, **args):
    """Submit ensemble NMODE/MMPB(GB)SA jobs to the ARCHER or similar machines.
    The job results will be stored with a name pattern as defined in the environment,
    e.g. cylinder-abcd1234-legion-256
    config : config directory to use to define geometry, e.g. config=cylinder
    Keyword arguments:
    cores : number of compute cores to request
    wall_time : wall-time job limit
    memory : memory per node
    """
    # Default core count for ARCHER-class NMODE analysis runs.
    if not args.get('cores'):
        args["cores"] = 240
    update_environment(args)
    with_config(config)
    execute(put_configs, config)
    # Single submission covering all 5 replicas.
    job_spec = dict(script=env.bac_ensemble_nmode_script,
                    replicas=5,
                    wall_time='12:00:00',
                    memory='2G')
    job(job_spec, args)
@task
def bac_nmode_hartreelike(config,**args):
    """Submits ensemble NMODE/MMPB(GB)SA equilibration-simulation jobs to HARTREE or similar machines.
    The job results will be stored with a name pattern as defined in the environment,
    e.g. cylinder-abcd1234-legion-256
    config : config directory to use to define geometry, e.g. config=cylinder
    Keyword arguments:
    cores : number of compute cores to request
    stages : this is usually 11 for equilibration (WT case) and 4 for simulation
    wall_time : wall-time job limit
    memory : memory per node
    mem : memory of nodes requested for Bluewonder Phase 1. By default is 32000,
    but higher memory nodes can be requested by using other values. For eg. use 64000 for >64 GB memory nodes.
    """
    if not args.get('cores'):
        args["cores"] = 24
    update_environment(args)
    # Fall back to 25 replicas when not supplied
    # (Python 2 print statement — this module targets Python 2).
    if not env.get('replicas'):
        env.update(dict(replicas=25))
        print "WARNING: replicas argument not specified. Setting a default value of", env.replicas
        # sys.exit()
    with_config(config)
    execute(put_configs,config)
    # One job per replica, indexed from 1.
    for ri in xrange(1,int(env.replicas)+1):
        job(dict(script=env.bac_ensemble_nmode_script,
            wall_time='24:00', memory='2G', mem=25000, replica_index=ri),args)
@task
def bac_nm_remote_archerlike(**args):
    """Submit ensemble NMODE/MMPB(GB)SA jobs to the ARCHER or similar machines,
    when the simulation data is already on the remote machine.
    The job results will be stored with a name pattern as defined in the environment,
    e.g. cylinder-abcd1234-legion-256
    Keyword arguments:
    cores : number of compute cores to request (default 1200)
    wall_time : wall-time job limit
    memory : memory per node
    remote_path : The path of root directory where all data of ensemble jobs is located;
    to be provided by user as an argument
    """
    if not args.get('cores'):
        args["cores"] = 1200
    update_environment(args)
    # The simulation data already resides on the remote machine, so an empty
    # config is used and no configuration files are transferred.
    with_config('')
    #execute(put_configs,config)
    #print "$results_path"
    job(dict(config='',script=env.bac_ensemble_nm_remote_script,
             replicas=25, wall_time='24:00:00',memory='2G'),args)
@task
def bac_nm_remote_hartreelike(**args):
    """Submits ensemble NMODE/MMPB(GB)SA equilibration-simulation jobs to HARTREE or similar machines,
    when the simulation data is already on the remote machine.
    The job results will be stored with a name pattern as defined in the environment,
    e.g. cylinder-abcd1234-legion-256
    Keyword arguments:
    cores : number of compute cores to request (default 24)
    replicas : number of replicas (default 25)
    stages : this is usually 11 for equilibration (WT case) and 4 for simulation
    wall_time : wall-time job limit
    memory : memory per node
    mem : memory of nodes requested for Bluewonder Phase 1. By default is 32000,
    but higher memory nodes can be requested by using other values. For eg. use 64000 for >64 GB memory nodes.
    remote_path : The path of root directory where all data of ensemble jobs is located;
    to be provided by user as an argument
    """
    if not args.get('cores'):
        args["cores"] = 24
    update_environment(args)
    if not env.get('replicas'):
        env.update(dict(replicas=25))
        print "WARNING: replicas argument not specified. Setting a default value of", env.replicas
        # sys.exit()  -- intentionally disabled: defaulting replicas is non-fatal.
    # Data already resides on the remote machine, so an empty config is used
    # and no configuration files are transferred.
    with_config('')
    #execute(put_configs,config)
    # Each replica is submitted as its own job.
    for ri in xrange(1,int(env.replicas)+1):
        job(dict(config='',script=env.bac_ensemble_nm_remote_script,
                 wall_time='24:00', memory='2G', mem=25000, replica_index=ri),args)
@task
def find_namd_executable():
"""
Searches module system to locate a NAMD executable.
"""
namd_modules = probe('namd')
print namd_modules
for line in namd_modules.split("\n"):
if "(" in line:
print line
stripped_line = (line.strip()).split("(")
print "which namd2"
namd = run("module load %s && which namd2" % stripped_line[0])
print "FabMD: NAMD executable is located at:", namd
return namd
print "No relevant modules found. Trying a basic which command."
namd = run("which namd2")
return namd
@task
def dir_structure(num_rep,path):
    """ Creates appropriate directory structure for ensemble simulations from the initial directory structure created by BAC builder.
    num_rep is number of replicas desired and path is the full path (upto rep0) of the original directory created by BAC builder. """
    # Both arguments arrive as strings from the fab command line, hence the
    # length checks and the int() conversion below.
    if len(num_rep)<1:
        print "error: number of replicas not defined."
        sys.exit()
    if len(path)<1:
        print "error: path of rep0 not defined."
        sys.exit()
    print "restructuring directory for ensemble simulations"
    local("mkdir -p %s/replicas/rep1" % path)
    # Remove BAC builder outputs that are not needed for ensemble runs.
    for d in ['data', 'dcds', 'analysis_scripts', 'run_scripts']:
        local("rm -r %s/%s" % (path, d))
    # Move per-replica inputs under replicas/rep1; '2>/dev/null; true' keeps
    # the task going when a directory is absent.
    for d in ['equilibration','simulation']:
        local("mv %s/%s %s/replicas/rep1 2>/dev/null; true" % (path, d, path))
    local("mv %s/fe-calc/build/* %s/build/ ; rm -r %s/fe-calc" % (path, path, path))
    local("mkdir -p %s/replicas/rep1/fe-calc" % path)
    # Clone the fully prepared rep1 into rep2..repN.
    for x in xrange(2, int(num_rep) + 1):
        local("cp -r %s/replicas/rep1 %s/replicas/rep%s" % (path, path, x))
    local("cp %s/fep.tcl %s" % (env.local_templates_path[0], path))
| |
#!/usr/bin/python
# Copyright 2016 Google Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcpubsub
version_added: "2.3"
short_description: Create and Delete Topics/Subscriptions, Publish and pull messages on PubSub.
description:
- Create and Delete Topics/Subscriptions, Publish and pull messages on PubSub.
See U(https://cloud.google.com/pubsub/docs) for an overview.
requirements:
- "python >= 2.6"
- "google-auth >= 0.5.0"
- "google-cloud-pubsub >= 0.22.0"
notes:
- Subscription pull happens before publish. You cannot publish and pull in the same task.
author:
- "Tom Melendez (@supertom) <tom@supertom.com>"
options:
topic:
description:
- GCP pubsub topic name. Only the name, not the full path, is required.
required: True
subscription:
description:
      - Dictionary containing a subscription name associated with a topic (required), along with optional ack_deadline, push_endpoint and pull.
For pulling from a subscription, message_ack (bool), max_messages (int) and return_immediate are available as subfields.
See subfields name, push_endpoint and ack_deadline for more information.
required: False
name:
description: Subfield of subscription. Required if subscription is specified. See examples.
required: False
ack_deadline:
description: Subfield of subscription. Not required. Default deadline for subscriptions to ACK the message before it is resent. See examples.
required: False
pull:
description:
- Subfield of subscription. Not required. If specified, messages will be retrieved from topic via the provided subscription name.
max_messages (int; default None; max number of messages to pull), message_ack (bool; default False; acknowledge the message) and return_immediately
(bool; default True, don't wait for messages to appear). If the messages are acknowledged, changed is set to True, otherwise, changed is False.
push_endpoint:
description:
- Subfield of subscription. Not required. If specified, message will be sent to an endpoint.
See U(https://cloud.google.com/pubsub/docs/advanced#push_endpoints) for more information.
required: False
publish:
description:
- List of dictionaries describing messages and attributes to be published. Dictionary is in message(str):attributes(dict) format.
Only message is required.
required: False
state:
description:
      - State of the topic or queue (absent, present). Applies to the most granular resource: if subscription is
        specified, that is what is removed; if only topic is specified, that is what is removed. Note that a topic
        can be removed without first removing the subscription.
required: False
default: "present"
'''
EXAMPLES = '''
# Create a topic and publish a message to it
# (Message will be pushed; there is no check to see if the message was pushed before.)
# Topics:
## Create Topic
gcpubsub:
topic: ansible-topic-example
state: present
## Delete Topic
### Subscriptions associated with topic are not deleted.
gcpubsub:
topic: ansible-topic-example
state: absent
## Messages: publish multiple messages, with attributes (key:value available with the message)
### setting absent will keep the messages from being sent
gcpubsub:
topic: "{{ topic_name }}"
state: present
publish:
- message: "this is message 1"
attributes:
mykey1: myvalue
mykey2: myvalu2
mykey3: myvalue3
- message: "this is message 2"
attributes:
server: prod
sla: "99.9999"
owner: fred
# Subscriptions
## Create Subscription (pull)
gcpubsub:
topic: ansible-topic-example
subscription:
- name: mysub
state: present
## Create Subscription with ack_deadline and push endpoint
### pull is default, ack_deadline is not required
gcpubsub:
topic: ansible-topic-example
subscription:
- name: mysub
ack_deadline: "60"
push_endpoint: http://pushendpoint.example.com
state: present
## Subscription change from push to pull
### setting push_endpoint to "None" converts subscription to pull.
gcpubsub:
topic: ansible-topic-example
subscription:
name: mysub
push_endpoint: "None"
## Delete subscription
### Topic will not be deleted
gcpubsub:
topic: ansible-topic-example
subscription:
- name: mysub
state: absent
## Pull messages from subscription
### only pull keyword is required.
gcpubsub:
topic: ansible-topic-example
subscription:
name: ansible-topic-example-sub
pull:
message_ack: yes
max_messages: "100"
'''
RETURN = '''
publish:
description: List of dictionaries describing messages and attributes to be published. Dictionary is in message(str):attributes(dict) format.
Only message is required.
returned: Only when specified
type: list
sample: "publish: ['message': 'my message', attributes: {'key1': 'value1'}]"
pulled_messages:
description: list of dictionaries containing message info. Fields are ack_id, attributes, data, message_id.
returned: Only when subscription.pull is specified
type: list
sample: [{ "ack_id": "XkASTCcYREl...","attributes": {"key1": "val1",...}, "data": "this is message 1", "message_id": "49107464153705"},..]
state:
description: The state of the topic or subscription. Value will be either 'absent' or 'present'.
returned: Always
type: str
sample: "present"
subscription:
description: Name of subscription.
returned: When subscription fields are specified
type: str
sample: "mysubscription"
topic:
description: Name of topic.
returned: Always
type: str
sample: "mytopic"
'''
try:
from ast import literal_eval
HAS_PYTHON26 = True
except ImportError:
HAS_PYTHON26 = False
try:
from google.cloud import pubsub
HAS_GOOGLE_CLOUD_PUBSUB = True
except ImportError as e:
HAS_GOOGLE_CLOUD_PUBSUB = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.gcp import check_min_pkg_version, get_google_cloud_credentials
CLOUD_CLIENT = 'google-cloud-pubsub'
CLOUD_CLIENT_MINIMUM_VERSION = '0.22.0'
CLOUD_CLIENT_USER_AGENT = 'ansible-pubsub-0.1'
def publish_messages(message_list, topic):
    """Publish every entry of *message_list* to *topic* in one batch.

    :param message_list: list of dicts, each with a required 'message' key
        and an optional 'attributes' dict of key/value metadata.
    :param topic: pubsub Topic object supporting batch().
    :return: True. Publishing happens when the batch context exits.
    """
    with topic.batch() as batch:
        for entry in message_list:
            batch.publish(bytes(entry['message']),
                          **entry.get('attributes', {}))
    return True
def pull_messages(pull_params, sub):
    """Pull messages from a subscription and optionally acknowledge them.

    :param pull_params: dict of pull options: max_messages (int or None),
        message_ack (bool, default False), return_immediately (bool).
    :param sub: pubsub Subscription object to pull from.
    :rtype: tuple (output, changed) where output is a list of dicts with
        keys message_id/attributes/data/ack_id, and changed is True only
        when messages were acknowledged.
    """
    changed = False
    max_messages = pull_params.get('max_messages', None)
    # Bug fix: the default used to be the string 'no', which is truthy in
    # Python, so messages were acknowledged (and changed reported True)
    # even when message_ack was not requested. Default to False instead.
    message_ack = pull_params.get('message_ack', False)
    # NOTE(review): DOCUMENTATION says return_immediately defaults to True,
    # but the historical code default is False (blocking pull). Kept as
    # False to avoid changing behaviour for existing callers -- confirm.
    return_immediately = pull_params.get('return_immediately', False)
    output = []
    pulled = sub.pull(return_immediately=return_immediately,
                      max_messages=max_messages)
    for ack_id, msg in pulled:
        output.append({'message_id': msg.message_id,
                       'attributes': msg.attributes,
                       'data': msg.data,
                       'ack_id': ack_id})
    if message_ack:
        ack_ids = [m['ack_id'] for m in output]
        if ack_ids:
            sub.acknowledge(ack_ids)
            changed = True
    return (output, changed)
def main():
    """Entry point for the gcpubsub Ansible module.

    Validates dependencies, builds a Pub/Sub client from the module's GCP
    credentials, creates or deletes the topic and/or subscription per
    'state', then performs any requested pull and publish operations before
    exiting with a JSON result.
    """
    module = AnsibleModule(argument_spec=dict(
        topic=dict(required=True),
        state=dict(choices=['absent', 'present'], default='present'),
        publish=dict(type='list', default=None),
        subscription=dict(type='dict', default=None),
        service_account_email=dict(),
        credentials_file=dict(),
        project_id=dict(), ),)
    # Fail fast on missing dependencies before touching any GCP API.
    if not HAS_PYTHON26:
        module.fail_json(
            msg="GCE module requires python's 'ast' module, python v2.6+")
    if not HAS_GOOGLE_CLOUD_PUBSUB:
        module.fail_json(msg="Please install google-cloud-pubsub library.")
    if not check_min_pkg_version(CLOUD_CLIENT, CLOUD_CLIENT_MINIMUM_VERSION):
        module.fail_json(msg="Please install %s client version %s" % (CLOUD_CLIENT, CLOUD_CLIENT_MINIMUM_VERSION))
    # mod_params is also merged into the JSON output at the end.
    mod_params = {}
    mod_params['publish'] = module.params.get('publish')
    mod_params['state'] = module.params.get('state')
    mod_params['topic'] = module.params.get('topic')
    mod_params['subscription'] = module.params.get('subscription')
    creds, params = get_google_cloud_credentials(module)
    pubsub_client = pubsub.Client(project=params['project_id'], credentials=creds, use_gax=False)
    pubsub_client.user_agent = CLOUD_CLIENT_USER_AGENT
    changed = False
    json_output = {}
    t = None
    if mod_params['topic']:
        t = pubsub_client.topic(mod_params['topic'])
    s = None
    if mod_params['subscription']:
        # Note: default ack deadline cannot be changed without deleting/recreating subscription
        s = t.subscription(mod_params['subscription']['name'],
                           ack_deadline=mod_params['subscription'].get('ack_deadline', None),
                           push_endpoint=mod_params['subscription'].get('push_endpoint', None))
    if mod_params['state'] == 'absent':
        # Remove the most granular resource. If subscription is specified
        # we remove it. If only topic is specified, that is what is removed.
        # Note that a topic can be removed without first removing the subscription.
        # TODO(supertom): Enhancement: Provide an option to only delete a topic
        # if there are no subscriptions associated with it (which the API does not support).
        if s is not None:
            if s.exists():
                s.delete()
                changed = True
        else:
            if t.exists():
                t.delete()
                changed = True
    elif mod_params['state'] == 'present':
        if not t.exists():
            t.create()
            changed = True
        if s:
            if not s.exists():
                s.create()
                s.reload()
                changed = True
            else:
                # Subscription operations
                # TODO(supertom): if more 'update' operations arise, turn this into a function.
                s.reload()
                push_endpoint=mod_params['subscription'].get('push_endpoint', None)
                if push_endpoint is not None:
                    if push_endpoint != s.push_endpoint:
                        # The literal string 'None' converts a push
                        # subscription back to a pull subscription.
                        if push_endpoint == 'None':
                            push_endpoint = None
                        s.modify_push_configuration(push_endpoint=push_endpoint)
                        s.reload()
                        changed = push_endpoint == s.push_endpoint
                if 'pull' in mod_params['subscription']:
                    # Pulling is only valid for pull subscriptions.
                    if s.push_endpoint is not None:
                        module.fail_json(msg="Cannot pull messages, push_endpoint is configured.")
                    (json_output['pulled_messages'], changed) = pull_messages(
                        mod_params['subscription']['pull'], s)
    # publish messages to the topic
    if mod_params['publish'] and len(mod_params['publish']) > 0:
        changed = publish_messages(mod_params['publish'], t)
    json_output['changed'] = changed
    json_output.update(mod_params)
    module.exit_json(**json_output)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| |
# Copyright (C) 2012 - 2014 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for consistency group code.
"""
import ddt
import mock
from oslo_serialization import jsonutils
from six.moves import http_client
import webob
from cinder import context
from cinder import db
from cinder import exception
import cinder.group
from cinder.i18n import _
from cinder import objects
from cinder.objects import fields
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit.api.v2 import fakes as v2_fakes
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import utils
from cinder.volume import api as volume_api
@ddt.ddt
class ConsistencyGroupsAPITestCase(test.TestCase):
"""Test Case for consistency groups API."""
    def setUp(self):
        """Create the group API plus admin and non-admin request contexts."""
        super(ConsistencyGroupsAPITestCase, self).setUp()
        self.cg_api = cinder.group.API()
        # Admin context, used for direct object/DB manipulation in tests.
        self.ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID,
                                           auth_token=True,
                                           is_admin=True)
        # Non-admin context, used to exercise the API as a regular user.
        self.user_ctxt = context.RequestContext(
            fake.USER_ID, fake.PROJECT_ID, auth_token=True)
def _create_consistencygroup(
self,
ctxt=None,
name='test_consistencygroup',
user_id=fake.USER_ID,
project_id=fake.PROJECT_ID,
description='this is a test consistency group',
group_type_id=fake.GROUP_TYPE_ID,
volume_type_ids=[fake.VOLUME_TYPE_ID],
availability_zone='az1',
host='fakehost',
status=fields.ConsistencyGroupStatus.CREATING,
**kwargs):
"""Create a consistency group object."""
ctxt = ctxt or self.ctxt
consistencygroup = objects.Group(ctxt)
consistencygroup.user_id = user_id
consistencygroup.project_id = project_id
consistencygroup.availability_zone = availability_zone
consistencygroup.name = name
consistencygroup.description = description
consistencygroup.group_type_id = group_type_id
consistencygroup.volume_type_ids = volume_type_ids
consistencygroup.host = host
consistencygroup.status = status
consistencygroup.update(kwargs)
consistencygroup.create()
return consistencygroup
    def test_show_consistencygroup(self):
        """GET of an existing CG returns 200 with all expected fields."""
        vol_type = utils.create_volume_type(context.get_admin_context(),
                                            self, name='my_vol_type')
        consistencygroup = self._create_consistencygroup(
            volume_type_ids=[vol_type['id']])
        req = webob.Request.blank('/v2/%s/consistencygroups/%s' %
                                  (fake.PROJECT_ID, consistencygroup.id))
        req.method = 'GET'
        req.headers['Content-Type'] = 'application/json'
        res = req.get_response(fakes.wsgi_app(
            fake_auth_context=self.user_ctxt))
        res_dict = jsonutils.loads(res.body)
        # Clean up the DB record before making assertions.
        consistencygroup.destroy()
        self.assertEqual(http_client.OK, res.status_int)
        self.assertEqual('az1',
                         res_dict['consistencygroup']['availability_zone'])
        self.assertEqual('this is a test consistency group',
                         res_dict['consistencygroup']['description'])
        self.assertEqual('test_consistencygroup',
                         res_dict['consistencygroup']['name'])
        self.assertEqual('creating',
                         res_dict['consistencygroup']['status'])
        self.assertEqual([vol_type['id']],
                         res_dict['consistencygroup']['volume_types'])
    def test_show_consistencygroup_with_consistencygroup_NotFound(self):
        """GET of a nonexistent CG returns 404 with an itemNotFound error."""
        req = webob.Request.blank('/v2/%s/consistencygroups/%s' %
                                  (fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID))
        req.method = 'GET'
        req.headers['Content-Type'] = 'application/json'
        res = req.get_response(fakes.wsgi_app(
            fake_auth_context=self.user_ctxt))
        res_dict = jsonutils.loads(res.body)
        self.assertEqual(http_client.NOT_FOUND, res.status_int)
        self.assertEqual(http_client.NOT_FOUND,
                         res_dict['itemNotFound']['code'])
        self.assertEqual('Group %s could not be found.' %
                         fake.WILL_NOT_BE_FOUND_ID,
                         res_dict['itemNotFound']['message'])
    def test_show_consistencygroup_with_null_volume_type(self):
        """GET of a CG with a null volume type reports empty volume_types."""
        # NOTE(review): 'volume_type_id' (singular) is not a named parameter
        # of _create_consistencygroup, so it is applied via **kwargs/update()
        # -- confirm this is the intended way to null out the volume type.
        consistencygroup = self._create_consistencygroup(volume_type_id=None)
        req = webob.Request.blank('/v2/%s/consistencygroups/%s' %
                                  (fake.PROJECT_ID, consistencygroup.id))
        req.method = 'GET'
        req.headers['Content-Type'] = 'application/json'
        res = req.get_response(fakes.wsgi_app(
            fake_auth_context=self.user_ctxt))
        res_dict = jsonutils.loads(res.body)
        self.assertEqual(http_client.OK, res.status_int)
        self.assertEqual('az1',
                         res_dict['consistencygroup']['availability_zone'])
        self.assertEqual('this is a test consistency group',
                         res_dict['consistencygroup']['description'])
        self.assertEqual('test_consistencygroup',
                         res_dict['consistencygroup']['name'])
        self.assertEqual('creating',
                         res_dict['consistencygroup']['status'])
        self.assertEqual([], res_dict['consistencygroup']['volume_types'])
        consistencygroup.destroy()
    @ddt.data(2, 3)
    def test_list_consistencygroups_json(self, version):
        """Listing returns CGs newest-first on both v2 and v3 endpoints."""
        consistencygroup1 = self._create_consistencygroup()
        consistencygroup2 = self._create_consistencygroup()
        consistencygroup3 = self._create_consistencygroup()
        req = webob.Request.blank('/v%(version)s/%(project_id)s/'
                                  'consistencygroups'
                                  % {'version': version,
                                     'project_id': fake.PROJECT_ID})
        req.method = 'GET'
        req.headers['Content-Type'] = 'application/json'
        res = req.get_response(fakes.wsgi_app(
            fake_auth_context=self.user_ctxt))
        res_dict = jsonutils.loads(res.body)
        self.assertEqual(http_client.OK, res.status_int)
        # Newest group comes first in the listing.
        self.assertEqual(consistencygroup3.id,
                         res_dict['consistencygroups'][0]['id'])
        self.assertEqual('test_consistencygroup',
                         res_dict['consistencygroups'][0]['name'])
        self.assertEqual(consistencygroup2.id,
                         res_dict['consistencygroups'][1]['id'])
        self.assertEqual('test_consistencygroup',
                         res_dict['consistencygroups'][1]['name'])
        self.assertEqual(consistencygroup1.id,
                         res_dict['consistencygroups'][2]['id'])
        self.assertEqual('test_consistencygroup',
                         res_dict['consistencygroups'][2]['name'])
        consistencygroup1.destroy()
        consistencygroup2.destroy()
        consistencygroup3.destroy()
    @ddt.data(False, True)
    def test_list_consistencygroups_with_limit(self, is_detail):
        """limit=1 returns only the newest CG plus a next-page marker link."""
        consistencygroup1 = self._create_consistencygroup()
        consistencygroup2 = self._create_consistencygroup()
        consistencygroup3 = self._create_consistencygroup()
        url = '/v2/%s/consistencygroups?limit=1' % fake.PROJECT_ID
        if is_detail:
            url = '/v2/%s/consistencygroups/detail?limit=1' % fake.PROJECT_ID
        req = webob.Request.blank(url)
        req.method = 'GET'
        req.headers['Content-Type'] = 'application/json'
        res = req.get_response(fakes.wsgi_app(
            fake_auth_context=self.user_ctxt))
        res_dict = jsonutils.loads(res.body)
        self.assertEqual(http_client.OK, res.status_int)
        self.assertEqual(1, len(res_dict['consistencygroups']))
        self.assertEqual(consistencygroup3.id,
                         res_dict['consistencygroups'][0]['id'])
        # The pagination link must carry the returned CG's id as the marker.
        next_link = (
            'http://localhost/v2/%s/consistencygroups?limit='
            '1&marker=%s' %
            (fake.PROJECT_ID, res_dict['consistencygroups'][0]['id']))
        self.assertEqual(next_link,
                         res_dict['consistencygroup_links'][0]['href'])
        consistencygroup1.destroy()
        consistencygroup2.destroy()
        consistencygroup3.destroy()
    @ddt.data(False, True)
    def test_list_consistencygroups_with_offset(self, is_detail):
        """offset=1 skips the newest CG in the listing."""
        consistencygroup1 = self._create_consistencygroup()
        consistencygroup2 = self._create_consistencygroup()
        consistencygroup3 = self._create_consistencygroup()
        url = '/v2/%s/consistencygroups?offset=1' % fake.PROJECT_ID
        if is_detail:
            url = '/v2/%s/consistencygroups/detail?offset=1' % fake.PROJECT_ID
        req = webob.Request.blank(url)
        req.method = 'GET'
        req.headers['Content-Type'] = 'application/json'
        res = req.get_response(fakes.wsgi_app(
            fake_auth_context=self.user_ctxt))
        res_dict = jsonutils.loads(res.body)
        self.assertEqual(http_client.OK, res.status_int)
        self.assertEqual(2, len(res_dict['consistencygroups']))
        self.assertEqual(consistencygroup2.id,
                         res_dict['consistencygroups'][0]['id'])
        self.assertEqual(consistencygroup1.id,
                         res_dict['consistencygroups'][1]['id'])
        consistencygroup1.destroy()
        consistencygroup2.destroy()
        consistencygroup3.destroy()
@ddt.data(False, True)
def test_list_consistencygroups_with_offset_out_of_range(self, is_detail):
url = ('/v2/%s/consistencygroups?offset=234523423455454' %
fake.PROJECT_ID)
if is_detail:
url = ('/v2/%s/consistencygroups/detail?offset=234523423455454' %
fake.PROJECT_ID)
req = webob.Request.blank(url)
req.method = 'GET'
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app(
fake_auth_context=self.user_ctxt))
self.assertEqual(http_client.BAD_REQUEST, res.status_int)
    @ddt.data(False, True)
    def test_list_consistencygroups_with_limit_and_offset(self, is_detail):
        """limit=2&offset=1 returns the middle and oldest of three CGs."""
        consistencygroup1 = self._create_consistencygroup()
        consistencygroup2 = self._create_consistencygroup()
        consistencygroup3 = self._create_consistencygroup()
        url = '/v2/%s/consistencygroups?limit=2&offset=1' % fake.PROJECT_ID
        if is_detail:
            url = ('/v2/%s/consistencygroups/detail?limit=2&offset=1' %
                   fake.PROJECT_ID)
        req = webob.Request.blank(url)
        req.method = 'GET'
        req.headers['Content-Type'] = 'application/json'
        res = req.get_response(fakes.wsgi_app(
            fake_auth_context=self.user_ctxt))
        res_dict = jsonutils.loads(res.body)
        self.assertEqual(http_client.OK, res.status_int)
        self.assertEqual(2, len(res_dict['consistencygroups']))
        self.assertEqual(consistencygroup2.id,
                         res_dict['consistencygroups'][0]['id'])
        self.assertEqual(consistencygroup1.id,
                         res_dict['consistencygroups'][1]['id'])
        consistencygroup1.destroy()
        consistencygroup2.destroy()
        consistencygroup3.destroy()
    @ddt.data(False, True)
    def test_list_consistencygroups_with_filter(self, is_detail):
        """Admin all_tenants listing filtered by id returns only that CG."""
        consistencygroup1 = self._create_consistencygroup()
        consistencygroup2 = self._create_consistencygroup()
        # The third group is created under a non-admin context so the
        # all_tenants admin listing has to cross contexts to find it.
        common_ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID,
                                             auth_token=True,
                                             is_admin=False)
        consistencygroup3 = self._create_consistencygroup(ctxt=common_ctxt)
        url = ('/v2/%s/consistencygroups?'
               'all_tenants=True&id=%s') % (fake.PROJECT_ID,
                                            consistencygroup3.id)
        if is_detail:
            url = ('/v2/%s/consistencygroups/detail?'
                   'all_tenants=True&id=%s') % (fake.PROJECT_ID,
                                                consistencygroup3.id)
        req = webob.Request.blank(url)
        req.method = 'GET'
        req.headers['Content-Type'] = 'application/json'
        res = req.get_response(fakes.wsgi_app(fake_auth_context=self.ctxt))
        res_dict = jsonutils.loads(res.body)
        self.assertEqual(http_client.OK, res.status_int)
        self.assertEqual(1, len(res_dict['consistencygroups']))
        self.assertEqual(consistencygroup3.id,
                         res_dict['consistencygroups'][0]['id'])
        consistencygroup1.destroy()
        consistencygroup2.destroy()
        consistencygroup3.destroy()
@ddt.data(False, True)
def test_list_consistencygroups_with_project_id(self, is_detail):
consistencygroup1 = self._create_consistencygroup()
consistencygroup2 = self._create_consistencygroup(
name="group", project_id=fake.PROJECT2_ID)
url = ('/v2/%s/consistencygroups?'
'all_tenants=True&project_id=%s') % (fake.PROJECT_ID,
fake.PROJECT2_ID)
if is_detail:
url = ('/v2/%s/consistencygroups/detail?'
'all_tenants=True&project_id=%s') % (fake.PROJECT_ID,
fake.PROJECT2_ID)
req = webob.Request.blank(url)
req.method = 'GET'
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app(fake_auth_context=self.ctxt))
res_dict = jsonutils.loads(res.body)
self.assertEqual(200, res.status_int)
self.assertEqual(1, len(res_dict['consistencygroups']))
self.assertEqual("group",
res_dict['consistencygroups'][0]['name'])
consistencygroup1.destroy()
consistencygroup2.destroy()
    @ddt.data(False, True)
    def test_list_consistencygroups_with_sort(self, is_detail):
        """sort=id:asc returns the listing ordered by id ascending."""
        consistencygroup1 = self._create_consistencygroup()
        consistencygroup2 = self._create_consistencygroup()
        consistencygroup3 = self._create_consistencygroup()
        url = '/v2/%s/consistencygroups?sort=id:asc' % fake.PROJECT_ID
        if is_detail:
            url = ('/v2/%s/consistencygroups/detail?sort=id:asc' %
                   fake.PROJECT_ID)
        req = webob.Request.blank(url)
        req.method = 'GET'
        req.headers['Content-Type'] = 'application/json'
        res = req.get_response(fakes.wsgi_app(
            fake_auth_context=self.user_ctxt))
        res_dict = jsonutils.loads(res.body)
        # Creation order does not determine id order, so compute the
        # expected ascending order by sorting the ids.
        expect_result = [consistencygroup1.id, consistencygroup2.id,
                         consistencygroup3.id]
        expect_result.sort()
        self.assertEqual(http_client.OK, res.status_int)
        self.assertEqual(3, len(res_dict['consistencygroups']))
        self.assertEqual(expect_result[0],
                         res_dict['consistencygroups'][0]['id'])
        self.assertEqual(expect_result[1],
                         res_dict['consistencygroups'][1]['id'])
        self.assertEqual(expect_result[2],
                         res_dict['consistencygroups'][2]['id'])
        consistencygroup1.destroy()
        consistencygroup2.destroy()
        consistencygroup3.destroy()
def test_list_consistencygroups_detail_json(self):
vol_type1 = utils.create_volume_type(context.get_admin_context(),
self, name='my_vol_type1')
vol_type2 = utils.create_volume_type(context.get_admin_context(),
self, name='my_vol_type2')
consistencygroup1 = self._create_consistencygroup(
volume_type_ids=[vol_type1['id']])
consistencygroup2 = self._create_consistencygroup(
volume_type_ids=[vol_type1['id']])
consistencygroup3 = self._create_consistencygroup(
volume_type_ids=[vol_type1['id'], vol_type2['id']])
req = webob.Request.blank('/v2/%s/consistencygroups/detail' %
fake.PROJECT_ID)
req.method = 'GET'
req.headers['Content-Type'] = 'application/json'
req.headers['Accept'] = 'application/json'
res = req.get_response(fakes.wsgi_app(
fake_auth_context=self.user_ctxt))
res_dict = jsonutils.loads(res.body)
cg_ids = [consistencygroup1.id, consistencygroup2.id,
consistencygroup3.id]
vol_type_ids = [vol_type1['id'], vol_type2['id']]
consistencygroup1.destroy()
consistencygroup2.destroy()
consistencygroup3.destroy()
self.assertEqual(http_client.OK, res.status_int)
self.assertEqual('az1',
res_dict['consistencygroups'][0]['availability_zone'])
self.assertEqual('this is a test consistency group',
res_dict['consistencygroups'][0]['description'])
self.assertEqual('test_consistencygroup',
res_dict['consistencygroups'][0]['name'])
self.assertIn(res_dict['consistencygroups'][0]['id'], cg_ids)
self.assertEqual('creating',
res_dict['consistencygroups'][0]['status'])
for vol_type_id in res_dict['consistencygroups'][0]['volume_types']:
self.assertIn(vol_type_id, vol_type_ids)
self.assertEqual('az1',
res_dict['consistencygroups'][1]['availability_zone'])
self.assertEqual('this is a test consistency group',
res_dict['consistencygroups'][1]['description'])
self.assertEqual('test_consistencygroup',
res_dict['consistencygroups'][1]['name'])
self.assertIn(res_dict['consistencygroups'][0]['id'], cg_ids)
self.assertEqual('creating',
res_dict['consistencygroups'][1]['status'])
for vol_type_id in res_dict['consistencygroups'][1]['volume_types']:
self.assertIn(vol_type_id, vol_type_ids)
self.assertEqual('az1',
res_dict['consistencygroups'][2]['availability_zone'])
self.assertEqual('this is a test consistency group',
res_dict['consistencygroups'][2]['description'])
self.assertEqual('test_consistencygroup',
res_dict['consistencygroups'][2]['name'])
self.assertIn(res_dict['consistencygroups'][0]['id'], cg_ids)
self.assertEqual('creating',
res_dict['consistencygroups'][2]['status'])
for vol_type_id in res_dict['consistencygroups'][2]['volume_types']:
self.assertIn(vol_type_id, vol_type_ids)
@mock.patch(
'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
def test_create_consistencygroup_json(self, mock_validate):
group_id = fake.CONSISTENCY_GROUP_ID
# Create volume type
vol_type = 'test'
vol_type_id = db.volume_type_create(
self.ctxt, {'name': vol_type, 'extra_specs': {}})['id']
body = {"consistencygroup": {"name": "cg1",
"volume_types": vol_type_id,
"description":
"Consistency Group 1", }}
req = webob.Request.blank('/v2/%s/consistencygroups' % fake.PROJECT_ID)
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = jsonutils.dump_as_bytes(body)
res = req.get_response(fakes.wsgi_app(
fake_auth_context=self.user_ctxt))
res_dict = jsonutils.loads(res.body)
self.assertEqual(http_client.ACCEPTED, res.status_int)
self.assertIn('id', res_dict['consistencygroup'])
self.assertTrue(mock_validate.called)
group_id = res_dict['consistencygroup']['id']
cg = objects.Group.get_by_id(self.ctxt, group_id)
cg.destroy()
    def test_create_consistencygroup_with_no_body(self):
        """POST without a request body returns 400 with a descriptive error."""
        # omit body from the request
        req = webob.Request.blank('/v2/%s/consistencygroups' % fake.PROJECT_ID)
        req.body = jsonutils.dump_as_bytes(None)
        req.method = 'POST'
        req.headers['Content-Type'] = 'application/json'
        req.headers['Accept'] = 'application/json'
        res = req.get_response(fakes.wsgi_app(
            fake_auth_context=self.user_ctxt))
        res_dict = jsonutils.loads(res.body)
        self.assertEqual(http_client.BAD_REQUEST, res.status_int)
        self.assertEqual(http_client.BAD_REQUEST,
                         res_dict['badRequest']['code'])
        self.assertEqual("Missing required element 'consistencygroup' in "
                         "request body.",
                         res_dict['badRequest']['message'])
def test_delete_consistencygroup_available(self):
    """Deleting an AVAILABLE CG is accepted and moves it to 'deleting'."""
    cg = self._create_consistencygroup(
        status=fields.ConsistencyGroupStatus.AVAILABLE)
    request = webob.Request.blank('/v2/%s/consistencygroups/%s/delete' %
                                  (fake.PROJECT_ID, cg.id))
    request.method = 'POST'
    request.headers['Content-Type'] = 'application/json'
    request.body = jsonutils.dump_as_bytes({})

    response = request.get_response(fakes.wsgi_app())
    cg = objects.Group.get_by_id(self.ctxt, cg.id)

    self.assertEqual(http_client.ACCEPTED, response.status_int)
    self.assertEqual('deleting', cg.status)
    cg.destroy()
def test_delete_consistencygroup_available_used_as_source_success(self):
    """A CG sourced by another non-CREATING CG can still be deleted."""
    cg = self._create_consistencygroup(
        status=fields.ConsistencyGroupStatus.AVAILABLE)
    # The other CG used the first CG as source, but it's no longer in
    # creating status, so we should be able to delete it.
    derived_cg = self._create_consistencygroup(
        status=fields.ConsistencyGroupStatus.AVAILABLE,
        source_cgid=cg.id)
    request = webob.Request.blank('/v2/%s/consistencygroups/%s/delete' %
                                  (fake.PROJECT_ID, cg.id))
    request.method = 'POST'
    request.headers['Content-Type'] = 'application/json'
    request.body = jsonutils.dump_as_bytes({})

    response = request.get_response(fakes.wsgi_app())
    cg = objects.Group.get_by_id(self.ctxt, cg.id)

    self.assertEqual(http_client.ACCEPTED, response.status_int)
    self.assertEqual('deleting', cg.status)
    cg.destroy()
    derived_cg.destroy()
def test_delete_consistencygroup_available_no_force(self):
    """An explicit force=False still deletes an AVAILABLE CG."""
    cg = self._create_consistencygroup(status='available')
    request = webob.Request.blank('/v2/%s/consistencygroups/%s/delete' %
                                  (fake.PROJECT_ID, cg.id))
    request.method = 'POST'
    request.headers['Content-Type'] = 'application/json'
    request.body = jsonutils.dump_as_bytes(
        {"consistencygroup": {"force": False}})

    response = request.get_response(
        fakes.wsgi_app(fake_auth_context=self.user_ctxt))
    cg = objects.Group.get_by_id(self.ctxt, cg.id)

    self.assertEqual(http_client.ACCEPTED, response.status_int)
    self.assertEqual(fields.ConsistencyGroupStatus.DELETING, cg.status)
    cg.destroy()
def test_delete_consistencygroup_with_consistencygroup_NotFound(self):
    """Deleting a nonexistent CG returns 404 with a descriptive message."""
    request = webob.Request.blank(
        '/v2/%s/consistencygroups/%s/delete' %
        (fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID))
    request.method = 'POST'
    request.headers['Content-Type'] = 'application/json'
    request.body = jsonutils.dump_as_bytes(None)

    response = request.get_response(
        fakes.wsgi_app(fake_auth_context=self.user_ctxt))
    error = jsonutils.loads(response.body)['itemNotFound']

    self.assertEqual(http_client.NOT_FOUND, response.status_int)
    self.assertEqual(http_client.NOT_FOUND, error['code'])
    self.assertEqual('Group %s could not be found.' %
                     fake.WILL_NOT_BE_FOUND_ID,
                     error['message'])
def test_delete_consistencygroup_with_invalid_consistencygroup(self):
    """A CG still in CREATING state cannot be deleted without force."""
    cg = self._create_consistencygroup(
        status=fields.ConsistencyGroupStatus.CREATING)
    self._assert_deleting_result_400(cg.id)
    cg.destroy()
def test_delete_consistencygroup_invalid_force(self):
    """force=True allows deleting a CG even while it is CREATING.

    Fix: unlike every sibling delete test, the original never destroyed
    the consistency group it created, leaking the row into subsequent
    tests; the cleanup call is added.
    """
    consistencygroup = self._create_consistencygroup(
        status=fields.ConsistencyGroupStatus.CREATING)
    req = webob.Request.blank('/v2/%s/consistencygroups/%s/delete' %
                              (fake.PROJECT_ID, consistencygroup.id))
    req.method = 'POST'
    req.headers['Content-Type'] = 'application/json'
    body = {"consistencygroup": {"force": True}}
    req.body = jsonutils.dump_as_bytes(body)
    res = req.get_response(fakes.wsgi_app())
    consistencygroup = objects.Group.get_by_id(
        self.ctxt, consistencygroup.id)
    self.assertEqual(http_client.ACCEPTED, res.status_int)
    self.assertEqual('deleting', consistencygroup.status)
    consistencygroup.destroy()
def test_delete_consistencygroup_no_host(self):
    """A host-less CG in ERROR state is force-deleted straight in the DB."""
    cg = self._create_consistencygroup(
        host=None,
        status=fields.ConsistencyGroupStatus.ERROR)
    request = webob.Request.blank('/v2/%s/consistencygroups/%s/delete' %
                                  (fake.PROJECT_ID, cg.id))
    request.method = 'POST'
    request.headers['Content-Type'] = 'application/json'
    request.body = jsonutils.dump_as_bytes(
        {"consistencygroup": {"force": True}})

    response = request.get_response(
        fakes.wsgi_app(fake_auth_context=self.user_ctxt))
    self.assertEqual(http_client.ACCEPTED, response.status_int)

    # Read back with read_deleted='yes': the group is expected to be
    # gone already, with its host still unset.
    deleted_cg = objects.Group.get_by_id(
        context.get_admin_context(read_deleted='yes'),
        cg.id)
    self.assertEqual(fields.ConsistencyGroupStatus.DELETED,
                     deleted_cg.status)
    self.assertIsNone(deleted_cg.host)
@mock.patch('cinder.quota.GROUP_QUOTAS.reserve',
            return_value='reservations')
@mock.patch('cinder.quota.GROUP_QUOTAS.commit')
def test_create_delete_consistencygroup_update_quota(self, mock_commit,
                                                     mock_reserve):
    """Creating and deleting a CG must reserve, commit and release quota."""
    name = 'mycg'
    description = 'consistency group 1'
    fake_grp_type = {'id': fake.GROUP_TYPE_ID, 'name': 'fake_grp_type'}
    fake_vol_type = {'id': fake.VOLUME_TYPE_ID, 'name': 'fake_vol_type'}
    # Stub out type lookups plus the scheduler cast and quota update so
    # only the API layer's quota bookkeeping is exercised.
    self.mock_object(db, 'group_type_get',
                     return_value=fake_grp_type)
    self.mock_object(db, 'volume_types_get_by_name_or_id',
                     return_value=[fake_vol_type])
    self.mock_object(self.cg_api, '_cast_create_group')
    self.mock_object(self.cg_api, 'update_quota')
    cg = self.cg_api.create(self.ctxt, name, description,
                            fake.GROUP_TYPE_ID, fake_vol_type['name'])
    # Verify the quota reservation and commit was called
    mock_reserve.assert_called_once_with(self.ctxt,
                                         project_id=self.ctxt.project_id,
                                         groups=1)
    mock_commit.assert_called_once_with(self.ctxt, 'reservations')

    self.assertEqual(fields.ConsistencyGroupStatus.CREATING, cg.status)
    self.assertIsNone(cg.host)

    # Deleting the (ERROR-state) group must hand the quota back: one
    # update_quota call with a delta of -1.
    cg.status = fields.ConsistencyGroupStatus.ERROR
    self.cg_api.delete(self.ctxt, cg)

    self.cg_api.update_quota.assert_called_once_with(
        self.ctxt, cg, -1, self.ctxt.project_id)
    # The group row should now be soft-deleted.
    cg = objects.Group.get_by_id(
        context.get_admin_context(read_deleted='yes'),
        cg.id)
    self.assertEqual(fields.ConsistencyGroupStatus.DELETED, cg.status)
def test_delete_consistencygroup_with_invalid_body(self):
    """A delete body missing the 'consistencygroup' element is a 400."""
    cg = self._create_consistencygroup(
        status=fields.ConsistencyGroupStatus.AVAILABLE)
    request = webob.Request.blank('/v2/%s/consistencygroups/%s/delete' %
                                  (fake.PROJECT_ID, cg.id))
    request.method = 'POST'
    request.headers['Content-Type'] = 'application/json'
    request.body = jsonutils.dump_as_bytes(
        {"invalid_request_element": {"force": False}})

    response = request.get_response(
        fakes.wsgi_app(fake_auth_context=self.user_ctxt))
    self.assertEqual(http_client.BAD_REQUEST, response.status_int)
def test_delete_consistencygroup_with_invalid_force_value_in_body(self):
    """A non-boolean 'force' value in the delete body is a 400."""
    cg = self._create_consistencygroup(
        status=fields.ConsistencyGroupStatus.AVAILABLE)
    request = webob.Request.blank('/v2/%s/consistencygroups/%s/delete' %
                                  (fake.PROJECT_ID, cg.id))
    request.method = 'POST'
    request.headers['Content-Type'] = 'application/json'
    request.body = jsonutils.dump_as_bytes(
        {"consistencygroup": {"force": "abcd"}})

    response = request.get_response(
        fakes.wsgi_app(fake_auth_context=self.user_ctxt))
    self.assertEqual(http_client.BAD_REQUEST, response.status_int)
def test_delete_consistencygroup_with_empty_force_value_in_body(self):
    """An empty-string 'force' value in the delete body is a 400."""
    cg = self._create_consistencygroup(
        status=fields.ConsistencyGroupStatus.AVAILABLE)
    request = webob.Request.blank('/v2/%s/consistencygroups/%s/delete' %
                                  (fake.PROJECT_ID, cg.id))
    request.method = 'POST'
    request.headers['Content-Type'] = 'application/json'
    request.body = jsonutils.dump_as_bytes(
        {"consistencygroup": {"force": ""}})

    response = request.get_response(
        fakes.wsgi_app(fake_auth_context=self.user_ctxt))
    self.assertEqual(http_client.BAD_REQUEST, response.status_int)
def _assert_deleting_result_400(self, cg_id, force=False):
    """POST a delete for *cg_id* and assert a 400 badRequest response."""
    request = webob.Request.blank('/v2/%s/consistencygroups/%s/delete' %
                                  (fake.PROJECT_ID, cg_id))
    request.method = 'POST'
    request.headers['Content-Type'] = 'application/json'
    request.body = jsonutils.dump_as_bytes(
        {"consistencygroup": {"force": force}})

    response = request.get_response(fakes.wsgi_app())
    self.assertEqual(http_client.BAD_REQUEST, response.status_int)

    error = jsonutils.loads(response.body)['badRequest']
    self.assertEqual(http_client.BAD_REQUEST, error['code'])
    self.assertIsNotNone(error['message'])
def test_delete_consistencygroup_with_volumes(self):
    """A CG that still contains a volume cannot be deleted unforced."""
    cg = self._create_consistencygroup(status='available')
    utils.create_volume(self.ctxt, group_id=cg.id,
                        testcase_instance=self)
    self._assert_deleting_result_400(cg.id)
    cg.destroy()
def test_delete_consistencygroup_with_cgsnapshot(self):
    """A CG with a dependent cgsnapshot cannot be deleted unforced."""
    cg = self._create_consistencygroup(status='available')
    # If we don't add a volume to the CG the cgsnapshot creation will fail
    volume = utils.create_volume(self.ctxt,
                                 group_id=cg.id,
                                 testcase_instance=self)
    snap_group = utils.create_group_snapshot(
        self.ctxt, cg.id, group_type_id=fake.GROUP_TYPE_ID)
    utils.create_snapshot(self.ctxt, volume_id=volume.id,
                          group_snapshot_id=snap_group.id,
                          testcase_instance=self)
    self._assert_deleting_result_400(cg.id)
    snap_group.destroy()
    cg.destroy()
def test_delete_consistencygroup_with_cgsnapshot_force(self):
    """Even force=True cannot delete a CG with a dependent cgsnapshot."""
    cg = self._create_consistencygroup(status='available')
    # If we don't add a volume to the CG the cgsnapshot creation will fail
    volume = utils.create_volume(self.ctxt,
                                 group_id=cg.id,
                                 testcase_instance=self)
    snap_group = utils.create_group_snapshot(
        self.ctxt, cg.id, group_type_id=fake.GROUP_TYPE_ID)
    utils.create_snapshot(self.ctxt, volume_id=volume.id,
                          group_snapshot_id=snap_group.id,
                          testcase_instance=self)
    self._assert_deleting_result_400(cg.id, force=True)
    snap_group.destroy()
    cg.destroy()
def test_delete_consistencygroup_force_with_volumes(self):
    """force=True deletes a CG even when it still contains volumes."""
    cg = self._create_consistencygroup(status='available')
    utils.create_volume(self.ctxt, consistencygroup_id=cg.id,
                        testcase_instance=self)
    request = webob.Request.blank('/v2/%s/consistencygroups/%s/delete' %
                                  (fake.PROJECT_ID, cg.id))
    request.method = 'POST'
    request.headers['Content-Type'] = 'application/json'
    request.body = jsonutils.dump_as_bytes(
        {"consistencygroup": {"force": True}})

    response = request.get_response(fakes.wsgi_app())
    cg = objects.Group.get_by_id(self.ctxt, cg.id)

    self.assertEqual(http_client.ACCEPTED, response.status_int)
    self.assertEqual('deleting', cg.status)
    cg.destroy()
def test_delete_cg_force_with_volumes_with_deleted_snapshots(self):
    """Already-deleted snapshots do not block a forced CG delete."""
    cg = self._create_consistencygroup(status='available')
    volume = utils.create_volume(self.ctxt, testcase_instance=self,
                                 consistencygroup_id=cg.id)
    utils.create_snapshot(self.ctxt, volume.id, status='deleted',
                          deleted=True, testcase_instance=self)
    request = webob.Request.blank('/v2/%s/consistencygroups/%s/delete' %
                                  (fake.PROJECT_ID, cg.id))
    request.method = 'POST'
    request.headers['Content-Type'] = 'application/json'
    request.body = jsonutils.dump_as_bytes(
        {"consistencygroup": {"force": True}})

    response = request.get_response(fakes.wsgi_app())
    cg = objects.Group.get_by_id(self.ctxt, cg.id)

    self.assertEqual(http_client.ACCEPTED, response.status_int)
    self.assertEqual('deleting', cg.status)
    cg.destroy()
def test_create_consistencygroup_failed_no_volume_type(self):
    """Creating a CG without any volume type must fail with 400."""
    name = 'cg1'
    request = webob.Request.blank('/v2/%s/consistencygroups' %
                                  fake.PROJECT_ID)
    request.method = 'POST'
    request.headers['Content-Type'] = 'application/json'
    request.body = jsonutils.dump_as_bytes(
        {"consistencygroup": {"name": name,
                              "description":
                              "Consistency Group 1", }})

    response = request.get_response(
        fakes.wsgi_app(fake_auth_context=self.user_ctxt))
    error = jsonutils.loads(response.body)['badRequest']

    self.assertEqual(http_client.BAD_REQUEST, response.status_int)
    self.assertEqual(http_client.BAD_REQUEST, error['code'])
    self.assertIsNotNone(error['message'])
@mock.patch(
    'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
def test_update_consistencygroup_success(self, mock_validate):
    """Happy path: rename a CG while adding and removing volumes."""
    volume_type_id = utils.create_volume_type(
        context.get_admin_context(), self, name='my_vol_type')['id']
    fake_grp_type = {'id': fake.GROUP_TYPE_ID, 'name': 'fake_grp_type'}
    self.mock_object(db, 'group_type_get',
                     return_value=fake_grp_type)
    consistencygroup = self._create_consistencygroup(
        status=fields.ConsistencyGroupStatus.AVAILABLE,
        volume_type_ids=[volume_type_id],
        group_type_id=fake.GROUP_TYPE_ID,
        host='test_host')
    # We create another CG from the one we are updating to confirm that
    # it will not affect the update if it is not CREATING
    cg2 = self._create_consistencygroup(
        status=fields.ConsistencyGroupStatus.AVAILABLE,
        host='test_host',
        volume_type_ids=[volume_type_id],
        source_group_id=consistencygroup.id,)
    # Volumes to be removed: one healthy, one in 'error' and one in
    # 'error_deleting' -- all three removals must be accepted.
    remove_volume_id = utils.create_volume(
        self.ctxt,
        testcase_instance=self,
        volume_type_id=volume_type_id,
        group_id=consistencygroup.id)['id']
    remove_volume_id2 = utils.create_volume(
        self.ctxt,
        testcase_instance=self,
        volume_type_id=volume_type_id,
        group_id=consistencygroup.id,
        status='error')['id']
    remove_volume_id3 = utils.create_volume(
        self.ctxt,
        testcase_instance=self,
        volume_type_id=volume_type_id,
        group_id=consistencygroup.id,
        status='error_deleting')['id']

    self.assertEqual(fields.ConsistencyGroupStatus.AVAILABLE,
                     consistencygroup.status)

    # Sanity check: all three volumes really belong to the CG.
    cg_volumes = db.volume_get_all_by_generic_group(self.ctxt.elevated(),
                                                    consistencygroup.id)
    cg_vol_ids = [cg_vol['id'] for cg_vol in cg_volumes]
    self.assertIn(remove_volume_id, cg_vol_ids)
    self.assertIn(remove_volume_id2, cg_vol_ids)
    self.assertIn(remove_volume_id3, cg_vol_ids)

    # Two loose volumes of the CG's volume type to be added.
    add_volume_id = utils.create_volume(
        self.ctxt,
        testcase_instance=self,
        volume_type_id=volume_type_id)['id']
    add_volume_id2 = utils.create_volume(
        self.ctxt,
        testcase_instance=self,
        volume_type_id=volume_type_id)['id']
    req = webob.Request.blank('/v2/%s/consistencygroups/%s/update' %
                              (fake.PROJECT_ID, consistencygroup.id))
    req.method = 'PUT'
    req.headers['Content-Type'] = 'application/json'
    name = 'newcg'
    description = 'New Consistency Group Description'
    # add_volumes / remove_volumes are comma-separated id strings.
    add_volumes = add_volume_id + "," + add_volume_id2
    remove_volumes = ','.join(
        [remove_volume_id, remove_volume_id2, remove_volume_id3])
    body = {"consistencygroup": {"name": name,
                                 "description": description,
                                 "add_volumes": add_volumes,
                                 "remove_volumes": remove_volumes, }}
    req.body = jsonutils.dump_as_bytes(body)
    res = req.get_response(fakes.wsgi_app(
        fake_auth_context=self.user_ctxt))
    consistencygroup = objects.Group.get_by_id(
        self.ctxt, consistencygroup.id)

    self.assertEqual(http_client.ACCEPTED, res.status_int)
    self.assertTrue(mock_validate.called)
    # The update is asynchronous: the CG is left in UPDATING state.
    self.assertEqual(fields.ConsistencyGroupStatus.UPDATING,
                     consistencygroup.status)
    consistencygroup.destroy()
    cg2.destroy()
@mock.patch(
    'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
def test_update_consistencygroup_sourcing_cg(self, mock_validate):
    """Updating a CG that sources a CREATING CG must fail with 400."""
    volume_type_id = fake.VOLUME_TYPE_ID
    consistencygroup = self._create_consistencygroup(
        status=fields.ConsistencyGroupStatus.AVAILABLE,
        host='test_host')
    # cg2 is still being created from consistencygroup; that in-flight
    # dependency is what must block the update below.
    cg2 = self._create_consistencygroup(
        status=fields.ConsistencyGroupStatus.CREATING,
        host='test_host',
        source_cgid=consistencygroup.id)
    remove_volume_id = utils.create_volume(
        self.ctxt,
        volume_type_id=volume_type_id,
        consistencygroup_id=consistencygroup.id)['id']
    remove_volume_id2 = utils.create_volume(
        self.ctxt,
        volume_type_id=volume_type_id,
        consistencygroup_id=consistencygroup.id)['id']
    req = webob.Request.blank('/v2/%s/consistencygroups/%s/update' %
                              (fake.PROJECT_ID, consistencygroup.id))
    req.method = 'PUT'
    req.headers['Content-Type'] = 'application/json'
    name = 'newcg'
    description = 'New Consistency Group Description'
    remove_volumes = remove_volume_id + "," + remove_volume_id2
    body = {"consistencygroup": {"name": name,
                                 "description": description,
                                 "remove_volumes": remove_volumes, }}
    req.body = jsonutils.dump_as_bytes(body)
    res = req.get_response(fakes.wsgi_app())
    consistencygroup = objects.Group.get_by_id(
        self.ctxt, consistencygroup.id)
    self.assertEqual(http_client.BAD_REQUEST, res.status_int)
    # The rejected update must leave the CG's status untouched.
    self.assertEqual(fields.ConsistencyGroupStatus.AVAILABLE,
                     consistencygroup.status)
    consistencygroup.destroy()
    cg2.destroy()
@mock.patch(
    'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
def test_update_consistencygroup_creating_cgsnapshot(self, mock_validate):
    """Updating a CG with an in-flight cgsnapshot must fail with 400."""
    volume_type_id = fake.VOLUME_TYPE_ID
    consistencygroup = self._create_consistencygroup(
        status=fields.ConsistencyGroupStatus.AVAILABLE,
        host='test_host')
    # If we don't add a volume to the CG the cgsnapshot creation will fail
    utils.create_volume(self.ctxt,
                        consistencygroup_id=consistencygroup.id,
                        testcase_instance=self)
    cgsnapshot = utils.create_cgsnapshot(
        self.ctxt, consistencygroup_id=consistencygroup.id)
    add_volume_id = utils.create_volume(
        self.ctxt,
        volume_type_id=volume_type_id)['id']
    add_volume_id2 = utils.create_volume(
        self.ctxt,
        volume_type_id=volume_type_id)['id']
    req = webob.Request.blank('/v2/%s/consistencygroups/%s/update' %
                              (fake.PROJECT_ID, consistencygroup.id))
    req.method = 'PUT'
    req.headers['Content-Type'] = 'application/json'
    name = 'newcg'
    description = 'New Consistency Group Description'
    add_volumes = add_volume_id + "," + add_volume_id2
    body = {"consistencygroup": {"name": name,
                                 "description": description,
                                 "add_volumes": add_volumes}}
    req.body = jsonutils.dump_as_bytes(body)
    res = req.get_response(fakes.wsgi_app())
    consistencygroup = objects.Group.get_by_id(
        self.ctxt, consistencygroup.id)
    self.assertEqual(http_client.BAD_REQUEST, res.status_int)
    # The failed update must not change the CG's state.
    self.assertEqual(fields.ConsistencyGroupStatus.AVAILABLE,
                     consistencygroup.status)
    consistencygroup.destroy()
    cgsnapshot.destroy()
def test_update_consistencygroup_add_volume_not_found(self):
    """Adding a nonexistent volume id on update is rejected with 400."""
    cg = self._create_consistencygroup(
        ctxt=self.ctxt,
        status=fields.ConsistencyGroupStatus.AVAILABLE)
    request = webob.Request.blank('/v2/%s/consistencygroups/%s/update' %
                                  (fake.PROJECT_ID, cg.id))
    request.method = 'PUT'
    request.headers['Content-Type'] = 'application/json'
    request.body = jsonutils.dump_as_bytes(
        {"consistencygroup": {"name": None,
                              "description": None,
                              "add_volumes": "fake-volume-uuid",
                              "remove_volumes": None, }})

    response = request.get_response(
        fakes.wsgi_app(fake_auth_context=self.user_ctxt))
    error = jsonutils.loads(response.body)['badRequest']

    self.assertEqual(http_client.BAD_REQUEST, response.status_int)
    self.assertEqual(http_client.BAD_REQUEST, error['code'])
    self.assertIsNotNone(error['message'])
    cg.destroy()
def test_update_consistencygroup_remove_volume_not_found(self):
    """Removing a nonexistent volume id on update is rejected with 400."""
    cg = self._create_consistencygroup(
        ctxt=self.ctxt,
        status=fields.ConsistencyGroupStatus.AVAILABLE)
    request = webob.Request.blank('/v2/%s/consistencygroups/%s/update' %
                                  (fake.PROJECT_ID, cg.id))
    request.method = 'PUT'
    request.headers['Content-Type'] = 'application/json'
    request.body = jsonutils.dump_as_bytes(
        {"consistencygroup": {"name": None,
                              "description": "new description",
                              "add_volumes": None,
                              "remove_volumes": "fake-volume-uuid", }})

    response = request.get_response(
        fakes.wsgi_app(fake_auth_context=self.user_ctxt))
    error = jsonutils.loads(response.body)['badRequest']

    self.assertEqual(http_client.BAD_REQUEST, response.status_int)
    self.assertEqual(http_client.BAD_REQUEST, error['code'])
    self.assertIsNotNone(error['message'])
    cg.destroy()
def test_update_consistencygroup_empty_parameters(self):
    """An update where every field is empty or None is rejected with 400."""
    cg = self._create_consistencygroup(
        ctxt=self.ctxt,
        status=fields.ConsistencyGroupStatus.AVAILABLE)
    request = webob.Request.blank('/v2/%s/consistencygroups/%s/update' %
                                  (fake.PROJECT_ID, cg.id))
    request.method = 'PUT'
    request.headers['Content-Type'] = 'application/json'
    request.body = jsonutils.dump_as_bytes(
        {"consistencygroup": {"name": "",
                              "description": "",
                              "add_volumes": None,
                              "remove_volumes": None, }})

    response = request.get_response(
        fakes.wsgi_app(fake_auth_context=self.user_ctxt))
    error = jsonutils.loads(response.body)['badRequest']

    self.assertEqual(http_client.BAD_REQUEST, response.status_int)
    self.assertEqual(http_client.BAD_REQUEST, error['code'])
    cg.destroy()
def test_update_consistencygroup_add_volume_invalid_state(self):
    """A volume in an invalid state cannot be added to a CG."""
    cg = self._create_consistencygroup(
        ctxt=self.ctxt,
        status=fields.ConsistencyGroupStatus.AVAILABLE)
    add_volume_id = utils.create_volume(
        self.ctxt,
        volume_type_id=fake.VOLUME_TYPE_ID,
        status='wrong_status')['id']
    request = webob.Request.blank('/v2/%s/consistencygroups/%s/update' %
                                  (fake.PROJECT_ID, cg.id))
    request.method = 'PUT'
    request.headers['Content-Type'] = 'application/json'
    request.body = jsonutils.dump_as_bytes(
        {"consistencygroup": {"name": "cg1",
                              "description": "",
                              "add_volumes": add_volume_id,
                              "remove_volumes": None, }})

    response = request.get_response(
        fakes.wsgi_app(fake_auth_context=self.user_ctxt))
    error = jsonutils.loads(response.body)['badRequest']

    self.assertEqual(http_client.BAD_REQUEST, response.status_int)
    self.assertEqual(http_client.BAD_REQUEST, error['code'])
    self.assertIsNotNone(error['message'])
    cg.destroy()
def test_update_consistencygroup_add_volume_invalid_volume_type(self):
    """A volume of a type the CG does not support cannot be added."""
    cg = self._create_consistencygroup(
        ctxt=self.ctxt,
        status=fields.ConsistencyGroupStatus.AVAILABLE)
    # Volume type 2 is not among the CG's supported types.
    add_volume_id = utils.create_volume(
        self.ctxt,
        volume_type_id=fake.VOLUME_TYPE2_ID)['id']
    request = webob.Request.blank('/v2/%s/consistencygroups/%s/update' %
                                  (fake.PROJECT_ID, cg.id))
    request.method = 'PUT'
    request.headers['Content-Type'] = 'application/json'
    request.body = jsonutils.dump_as_bytes(
        {"consistencygroup": {"name": "cg1",
                              "description": "",
                              "add_volumes": add_volume_id,
                              "remove_volumes": None, }})

    response = request.get_response(
        fakes.wsgi_app(fake_auth_context=self.user_ctxt))
    error = jsonutils.loads(response.body)['badRequest']

    self.assertEqual(http_client.BAD_REQUEST, response.status_int)
    self.assertEqual(http_client.BAD_REQUEST, error['code'])
    self.assertIsNotNone(error['message'])
    cg.destroy()
def test_update_consistencygroup_add_volume_already_in_cg(self):
    """A volume that already belongs to another CG cannot be added."""
    cg = self._create_consistencygroup(
        ctxt=self.ctxt,
        status=fields.ConsistencyGroupStatus.AVAILABLE)
    add_volume_id = utils.create_volume(
        self.ctxt,
        consistencygroup_id=fake.CONSISTENCY_GROUP2_ID)['id']
    request = webob.Request.blank('/v2/%s/consistencygroups/%s/update' %
                                  (fake.PROJECT_ID, cg.id))
    request.method = 'PUT'
    request.headers['Content-Type'] = 'application/json'
    request.body = jsonutils.dump_as_bytes(
        {"consistencygroup": {"name": "cg1",
                              "description": "",
                              "add_volumes": add_volume_id,
                              "remove_volumes": None, }})

    response = request.get_response(
        fakes.wsgi_app(fake_auth_context=self.user_ctxt))
    error = jsonutils.loads(response.body)['badRequest']

    self.assertEqual(http_client.BAD_REQUEST, response.status_int)
    self.assertEqual(http_client.BAD_REQUEST, error['code'])
    self.assertIsNotNone(error['message'])
    cg.destroy()
def test_update_consistencygroup_invalid_state(self):
    """A CG that is not AVAILABLE cannot be updated."""
    volume_type_id = utils.create_volume_type(
        context.get_admin_context(), self, name='my_vol_type')['id']
    cg = self._create_consistencygroup(
        status=fields.ConsistencyGroupStatus.CREATING,
        volume_type_ids=[volume_type_id],
        ctxt=self.ctxt)
    add_volume_id = utils.create_volume(
        self.ctxt,
        testcase_instance=self,
        volume_type_id=volume_type_id)['id']
    request = webob.Request.blank('/v2/%s/consistencygroups/%s/update' %
                                  (fake.PROJECT_ID, cg.id))
    request.method = 'PUT'
    request.headers['Content-Type'] = 'application/json'
    request.body = jsonutils.dump_as_bytes(
        {"consistencygroup": {"name": "new name",
                              "description": None,
                              "add_volumes": add_volume_id,
                              "remove_volumes": None, }})

    response = request.get_response(
        fakes.wsgi_app(fake_auth_context=self.user_ctxt))
    error = jsonutils.loads(response.body)['badRequest']

    self.assertEqual(http_client.BAD_REQUEST, response.status_int)
    self.assertEqual(http_client.BAD_REQUEST, error['code'])
    self.assertIsNotNone(error['message'])
    cg.destroy()
@mock.patch(
    'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
def test_create_consistencygroup_from_src_snap(self, mock_validate):
    """A new CG can be created from an available cgsnapshot."""
    self.mock_object(volume_api.API, "create", v2_fakes.fake_volume_create)

    # Build source CG -> volume -> cgsnapshot -> snapshot, in that order.
    consistencygroup = utils.create_group(
        self.ctxt, group_type_id=fake.GROUP_TYPE_ID,
        volume_type_ids=[fake.VOLUME_TYPE_ID],)
    volume_id = utils.create_volume(
        self.ctxt,
        volume_type_id=fake.VOLUME_TYPE_ID,
        group_id=consistencygroup.id)['id']
    cgsnapshot = utils.create_group_snapshot(
        self.ctxt, group_id=consistencygroup.id,
        group_type_id=fake.GROUP_TYPE_ID)
    snapshot = utils.create_snapshot(
        self.ctxt,
        volume_id,
        group_snapshot_id=cgsnapshot.id,
        status=fields.SnapshotStatus.AVAILABLE)

    test_cg_name = 'test cg'
    body = {"consistencygroup-from-src": {"name": test_cg_name,
                                          "description":
                                          "Consistency Group 1",
                                          "cgsnapshot_id": cgsnapshot.id}}
    req = webob.Request.blank('/v2/%s/consistencygroups/create_from_src' %
                              fake.PROJECT_ID)
    req.method = 'POST'
    req.headers['Content-Type'] = 'application/json'
    req.body = jsonutils.dump_as_bytes(body)
    res = req.get_response(fakes.wsgi_app(
        fake_auth_context=self.user_ctxt))
    res_dict = jsonutils.loads(res.body)

    self.assertEqual(http_client.ACCEPTED, res.status_int)
    self.assertIn('id', res_dict['consistencygroup'])
    self.assertEqual(test_cg_name, res_dict['consistencygroup']['name'])
    self.assertTrue(mock_validate.called)

    # Tear down in dependency order: new CG, snapshot, volume,
    # source CG, cgsnapshot.
    cg_ref = objects.Group.get_by_id(
        self.ctxt.elevated(), res_dict['consistencygroup']['id'])
    cg_ref.destroy()
    snapshot.destroy()
    db.volume_destroy(self.ctxt.elevated(), volume_id)
    consistencygroup.destroy()
    cgsnapshot.destroy()
def test_create_consistencygroup_from_src_cg(self):
    """A new CG can be created from an existing source CG."""
    self.mock_object(volume_api.API, "create", v2_fakes.fake_volume_create)
    source_cg = utils.create_group(
        self.ctxt, group_type_id=fake.GROUP_TYPE_ID,
        volume_type_ids=[fake.VOLUME_TYPE_ID],)
    volume_id = utils.create_volume(
        self.ctxt,
        group_id=source_cg.id)['id']
    test_cg_name = 'test cg'
    request = webob.Request.blank(
        '/v2/%s/consistencygroups/create_from_src' % fake.PROJECT_ID)
    request.method = 'POST'
    request.headers['Content-Type'] = 'application/json'
    request.body = jsonutils.dump_as_bytes(
        {"consistencygroup-from-src": {"name": test_cg_name,
                                       "description":
                                       "Consistency Group 1",
                                       "source_cgid": source_cg.id}})

    response = request.get_response(
        fakes.wsgi_app(fake_auth_context=self.user_ctxt))
    result = jsonutils.loads(response.body)

    self.assertEqual(http_client.ACCEPTED, response.status_int)
    self.assertIn('id', result['consistencygroup'])
    self.assertEqual(test_cg_name, result['consistencygroup']['name'])

    # Clean up: the new CG, then the source volume and the source CG.
    new_cg = objects.Group.get_by_id(
        self.ctxt, result['consistencygroup']['id'])
    new_cg.destroy()
    db.volume_destroy(self.ctxt.elevated(), volume_id)
    source_cg.destroy()
def test_create_consistencygroup_from_src_both_snap_cg(self):
    """Supplying both cgsnapshot_id and source_cgid is rejected (400)."""
    self.mock_object(volume_api.API, "create", v2_fakes.fake_volume_create)

    consistencygroup = utils.create_group(
        self.ctxt, group_type_id=fake.GROUP_TYPE_ID,
        volume_type_ids=[fake.VOLUME_TYPE_ID],)
    volume_id = utils.create_volume(
        self.ctxt,
        group_id=consistencygroup.id)['id']
    cgsnapshot_id = utils.create_group_snapshot(
        self.ctxt,
        group_type_id=fake.GROUP_TYPE_ID,
        group_id=consistencygroup.id)['id']
    snapshot = utils.create_snapshot(
        self.ctxt,
        volume_id,
        group_snapshot_id=cgsnapshot_id,
        status=fields.SnapshotStatus.AVAILABLE)

    test_cg_name = 'test cg'
    # Both source keys at once: the API accepts only one of them.
    body = {"consistencygroup-from-src": {"name": test_cg_name,
                                          "description":
                                          "Consistency Group 1",
                                          "cgsnapshot_id": cgsnapshot_id,
                                          "source_cgid":
                                          consistencygroup.id}}
    req = webob.Request.blank('/v2/%s/consistencygroups/create_from_src' %
                              fake.PROJECT_ID)
    req.method = 'POST'
    req.headers['Content-Type'] = 'application/json'
    req.body = jsonutils.dump_as_bytes(body)
    res = req.get_response(fakes.wsgi_app(
        fake_auth_context=self.user_ctxt))
    res_dict = jsonutils.loads(res.body)

    self.assertEqual(http_client.BAD_REQUEST, res.status_int)
    self.assertEqual(http_client.BAD_REQUEST,
                     res_dict['badRequest']['code'])
    self.assertIsNotNone(res_dict['badRequest']['message'])

    # Tear down in dependency order.
    snapshot.destroy()
    db.cgsnapshot_destroy(self.ctxt.elevated(), cgsnapshot_id)
    db.volume_destroy(self.ctxt.elevated(), volume_id)
    consistencygroup.destroy()
def test_create_consistencygroup_from_src_invalid_body(self):
    """A from-src body missing its root element is rejected with 400."""
    name = 'cg1'
    request = webob.Request.blank(
        '/v2/%s/consistencygroups/create_from_src' % fake.PROJECT_ID)
    request.method = 'POST'
    request.headers['Content-Type'] = 'application/json'
    request.body = jsonutils.dump_as_bytes(
        {"invalid": {"name": name,
                     "description":
                     "Consistency Group 1", }})

    response = request.get_response(
        fakes.wsgi_app(fake_auth_context=self.user_ctxt))
    error = jsonutils.loads(response.body)['badRequest']

    self.assertEqual(http_client.BAD_REQUEST, response.status_int)
    self.assertEqual(http_client.BAD_REQUEST, error['code'])
    # Missing 'consistencygroup-from-src' in the body.
    self.assertIsNotNone(error['message'])
def test_create_consistencygroup_from_src_no_source_id(self):
    """A from-src body with neither cgsnapshot_id nor source_cgid is 400."""
    name = 'cg1'
    request = webob.Request.blank(
        '/v2/%s/consistencygroups/create_from_src' % fake.PROJECT_ID)
    request.method = 'POST'
    request.headers['Content-Type'] = 'application/json'
    request.body = jsonutils.dump_as_bytes(
        {"consistencygroup-from-src": {"name": name,
                                       "description":
                                       "Consistency Group 1", }})

    response = request.get_response(
        fakes.wsgi_app(fake_auth_context=self.user_ctxt))
    error = jsonutils.loads(response.body)['badRequest']

    self.assertEqual(http_client.BAD_REQUEST, response.status_int)
    self.assertEqual(http_client.BAD_REQUEST, error['code'])
    self.assertIsNotNone(error['message'])
def test_create_consistencygroup_from_src_no_host(self):
    """Creating from a source whose CG has no host must fail with 400."""
    # Source CG deliberately created without a host.
    consistencygroup = utils.create_group(
        self.ctxt, group_type_id=fake.GROUP_TYPE_ID,
        volume_type_ids=[fake.VOLUME_TYPE_ID], host=None)
    volume_id = utils.create_volume(
        self.ctxt,
        group_id=consistencygroup.id)['id']
    cgsnapshot = utils.create_group_snapshot(
        self.ctxt, group_id=consistencygroup.id,
        group_type_id=fake.GROUP_TYPE_ID,)
    snapshot = utils.create_snapshot(
        self.ctxt,
        volume_id,
        group_snapshot_id=cgsnapshot.id,
        status=fields.SnapshotStatus.AVAILABLE)

    test_cg_name = 'test cg'
    body = {"consistencygroup-from-src": {"name": test_cg_name,
                                          "description":
                                          "Consistency Group 1",
                                          "cgsnapshot_id": cgsnapshot.id}}
    req = webob.Request.blank('/v2/%s/consistencygroups/create_from_src' %
                              fake.PROJECT_ID)
    req.method = 'POST'
    req.headers['Content-Type'] = 'application/json'
    req.body = jsonutils.dump_as_bytes(body)
    res = req.get_response(fakes.wsgi_app(
        fake_auth_context=self.user_ctxt))
    res_dict = jsonutils.loads(res.body)

    self.assertEqual(http_client.BAD_REQUEST, res.status_int)
    self.assertEqual(http_client.BAD_REQUEST,
                     res_dict['badRequest']['code'])
    # The missing-host failure must be surfaced in the error message.
    msg = _('Invalid Group: No host to create group')
    self.assertIn(msg, res_dict['badRequest']['message'])

    # Tear down in dependency order.
    snapshot.destroy()
    db.volume_destroy(self.ctxt.elevated(), volume_id)
    consistencygroup.destroy()
    cgsnapshot.destroy()
def test_create_consistencygroup_from_src_cgsnapshot_empty(self):
    """Creating from a cgsnapshot that has no snapshots fails with 400."""
    cg = utils.create_group(
        self.ctxt, group_type_id=fake.GROUP_TYPE_ID,
        volume_type_ids=[fake.VOLUME_TYPE_ID],)
    volume_id = utils.create_volume(
        self.ctxt,
        group_id=cg.id)['id']
    # No snapshot is ever added, so the group snapshot stays empty.
    cgsnapshot = utils.create_group_snapshot(
        self.ctxt, group_id=cg.id,
        group_type_id=fake.GROUP_TYPE_ID,)
    test_cg_name = 'test cg'
    request = webob.Request.blank(
        '/v2/%s/consistencygroups/create_from_src' % fake.PROJECT_ID)
    request.method = 'POST'
    request.headers['Content-Type'] = 'application/json'
    request.body = jsonutils.dump_as_bytes(
        {"consistencygroup-from-src": {"name": test_cg_name,
                                       "description":
                                       "Consistency Group 1",
                                       "cgsnapshot_id": cgsnapshot.id}})

    response = request.get_response(
        fakes.wsgi_app(fake_auth_context=self.user_ctxt))
    error = jsonutils.loads(response.body)['badRequest']

    self.assertEqual(http_client.BAD_REQUEST, response.status_int)
    self.assertEqual(http_client.BAD_REQUEST, error['code'])
    self.assertIsNotNone(error['message'])

    db.volume_destroy(self.ctxt.elevated(), volume_id)
    cg.destroy()
    cgsnapshot.destroy()
def test_create_consistencygroup_from_src_source_cg_empty(self):
source_cg = utils.create_group(
self.ctxt, group_type_id=fake.GROUP_TYPE_ID,
volume_type_ids=[fake.VOLUME_TYPE_ID],)
test_cg_name = 'test cg'
body = {"consistencygroup-from-src": {"name": test_cg_name,
"description":
"Consistency Group 1",
"source_cgid": source_cg.id}}
req = webob.Request.blank('/v2/%s/consistencygroups/create_from_src' %
fake.PROJECT_ID)
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = jsonutils.dump_as_bytes(body)
res = req.get_response(fakes.wsgi_app(
fake_auth_context=self.user_ctxt))
res_dict = jsonutils.loads(res.body)
self.assertEqual(http_client.BAD_REQUEST, res.status_int)
self.assertEqual(http_client.BAD_REQUEST,
res_dict['badRequest']['code'])
self.assertIsNotNone(res_dict['badRequest']['message'])
source_cg.destroy()
def test_create_consistencygroup_from_src_cgsnapshot_notfound(self):
consistencygroup = utils.create_group(
self.ctxt, group_type_id=fake.GROUP_TYPE_ID,
volume_type_ids=[fake.VOLUME_TYPE_ID],)
volume_id = utils.create_volume(
self.ctxt,
group_id=consistencygroup.id)['id']
test_cg_name = 'test cg'
body = {
"consistencygroup-from-src":
{
"name": test_cg_name,
"description": "Consistency Group 1",
"source_cgid": fake.CGSNAPSHOT_ID
}
}
req = webob.Request.blank('/v2/%s/consistencygroups/create_from_src' %
fake.PROJECT_ID)
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = jsonutils.dump_as_bytes(body)
res = req.get_response(fakes.wsgi_app(
fake_auth_context=self.user_ctxt))
res_dict = jsonutils.loads(res.body)
self.assertEqual(http_client.NOT_FOUND, res.status_int)
self.assertEqual(http_client.NOT_FOUND,
res_dict['itemNotFound']['code'])
self.assertIsNotNone(res_dict['itemNotFound']['message'])
db.volume_destroy(self.ctxt.elevated(), volume_id)
consistencygroup.destroy()
def test_create_consistencygroup_from_src_source_cg_notfound(self):
test_cg_name = 'test cg'
body = {
"consistencygroup-from-src":
{
"name": test_cg_name,
"description": "Consistency Group 1",
"source_cgid": fake.CONSISTENCY_GROUP_ID
}
}
req = webob.Request.blank('/v2/%s/consistencygroups/create_from_src' %
fake.PROJECT_ID)
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = jsonutils.dump_as_bytes(body)
res = req.get_response(fakes.wsgi_app(
fake_auth_context=self.user_ctxt))
res_dict = jsonutils.loads(res.body)
self.assertEqual(http_client.NOT_FOUND, res.status_int)
self.assertEqual(http_client.NOT_FOUND,
res_dict['itemNotFound']['code'])
self.assertIsNotNone(res_dict['itemNotFound']['message'])
@mock.patch.object(volume_api.API, 'create',
side_effect=exception.CinderException(
'Create volume failed.'))
def test_create_consistencygroup_from_src_cgsnapshot_create_volume_failed(
self, mock_create):
consistencygroup = utils.create_group(
self.ctxt, group_type_id=fake.GROUP_TYPE_ID,
volume_type_ids=[fake.VOLUME_TYPE_ID],)
volume_id = utils.create_volume(
self.ctxt,
group_id=consistencygroup.id)['id']
cgsnapshot = utils.create_group_snapshot(
self.ctxt, group_id=consistencygroup.id,
group_type_id=fake.GROUP_TYPE_ID,)
snapshot = utils.create_snapshot(
self.ctxt,
volume_id,
group_snapshot_id=cgsnapshot.id,
status=fields.SnapshotStatus.AVAILABLE)
test_cg_name = 'test cg'
body = {"consistencygroup-from-src": {"name": test_cg_name,
"description":
"Consistency Group 1",
"cgsnapshot_id": cgsnapshot.id}}
req = webob.Request.blank('/v2/%s/consistencygroups/create_from_src' %
fake.PROJECT_ID)
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = jsonutils.dump_as_bytes(body)
res = req.get_response(fakes.wsgi_app(
fake_auth_context=self.user_ctxt))
res_dict = jsonutils.loads(res.body)
self.assertEqual(http_client.BAD_REQUEST, res.status_int)
self.assertEqual(http_client.BAD_REQUEST,
res_dict['badRequest']['code'])
msg = _("Create volume failed.")
self.assertEqual(msg, res_dict['badRequest']['message'])
snapshot.destroy()
db.volume_destroy(self.ctxt.elevated(), volume_id)
consistencygroup.destroy()
cgsnapshot.destroy()
@mock.patch.object(volume_api.API, 'create',
side_effect=exception.CinderException(
'Create volume failed.'))
def test_create_consistencygroup_from_src_cg_create_volume_failed(
self, mock_create):
source_cg = utils.create_group(
self.ctxt, group_type_id=fake.GROUP_TYPE_ID,
volume_type_ids=[fake.VOLUME_TYPE_ID],)
volume_id = utils.create_volume(
self.ctxt,
group_id=source_cg.id)['id']
test_cg_name = 'test cg'
body = {"consistencygroup-from-src": {"name": test_cg_name,
"description":
"Consistency Group 1",
"source_cgid": source_cg.id}}
req = webob.Request.blank('/v2/%s/consistencygroups/create_from_src' %
fake.PROJECT_ID)
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = jsonutils.dump_as_bytes(body)
res = req.get_response(fakes.wsgi_app(
fake_auth_context=self.user_ctxt))
res_dict = jsonutils.loads(res.body)
self.assertEqual(http_client.BAD_REQUEST, res.status_int)
self.assertEqual(http_client.BAD_REQUEST,
res_dict['badRequest']['code'])
self.assertIsNotNone(res_dict['badRequest']['message'])
db.volume_destroy(self.ctxt.elevated(), volume_id)
source_cg.destroy()
| |
""" Test suite for the code in fixer_util """
# Testing imports
from . import support
# Local imports
from lib2to3.pytree import Node, Leaf
from lib2to3 import fixer_util
from lib2to3.fixer_util import Attr, Name, Call, Comma
from lib2to3.pgen2 import token
def parse(code, strip_levels=0):
    """Parse *code* and optionally peel off the outermost tree levels.

    The topmost node is file_input and the next-topmost is a *_stmt node;
    callers that only care about the inner expression pass strip_levels
    to drop those wrappers.
    """
    tree = support.parse_string(code)
    for _ in range(strip_levels):
        tree = tree.children[0]
        tree.parent = None
    return tree
class MacroTestCase(support.TestCase):
    """Base class providing a string-comparison helper for macro tests."""

    def assertStr(self, node, string):
        """Assert that *node* (or a list/tuple of nodes) str()s to *string*."""
        wrapped = node
        if isinstance(wrapped, (tuple, list)):
            wrapped = Node(fixer_util.syms.simple_stmt, wrapped)
        self.assertEqual(str(wrapped), string)
class Test_is_tuple(support.TestCase):
    """Tests for fixer_util.is_tuple."""

    def is_tuple(self, string):
        """Parse *string* down to the bare expression and classify it."""
        return fixer_util.is_tuple(parse(string, strip_levels=2))

    def test_valid(self):
        for src in ("(a, b)",
                    "(a, (b, c))",
                    "((a, (b, c)),)",
                    "(a,)",
                    "()"):
            self.assertTrue(self.is_tuple(src))

    def test_invalid(self):
        # A parenthesized name and a binary expression are not tuples.
        for src in ("(a)", "('foo') % (b, c)"):
            self.assertFalse(self.is_tuple(src))
class Test_is_list(support.TestCase):
    """Tests for fixer_util.is_list."""

    def is_list(self, string):
        """Parse *string* down to the bare expression and classify it."""
        return fixer_util.is_list(parse(string, strip_levels=2))

    def test_valid(self):
        for src in ("[]",
                    "[a]",
                    "[a, b]",
                    "[a, [b, c]]",
                    "[[a, [b, c]],]"):
            self.assertTrue(self.is_list(src))

    def test_invalid(self):
        # A binary expression of two lists is not itself a list literal.
        self.assertFalse(self.is_list("[]+[]"))
class Test_Attr(MacroTestCase):
    """Tests for the fixer_util.Attr macro."""

    def test(self):
        call = parse("foo()", strip_levels=2)
        self.assertStr(Attr(Name("a"), Name("b")), "a.b")
        self.assertStr(Attr(call, Name("b")), "foo().b")

    def test_returns(self):
        # Attr() must return a plain list of nodes, not a Node.
        self.assertEqual(type(Attr(Name("a"), Name("b"))), list)
class Test_Name(MacroTestCase):
    """Tests for the fixer_util.Name macro."""

    def test(self):
        # (name, keyword args, expected rendering)
        cases = [("a", {}, "a"),
                 ("foo.foo().bar", {}, "foo.foo().bar"),
                 ("a", {"prefix": "b"}, "ba")]
        for name, kwargs, expected in cases:
            self.assertStr(Name(name, **kwargs), expected)
class Test_Call(MacroTestCase):
    """Tests for the fixer_util.Call macro."""

    def _Call(self, name, args=None, prefix=None):
        """Build a Call node for *name*, joining *args* with Comma nodes.

        BUG FIX: the original appended a trailing Comma and then popped it,
        which raised IndexError for an empty args list; interleaving the
        separators instead handles [] gracefully.
        """
        children = []
        if isinstance(args, list):
            for i, arg in enumerate(args):
                if i:
                    children.append(Comma())
                children.append(arg)
        return Call(Name(name), children, prefix)

    def test(self):
        kids = [None,
                [Leaf(token.NUMBER, 1), Leaf(token.NUMBER, 2),
                 Leaf(token.NUMBER, 3)],
                [Leaf(token.NUMBER, 1), Leaf(token.NUMBER, 3),
                 Leaf(token.NUMBER, 2), Leaf(token.NUMBER, 4)],
                [Leaf(token.STRING, "b"), Leaf(token.STRING, "j", prefix=" ")]
                ]
        self.assertStr(self._Call("A"), "A()")
        self.assertStr(self._Call("b", kids[1]), "b(1,2,3)")
        self.assertStr(self._Call("a.b().c", kids[2]), "a.b().c(1,3,2,4)")
        self.assertStr(self._Call("d", kids[3], prefix=" "), " d(b, j)")
class Test_does_tree_import(support.TestCase):
    """Tests for fixer_util.does_tree_import."""
    def _find_bind_rec(self, name, node):
        """Depth-first search for a binding of *name*; returns None if absent."""
        # Search a tree for a binding -- used to find the starting
        # point for these tests.
        c = fixer_util.find_binding(name, node)
        if c: return c
        for child in node.children:
            c = self._find_bind_rec(name, child)
            if c: return c
    def does_tree_import(self, package, name, string):
        """Parse *string* and ask whether package.name is imported above 'start'."""
        node = parse(string)
        # Find the binding of start -- that's what we'll go from
        node = self._find_bind_rec('start', node)
        return fixer_util.does_tree_import(package, name, node)
    def try_with(self, string):
        """Run all (package, name, import) fixtures with *string* appended
        before and after the import line."""
        # Imports that must NOT satisfy the (package, name) query.
        failing_tests = (("a", "a", "from a import b"),
                         ("a.d", "a", "from a.d import b"),
                         ("d.a", "a", "from d.a import b"),
                         (None, "a", "import b"),
                         (None, "a", "import b, c, d"))
        for package, name, import_ in failing_tests:
            n = self.does_tree_import(package, name, import_ + "\n" + string)
            self.assertFalse(n)
            n = self.does_tree_import(package, name, string + "\n" + import_)
            self.assertFalse(n)
        # Imports that MUST satisfy the (package, name) query.
        passing_tests = (("a", "a", "from a import a"),
                         ("x", "a", "from x import a"),
                         ("x", "a", "from x import b, c, a, d"),
                         ("x.b", "a", "from x.b import a"),
                         ("x.b", "a", "from x.b import b, c, a, d"),
                         (None, "a", "import a"),
                         (None, "a", "import b, c, a, d"))
        for package, name, import_ in passing_tests:
            n = self.does_tree_import(package, name, import_ + "\n" + string)
            self.assertTrue(n)
            n = self.does_tree_import(package, name, string + "\n" + import_)
            self.assertTrue(n)
    def test_in_function(self):
        self.try_with("def foo():\n\tbar.baz()\n\tstart=3")
class Test_find_binding(support.TestCase):
    """Tests for fixer_util.find_binding over every binding construct:
    assignments, imports, def/class, for/if/while and try/except/finally."""
    def find_binding(self, name, string, package=None):
        """Parse *string* and search it for a binding of *name*."""
        return fixer_util.find_binding(name, parse(string), package)
    def test_simple_assignment(self):
        self.assertTrue(self.find_binding("a", "a = b"))
        self.assertTrue(self.find_binding("a", "a = [b, c, d]"))
        self.assertTrue(self.find_binding("a", "a = foo()"))
        self.assertTrue(self.find_binding("a", "a = foo().foo.foo[6][foo]"))
        self.assertFalse(self.find_binding("a", "foo = a"))
        self.assertFalse(self.find_binding("a", "foo = (a, b, c)"))
    def test_tuple_assignment(self):
        self.assertTrue(self.find_binding("a", "(a,) = b"))
        self.assertTrue(self.find_binding("a", "(a, b, c) = [b, c, d]"))
        self.assertTrue(self.find_binding("a", "(c, (d, a), b) = foo()"))
        self.assertTrue(self.find_binding("a", "(a, b) = foo().foo[6][foo]"))
        self.assertFalse(self.find_binding("a", "(foo, b) = (b, a)"))
        self.assertFalse(self.find_binding("a", "(foo, (b, c)) = (a, b, c)"))
    def test_list_assignment(self):
        self.assertTrue(self.find_binding("a", "[a] = b"))
        self.assertTrue(self.find_binding("a", "[a, b, c] = [b, c, d]"))
        self.assertTrue(self.find_binding("a", "[c, [d, a], b] = foo()"))
        self.assertTrue(self.find_binding("a", "[a, b] = foo().foo[a][foo]"))
        self.assertFalse(self.find_binding("a", "[foo, b] = (b, a)"))
        self.assertFalse(self.find_binding("a", "[foo, [b, c]] = (a, b, c)"))
    def test_invalid_assignments(self):
        # Attribute, subscript, and call targets do not bind a bare name.
        self.assertFalse(self.find_binding("a", "foo.a = 5"))
        self.assertFalse(self.find_binding("a", "foo[a] = 5"))
        self.assertFalse(self.find_binding("a", "foo(a) = 5"))
        self.assertFalse(self.find_binding("a", "foo(a, b) = 5"))
    def test_simple_import(self):
        self.assertTrue(self.find_binding("a", "import a"))
        self.assertTrue(self.find_binding("a", "import b, c, a, d"))
        self.assertFalse(self.find_binding("a", "import b"))
        self.assertFalse(self.find_binding("a", "import b, c, d"))
    def test_from_import(self):
        self.assertTrue(self.find_binding("a", "from x import a"))
        self.assertTrue(self.find_binding("a", "from a import a"))
        self.assertTrue(self.find_binding("a", "from x import b, c, a, d"))
        self.assertTrue(self.find_binding("a", "from x.b import a"))
        self.assertTrue(self.find_binding("a", "from x.b import b, c, a, d"))
        self.assertFalse(self.find_binding("a", "from a import b"))
        self.assertFalse(self.find_binding("a", "from a.d import b"))
        self.assertFalse(self.find_binding("a", "from d.a import b"))
    def test_import_as(self):
        # Only the 'as' target binds, not the imported module name.
        self.assertTrue(self.find_binding("a", "import b as a"))
        self.assertTrue(self.find_binding("a", "import b as a, c, a as f, d"))
        self.assertFalse(self.find_binding("a", "import a as f"))
        self.assertFalse(self.find_binding("a", "import b, c as f, d as e"))
    def test_from_import_as(self):
        self.assertTrue(self.find_binding("a", "from x import b as a"))
        self.assertTrue(self.find_binding("a", "from x import g as a, d as b"))
        self.assertTrue(self.find_binding("a", "from x.b import t as a"))
        self.assertTrue(self.find_binding("a", "from x.b import g as a, d"))
        self.assertFalse(self.find_binding("a", "from a import b as t"))
        self.assertFalse(self.find_binding("a", "from a.d import b as t"))
        self.assertFalse(self.find_binding("a", "from d.a import b as t"))
    def test_simple_import_with_package(self):
        # A package filter makes plain 'import' never match.
        self.assertTrue(self.find_binding("b", "import b"))
        self.assertTrue(self.find_binding("b", "import b, c, d"))
        self.assertFalse(self.find_binding("b", "import b", "b"))
        self.assertFalse(self.find_binding("b", "import b, c, d", "c"))
    def test_from_import_with_package(self):
        self.assertTrue(self.find_binding("a", "from x import a", "x"))
        self.assertTrue(self.find_binding("a", "from a import a", "a"))
        self.assertTrue(self.find_binding("a", "from x import *", "x"))
        self.assertTrue(self.find_binding("a", "from x import b, c, a, d", "x"))
        self.assertTrue(self.find_binding("a", "from x.b import a", "x.b"))
        self.assertTrue(self.find_binding("a", "from x.b import *", "x.b"))
        self.assertTrue(self.find_binding("a", "from x.b import b, c, a, d", "x.b"))
        self.assertFalse(self.find_binding("a", "from a import b", "a"))
        self.assertFalse(self.find_binding("a", "from a.d import b", "a.d"))
        self.assertFalse(self.find_binding("a", "from d.a import b", "a.d"))
        self.assertFalse(self.find_binding("a", "from x.y import *", "a.b"))
    def test_import_as_with_package(self):
        self.assertFalse(self.find_binding("a", "import b.c as a", "b.c"))
        self.assertFalse(self.find_binding("a", "import a as f", "f"))
        self.assertFalse(self.find_binding("a", "import a as f", "a"))
    def test_from_import_as_with_package(self):
        # Because it would take a lot of special-case code in the fixers
        # to deal with from foo import bar as baz, we'll simply always
        # fail if there is an "from ... import ... as ..."
        self.assertFalse(self.find_binding("a", "from x import b as a", "x"))
        self.assertFalse(self.find_binding("a", "from x import g as a, d as b", "x"))
        self.assertFalse(self.find_binding("a", "from x.b import t as a", "x.b"))
        self.assertFalse(self.find_binding("a", "from x.b import g as a, d", "x.b"))
        self.assertFalse(self.find_binding("a", "from a import b as t", "a"))
        self.assertFalse(self.find_binding("a", "from a import b as t", "b"))
        self.assertFalse(self.find_binding("a", "from a import b as t", "t"))
    def test_function_def(self):
        # Only the function's own name binds at this level; parameters and
        # names inside the body do not.
        self.assertTrue(self.find_binding("a", "def a(): pass"))
        self.assertTrue(self.find_binding("a", "def a(b, c, d): pass"))
        self.assertTrue(self.find_binding("a", "def a(): b = 7"))
        self.assertFalse(self.find_binding("a", "def d(b, (c, a), e): pass"))
        self.assertFalse(self.find_binding("a", "def d(a=7): pass"))
        self.assertFalse(self.find_binding("a", "def d(a): pass"))
        self.assertFalse(self.find_binding("a", "def d(): a = 7"))
        s = """
            def d():
                def a():
                    pass"""
        self.assertFalse(self.find_binding("a", s))
    def test_class_def(self):
        # Only the class's own name binds; bases, keywords and body do not.
        self.assertTrue(self.find_binding("a", "class a: pass"))
        self.assertTrue(self.find_binding("a", "class a(): pass"))
        self.assertTrue(self.find_binding("a", "class a(b): pass"))
        self.assertTrue(self.find_binding("a", "class a(b, c=8): pass"))
        self.assertFalse(self.find_binding("a", "class d: pass"))
        self.assertFalse(self.find_binding("a", "class d(a): pass"))
        self.assertFalse(self.find_binding("a", "class d(b, a=7): pass"))
        self.assertFalse(self.find_binding("a", "class d(b, *a): pass"))
        self.assertFalse(self.find_binding("a", "class d(b, **a): pass"))
        self.assertFalse(self.find_binding("a", "class d: a = 7"))
        s = """
            class d():
                class a():
                    pass"""
        self.assertFalse(self.find_binding("a", s))
    def test_for(self):
        self.assertTrue(self.find_binding("a", "for a in r: pass"))
        self.assertTrue(self.find_binding("a", "for a, b in r: pass"))
        self.assertTrue(self.find_binding("a", "for (a, b) in r: pass"))
        self.assertTrue(self.find_binding("a", "for c, (a,) in r: pass"))
        self.assertTrue(self.find_binding("a", "for c, (a, b) in r: pass"))
        self.assertTrue(self.find_binding("a", "for c in r: a = c"))
        self.assertFalse(self.find_binding("a", "for c in a: pass"))
    def test_for_nested(self):
        s = """
            for b in r:
                for a in b:
                    pass"""
        self.assertTrue(self.find_binding("a", s))
        s = """
            for b in r:
                for a, c in b:
                    pass"""
        self.assertTrue(self.find_binding("a", s))
        s = """
            for b in r:
                for (a, c) in b:
                    pass"""
        self.assertTrue(self.find_binding("a", s))
        s = """
            for b in r:
                for (a,) in b:
                    pass"""
        self.assertTrue(self.find_binding("a", s))
        s = """
            for b in r:
                for c, (a, d) in b:
                    pass"""
        self.assertTrue(self.find_binding("a", s))
        s = """
            for b in r:
                for c in b:
                    a = 7"""
        self.assertTrue(self.find_binding("a", s))
        s = """
            for b in r:
                for c in b:
                    d = a"""
        self.assertFalse(self.find_binding("a", s))
        s = """
            for b in r:
                for c in a:
                    d = 7"""
        self.assertFalse(self.find_binding("a", s))
    def test_if(self):
        self.assertTrue(self.find_binding("a", "if b in r: a = c"))
        self.assertFalse(self.find_binding("a", "if a in r: d = e"))
    def test_if_nested(self):
        s = """
            if b in r:
                if c in d:
                    a = c"""
        self.assertTrue(self.find_binding("a", s))
        s = """
            if b in r:
                if c in d:
                    c = a"""
        self.assertFalse(self.find_binding("a", s))
    def test_while(self):
        self.assertTrue(self.find_binding("a", "while b in r: a = c"))
        self.assertFalse(self.find_binding("a", "while a in r: d = e"))
    def test_while_nested(self):
        s = """
            while b in r:
                while c in d:
                    a = c"""
        self.assertTrue(self.find_binding("a", s))
        s = """
            while b in r:
                while c in d:
                    c = a"""
        self.assertFalse(self.find_binding("a", s))
    def test_try_except(self):
        # A binding in either the try suite or any except suite counts.
        s = """
            try:
                a = 6
            except:
                b = 8"""
        self.assertTrue(self.find_binding("a", s))
        s = """
            try:
                b = 8
            except:
                a = 6"""
        self.assertTrue(self.find_binding("a", s))
        s = """
            try:
                b = 8
            except KeyError:
                pass
            except:
                a = 6"""
        self.assertTrue(self.find_binding("a", s))
        s = """
            try:
                b = 8
            except:
                b = 6"""
        self.assertFalse(self.find_binding("a", s))
    def test_try_except_nested(self):
        s = """
            try:
                try:
                    a = 6
                except:
                    pass
            except:
                b = 8"""
        self.assertTrue(self.find_binding("a", s))
        s = """
            try:
                b = 8
            except:
                try:
                    a = 6
                except:
                    pass"""
        self.assertTrue(self.find_binding("a", s))
        s = """
            try:
                b = 8
            except:
                try:
                    pass
                except:
                    a = 6"""
        self.assertTrue(self.find_binding("a", s))
        s = """
            try:
                try:
                    b = 8
                except KeyError:
                    pass
                except:
                    a = 6
            except:
                pass"""
        self.assertTrue(self.find_binding("a", s))
        s = """
            try:
                pass
            except:
                try:
                    b = 8
                except KeyError:
                    pass
                except:
                    a = 6"""
        self.assertTrue(self.find_binding("a", s))
        s = """
            try:
                b = 8
            except:
                b = 6"""
        self.assertFalse(self.find_binding("a", s))
        s = """
            try:
                try:
                    b = 8
                except:
                    c = d
            except:
                try:
                    b = 6
                except:
                    t = 8
                except:
                    o = y"""
        self.assertFalse(self.find_binding("a", s))
    def test_try_except_finally(self):
        # Bindings in the finally suite are also found.
        s = """
            try:
                c = 6
            except:
                b = 8
            finally:
                a = 9"""
        self.assertTrue(self.find_binding("a", s))
        s = """
            try:
                b = 8
            finally:
                a = 6"""
        self.assertTrue(self.find_binding("a", s))
        s = """
            try:
                b = 8
            finally:
                b = 6"""
        self.assertFalse(self.find_binding("a", s))
        s = """
            try:
                b = 8
            except:
                b = 9
            finally:
                b = 6"""
        self.assertFalse(self.find_binding("a", s))
    def test_try_except_finally_nested(self):
        s = """
            try:
                c = 6
            except:
                b = 8
            finally:
                try:
                    a = 9
                except:
                    b = 9
                finally:
                    c = 9"""
        self.assertTrue(self.find_binding("a", s))
        s = """
            try:
                b = 8
            finally:
                try:
                    pass
                finally:
                    a = 6"""
        self.assertTrue(self.find_binding("a", s))
        s = """
            try:
                b = 8
            finally:
                try:
                    b = 6
                finally:
                    b = 7"""
        self.assertFalse(self.find_binding("a", s))
class Test_touch_import(support.TestCase):
    """Tests for fixer_util.touch_import (import insertion placement)."""

    def _touch(self, source, package, name):
        """Parse *source*, touch the import, and return the rendered text."""
        node = parse(source)
        fixer_util.touch_import(package, name, node)
        return str(node)

    def test_after_docstring(self):
        self.assertEqual(self._touch('"""foo"""\nbar()', None, "foo"),
                         '"""foo"""\nimport foo\nbar()\n\n')

    def test_after_imports(self):
        self.assertEqual(
            self._touch('"""foo"""\nimport bar\nbar()', None, "foo"),
            '"""foo"""\nimport bar\nimport foo\nbar()\n\n')

    def test_beginning(self):
        self.assertEqual(self._touch('bar()', None, "foo"),
                         'import foo\nbar()\n\n')

    def test_from_import(self):
        self.assertEqual(self._touch('bar()', "html", "escape"),
                         'from html import escape\nbar()\n\n')

    def test_name_import(self):
        self.assertEqual(self._touch('bar()', None, "cgi"),
                         'import cgi\nbar()\n\n')
class Test_find_indentation(support.TestCase):
    """Tests for fixer_util.find_indentation."""

    def test_nothing(self):
        fi = fixer_util.find_indentation
        # Top-level code and an empty tree both have no indentation.
        self.assertEqual(fi(parse("node()")), "")
        self.assertEqual(fi(parse("")), "")

    def test_simple(self):
        fi = fixer_util.find_indentation
        node = parse("def f():\n    x()")
        self.assertEqual(fi(node), "")
        self.assertEqual(fi(node.children[0].children[4].children[2]), "    ")
        node = parse("def f():\n    x()\n    y()")
        self.assertEqual(fi(node.children[0].children[4].children[4]), "    ")
| |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Generator for C-Sharp style binding definitions """
import glob
import os
import sys
import string
from idl_log import ErrOut, InfoOut, WarnOut
from idl_node import IDLNode
from idl_ast import IDLAst
from idl_option import GetOption, Option, ParseOptions
from idl_parser import ParseFiles
# Register the command-line options understood by this generator with the
# shared idl_option machinery.
Option('csgen_debug', 'Debug generate.')
Option('cs-enum_prefix', 'Suppress the enum prefix when generating.')
class CGenError(Exception):
  """Raised when the generator cannot produce output for a node."""

  def __init__(self, msg):
    # Keep the message available as .value for existing callers.
    self.value = msg

  def __str__(self):
    return '%r' % self.value
def CommentLines(lines, tabs=0):
  """Format *lines* as a C-style block comment.

  Prepends '<tab>/*' to the block and '<tab> *' to each following line.

  Args:
    lines: List of comment-text lines.
    tabs: Number of indentation stops for the whole block.

  Returns:
    The comment block as a single string ending in a newline.
  """
  tab = '  ' * tabs
  # BUG FIX: an empty list used to raise IndexError on lines[-1].
  if not lines:
    return '%s/* */\n' % tab
  out = '%s/*' % tab + ('\n%s *' % tab).join(lines)
  # Add a terminating ' */' unless the last line is blank, which means it
  # already ends in ' *' and only needs the closing '/'.
  if not lines[-1]:
    out += '/\n'
  else:
    out += ' */\n'
  return out
def Comment(node, prefix=None, tabs=0):
  """Generate a C-style comment block from the given Comment node."""
  lines = node.GetName().split('\n')
  # An optional prefix is prepended to the comment text for this node.
  if prefix:
    prefix_lines = prefix.split('\n')
    # If both the prefix and the comment start with a blank line ('*'),
    # drop the duplicate blank line from the comment.
    if prefix_lines[0] == '*' and lines[0] == '*':
      lines = lines[1:]
    lines = prefix_lines + lines
  return CommentLines(lines, tabs)
def GetNodeComments(node, tabs=0):
  """Join the comment blocks of every Comment child of *node*."""
  return ''.join(Comment(doc, tabs=tabs)
                 for doc in node.GetListOf('Comment'))
class CSGen(object):
  # TypeMap
  #
  # TypeMap modifies how an object is stored or passed, for example pointers
  # are passed as 'const' if they are 'in' parameters, and structures are
  # preceded by the keyword 'struct' as well as using a pointer.
  #
  TypeMap = {
    'Array': {
      'in': '%s[]',
      'inout': '%s[]',
      'out': '%s[]',
      'store': '%s',
      'return': '%s',
      'ref': '%s*'
    },
    'Callspec': {
      'in': '%s',
      'inout': '%s',
      'out': '%s',
      'store': '%s',
      'return': '%s'
    },
    'Enum': {
      'in': '%s',
      'inout': '%s',
      'out': '%s',
      'store': '%s',
      'return': '%s'
    },
    'Interface': {
      'in': 'const %s*',
      'inout': '%s*',
      'out': '%s**',
      'return': '%s*',
      'store': '%s*'
    },
    'Struct': {
      'in': '%s',
      'inout': '%s',
      'out': '%s',
      'return': ' %s',
      'store': '%s',
      'ref': '%s*'
    },
    'blob_t': {
      'in': 'const %s',
      'inout': '%s',
      'out': '%s',
      'return': '%s',
      'store': '%s'
    },
    'mem_t': {
      'in': 'const %s',
      'inout': '%s',
      'out': '%s',
      'return': '%s',
      'store': '%s'
    },
    'mem_ptr_t': {
      'in': 'const %s',
      'inout': '%s',
      'out': '%s',
      'return': '%s',
      'store': '%s'
    },
    'str_t': {
      'in': 'const %s',
      'inout': 'ref %s',
      'out': '%s',
      'return': 'string%.0s%s',
      'store': '%s'
    },
    'cstr_t': {
      'in': '%s',
      'inout': '%s*',
      'out': '%s*',
      'return': '%s',
      'store': '%s'
    },
    'TypeValue': {
      'in': '%s',
      'constptr_in': '%s',  # So we can use const* for PP_Var sometimes.
      'inout': 'ref %s',
      'out': '%s',
      'return': '%s',
      'store': '%s'
    },
  }
  #
  # RemapName
  #
  # A dictionary of PPAPI types that are converted to language specific
  # types before being returned by the C# generator.
  #
  RemapName = {
    'blob_t': 'IntPtr',
    'float_t': 'float',
    'double_t': 'double',
    'int32_t': 'int',
    'uint32_t': 'uint',
    'int64_t': 'long',
    'uint64_t': 'ulong',
    'int16_t': 'short',
    'uint16_t': 'ushort',
    'handle_t': 'int',
    'int8_t': 'sbyte',
    'uint8_t': 'byte',
    'mem_t': 'IntPtr',
    'mem_ptr_t': 'mem_ptr_IntPtr',
    'str_t': 'str_t',
    'cstr_t': 'string',
    'interface_t' : 'const void*'
  }
  #
  # BlackListed
  #
  # A list of methods that we should not generate code for.  These will be
  # manually coded.
  #
  BlackListed = [
    'PPB_Var_VarToUtf8',
  ]
  #
  # RemapSizeToType
  #
  # A dictionary of C# sizes that are converted to language specific
  # types before being returned by the C# generator.
  #
  RemapSizeToType = {
    #'4': 'byte'
  }
  #
  # RemapMode
  #
  # A dictionary of PInvoke marshalling types that are converted to language
  # specific types before being returned by the CS generator.
  #
  RemapMode = {
    'in': '',
    'out': 'out',
    'inout': '',
    'return': '',
    'constptr_in': '',
  }
  #
  # RemapParameter
  #
  # A dictionary of parameter types that need to be remapped.
  #
  RemapParameter = {
    'PP_Time': 'double',  # PP_TimeXXXX is better mapped to a double
    'PP_TimeTicks': 'double',
    'PP_TimeDelta': 'double',
    'char': 'byte'  # when using char[] arrays as unused space in structures char does not work
  }
  #
  # RemapArgument
  #
  # A dictionary of argument names that need to be remapped (e.g. C#
  # reserved words).
  #
  RemapArgument = {
    'event': 'eventArg'
  }
  #
  # RemapEnum
  #
  # A dictionary of enum names that cannot be mapped automatically
  # (C# identifiers cannot start with a digit).
  #
  RemapEnum = {
    '16Bits': '_16Bits',
  }
  #
  # UnrollArray
  #
  # A list of structures whose arrays need to be unrolled.
  #
  UnrollArray = [
    'PPGamepadSampleData'
  ]
  # Tell how to handle pointers to GL types.
  for gltype in ['GLbitfield', 'GLboolean', 'GLbyte', 'GLclampf',
                 'GLclampx', 'GLenum', 'GLfixed', 'GLfloat', 'GLint',
                 'GLintptr', 'GLshort', 'GLsizei', 'GLsizeiptr',
                 'GLubyte', 'GLuint', 'GLushort']:
    ptrtype = gltype + '_ptr_t'
    TypeMap[ptrtype] = {
      'in': 'const %s',
      'inout': '%s',
      'out': '%s',
      'return': 'const %s',
      'store': '%s'
    }
    RemapName[ptrtype] = '%s*' % gltype
  def __init__(self):
    # Current nesting depth used to indent debug log output.
    self.dbg_depth = 0
#
# Debug Logging functions
#
def Log(self, txt):
if not GetOption('csgen_debug'): return
tabs = ' ' * self.dbg_depth
print '%s%s' % (tabs, txt)
def LogEnter(self, txt):
if txt: self.Log(txt)
self.dbg_depth += 1
def LogExit(self, txt):
self.dbg_depth -= 1
if txt: self.Log(txt)
#
# Interface strings
#
def GetInterfaceString(self, node, version = None):
# If an interface name is specified, use that
name = node.GetProperty('iname')
if not name:
# Otherwise, the interface name is the object's name
# With '_Dev' replaced by '(Dev)' if it's a Dev interface.
name = node.GetName()
if name.endswith('_Dev'):
name = '%s(Dev)' % name[:-4]
if version is None:
return name
return "%s;%s" % (name, version)
#
# Return the array specification of the object.
#
def GetArraySpec(self, node):
assert(node.cls == 'Array')
fixed = node.GetProperty('FIXED')
if fixed:
return '[%s]' % fixed
else:
return '[]'
#
# GetTypeName
#
# For any valid 'typed' object such as Member or Typedef
# the typenode object contains the typename
#
# For a given node return the type name by passing mode.
#
def GetTypeName(self, node, release, prefix=''):
self.LogEnter('GetTypeName of %s rel=%s' % (node, release))
# For Members, Params, and Typedefs get the type it refers to otherwise
# the node in question is it's own type (struct, union etc...)
if node.IsA('Member', 'Param', 'Typedef'):
typeref = node.GetType(release)
else:
typeref = node
if typeref is None:
node.Error('No type at release %s.' % release)
raise CGenError('No type for %s' % node)
# If the type is a (BuiltIn) Type then return it's name
# remapping as needed
if typeref.IsA('Type'):
name = CSGen.RemapName.get(typeref.GetName(), None)
if name is None: name = typeref.GetName()
name = '%s%s' % (prefix, name)
# For Interfaces, use the name + version
elif typeref.IsA('Interface'):
rel = typeref.first_release[release]
name = 'struct %s%s' % (prefix, self.GetStructName(typeref, rel, True))
# For structures, preceed with 'struct' or 'union' as appropriate
elif typeref.IsA('Struct'):
if typeref.GetProperty('union'):
name = '%s%s' % (prefix, typeref.GetName())
else:
name = '%s%s' % (prefix, typeref.GetName())
# If it's an enum, or typedef then return the Enum's name
elif typeref.IsA('Enum', 'Typedef'):
if not typeref.LastRelease(release):
first = node.first_release[release]
ver = '_' + node.GetVersion(first).replace('.','_')
else:
ver = ''
name = '%s%s%s' % (prefix, typeref.GetName(), ver)
else:
raise RuntimeError('Getting name of non-type %s.' % node)
self.LogExit('GetTypeName %s is %s' % (node, name))
return name
  #
  # GetRootType
  #
  # For a given node return the basic type of that object.  This is
  # one of the TypeMap keys: 'Array', 'Callspec', an Interface/Struct/Enum
  # class name, a special built-in type name, or 'TypeValue'.
  #
  def GetRootTypeMode(self, node, release, mode):
    """Resolve (rootType, mode) for *node*, recursing through typedefs."""
    self.LogEnter('GetRootType of %s' % node)
    # If it has an array spec, then treat it as an array regardless of type
    if node.GetOneOf('Array'):
      rootType = 'Array'
    # Or if it has a callspec, treat it as a function; the relevant mode
    # for a function's type is its return value.
    elif node.GetOneOf('Callspec'):
      rootType, mode = self.GetRootTypeMode(node.GetType(release), release,
                                            'return')
    # If it's a plain typedef, try that object's root type
    elif node.IsA('Member', 'Param', 'Typedef'):
      rootType, mode = self.GetRootTypeMode(node.GetType(release),
                                            release, mode)
    # If it's an Enum, then it's normal passing rules
    elif node.IsA('Enum'):
      rootType = node.cls
    # If it's an Interface or Struct, we may be passing by value, depending
    # on the returnByValue/passByValue properties.
    elif node.IsA('Interface', 'Struct'):
      if mode == 'return':
        if node.GetProperty('returnByValue'):
          rootType = 'TypeValue'
        else:
          rootType = node.cls
      else:
        if node.GetProperty('passByValue'):
          rootType = 'TypeValue'
        else:
          rootType = node.cls
    # If it's a Basic Type, check whether it's one of the special types
    # with its own TypeMap entry; otherwise it's passed by value.
    elif node.IsA('Type'):
      if node.GetName() in CSGen.TypeMap:
        rootType = node.GetName()
      else:
        rootType = 'TypeValue'
    else:
      raise RuntimeError('Getting root type of non-type %s.' % node)
    self.LogExit('RootType is "%s"' % rootType)
    return rootType, mode
def GetTypeByMode(self, node, release, mode):
self.LogEnter('GetTypeByMode of %s mode=%s release=%s' %
(node, mode, release))
name = self.GetTypeName(node, release)
ntype, mode = self.GetRootTypeMode(node, release, mode)
out = CSGen.TypeMap[ntype][mode] % name
self.LogExit('GetTypeByMode %s = %s' % (node, out))
return out
# Get the passing mode of the object (in, out, inout).
def GetParamMode(self, node):
self.Log('GetParamMode for %s' % node)
if node.GetProperty('in'): return 'in'
if node.GetProperty('out'): return 'out'
if node.GetProperty('inout'): return 'inout'
if node.GetProperty('constptr_in'): return 'constptr_in'
return 'return'
  #
  # GetComponents
  #
  # Returns the signature components of an object as a tuple of
  # (rtype, name, arrays, callspec) where:
  #   rtype - The store or return type of the object.
  #   name - The name of the object.
  #   arrays - A list of array dimensions as [] or [<fixed_num>].
  #   args - None if not a function, otherwise a list of parameters.
  #
  def GetComponents(self, node, release, mode, compose_mode=''):
    """Decompose *node* into its C# signature components.

    When compose_mode is truthy, each parameter type is prefixed with its
    PInvoke marshalling keyword (RemapMode).
    """
    self.LogEnter('GetComponents mode %s for %s %s' % (mode, node, release))
    # Generate passing type by modifying root type
    rtype = self.GetTypeByMode(node, release, mode)
    # If this is an array output, change it from type* foo[] to type** foo.
    # type* foo[] means an array of pointers to type, which is confusing.
    arrayspec = [self.GetArraySpec(array) for array in node.GetListOf('Array')]
    if mode == 'out' and len(arrayspec) == 1 and arrayspec[0] == '[]':
      #rtype += '*'
      del arrayspec[0]
    # Type-bearing containers name themselves; everything else is named by
    # the type it refers to.
    if node.IsA('Enum', 'Interface', 'Struct'):
      rname = node.GetName()
    else:
      rname = node.GetType(release).GetName()
    if rname in CSGen.RemapName:
      rname = CSGen.RemapName[rname]
    # The type template from TypeMap may still contain a '%' placeholder.
    if '%' in rtype:
      rtype = rtype % rname
    if rtype in CSGen.RemapParameter:
      rtype = CSGen.RemapParameter[rtype]
    rtype = self.FormatName(rtype)
    name = self.FormatName(node.GetName())
    # If this is a function, recurse into its released parameters.
    # NOTE: 'mode' is deliberately rebound per-parameter below.
    callnode = node.GetOneOf('Callspec')
    if callnode:
      callspec = []
      for param in callnode.GetListOf('Param'):
        if not param.IsRelease(release):
          continue
        mode = self.GetParamMode(param)
        ptype, pname, parray, pspec = self.GetComponents(param, release, mode)
        if ptype in CSGen.RemapParameter:
          ptype = CSGen.RemapParameter[ptype]
        if compose_mode:
          # Prefix the PInvoke marshalling keyword ('out', '', ...).
          ptype = '%s %s' % (CSGen.RemapMode[mode], ptype)
          callspec.append((ptype, pname, parray, pspec))
        else:
          callspec.append((ptype, pname, parray, pspec))
    else:
      callspec = None
    self.LogExit('GetComponents: %s, %s, %s, %s' %
                 (rtype, name, arrayspec, callspec))
    return (rtype, name, arrayspec, callspec)
def StripUnderScores(self, strip):
    """Return *strip* with every underscore removed."""
    return strip.replace('_', '')
def FormatName(self, name):
    """Collapse underscores in Pepper-prefixed names.

    Names starting with one of the PP/PB prefixes (optionally behind a
    'ref ' qualifier) lose their underscores; anything else is returned
    untouched.  NOTE(review): 'PPB' is deliberately checked without a
    trailing underscore, matching the original behavior.
    """
    pepper_prefixes = ('PP_', 'PB_', 'PPP_', 'PPB')
    bare = name[4:] if name.startswith('ref ') else name
    if bare.startswith(pepper_prefixes):
        name = self.StripUnderScores(name)
    return name
def FormatArgs(self, c_operator, args_spec):
    """Build the argument list used when forwarding to the extern '_name' call.

    args_spec is a list of (rtype, name, array_dims, more_args) tuples as
    produced by GetComponents.  Unsafe string/pointer parameters are passed
    as the pinned 'name_' pointer cast to IntPtr; out/ref parameters keep
    their modifier.  NOTE(review): c_operator is currently unused.
    """
    args = []
    for rtype, name, array_dims, more_args in args_spec:
        if name in CSGen.RemapArgument:
            name = CSGen.RemapArgument[name]
        # special for unsafe - The need to be converted to IntPtr
        if rtype.startswith('out IntPtr') \
            or rtype.startswith('out str_t') \
            or rtype.startswith(' const IntPtr') \
            or rtype.startswith(' const str_t') \
            or rtype.startswith(' ref str_t'):
            args.append('(IntPtr) %s_' % name)
        elif rtype.startswith('out ') or rtype.startswith('ref '):
            # Keep the four-character modifier ('out ' / 'ref ') before the name.
            args.append('%s%s' % (rtype[0:4], name))
        elif rtype.startswith(' ref '):  # TODO: look into why there is a space
            args.append('%s%s' % (rtype[0:5], name))
        else:
            args.append('%s' % name)
    return ', '.join(args)
def Compose(self, rtype, name, arrayspec, callspec, prefix, func_as_ptr,
            include_name, unsized_as_ptr, compose_mode=''):
    """Compose the C# declaration text for one signature.

    Fields (callspec is None) are emitted as 'internal' members: fixed
    buffers for arrays, or one field per element for types listed in
    CSGen.UnrollArray.  Functions are emitted either as a [DllImport]
    extern declaration (compose_mode starting with 'EntryPoint_') or as a
    public static wrapper that null-checks and pins unsafe byte[]
    parameters with 'fixed' before calling the extern '_name' entry point.
    """
    self.LogEnter('Compose: %s %s' % (rtype, name))
    arrayspec = ''.join(arrayspec)
    # Switch unsized array to a ptr. NOTE: Only last element can be unsized.
    if unsized_as_ptr and arrayspec[-2:] == '[]':
        prefix += '*'
        arrayspec = arrayspec[:-2]
    if not include_name:
        name = prefix + arrayspec
    else:
        if not compose_mode:
            if arrayspec:
                name = prefix + name
            else:
                name = prefix + name + arrayspec
        if name in CSGen.RemapArgument:
            name = CSGen.RemapArgument[name]
    if callspec is None:
        # Not a function: emit a field declaration.
        if rtype in CSGen.RemapParameter:
            out = '%s %s' % (CSGen.RemapParameter[rtype], name)
        else:
            if compose_mode:
                out = '%s %s' % (rtype, name)
            else:
                if arrayspec:
                    if rtype in CSGen.UnrollArray:
                        # Unroll the array: one field per element, name_1..name_N.
                        aSize = arrayspec[1:len(arrayspec)-1:]
                        out = ''
                        for i in range(0,int(aSize)-1):
                            out += 'internal %s %s_%s;\n' % (rtype, name, i+1)
                        out += 'internal %s %s_%s' % (rtype, name, int(aSize))
                    else:
                        out = 'internal unsafe fixed %s %s%s' % (rtype, name, arrayspec)
                else:
                    out = 'internal %s %s' % (rtype, name)
    else:
        # Function: compose each parameter first, propagating compose_mode.
        params = []
        for ptype, pname, parray, pspec in callspec:
            params.append(self.Compose(ptype, pname, parray, pspec, '', True,
                                       include_name=True,
                                       unsized_as_ptr=unsized_as_ptr,
                                       compose_mode=compose_mode))
        if func_as_ptr:
            name = '%s' % name
        if compose_mode.startswith('EntryPoint_'):
            out = '[DllImport("PepperPlugin", EntryPoint = "%s_%s")]\n' % (compose_mode[len('EntryPoint_'):], name)
            # We now check for unsafe parameters
            entryParams = []
            for eparm in params:
                if eparm.startswith('out IntPtr'):
                    eparm = eparm[len('out '):]
                elif eparm.startswith(' const IntPtr'):
                    eparm = eparm[len(' const '):]
                elif eparm.startswith('out str_t'):
                    eparm = 'IntPtr' + eparm[len('out str_t'):]
                elif eparm.startswith(' const str_t'):
                    eparm = 'IntPtr' + eparm[len(' const str_t'):]
                elif eparm.startswith(' ref str_t'):
                    eparm = 'IntPtr' + eparm[len(' ref str_t'):]
                entryParams.append(eparm)
            out += 'extern static %s _%s (%s);\n' % (rtype, name, ', '.join(entryParams))
        else:
            # We now check for unsafe parameters; unsafe ones are exposed as
            # byte[] in the public wrapper and pinned before the extern call.
            entryParams = []
            unsafeParams = []
            for eparm in params:
                if eparm.startswith('out IntPtr'):
                    unsafeParams.append(eparm[len('out IntPtr '):])
                    eparm = 'byte[]' + eparm[len('out IntPtr'):]
                elif eparm.startswith(' const IntPtr'):
                    unsafeParams.append(eparm[len(' const IntPtr '):])
                    eparm = 'byte[]' + eparm[len(' const IntPtr'):]
                elif eparm.startswith('out str_t'):
                    unsafeParams.append(eparm[len('out str_t '):])
                    eparm = 'byte[]' + eparm[len('out str_t'):]
                elif eparm.startswith(' const str_t'):
                    unsafeParams.append(eparm[len(' const str_t '):])
                    eparm = 'byte[]' + eparm[len(' const str_t'):]
                elif eparm.startswith(' ref str_t'):
                    unsafeParams.append(eparm[len(' ref str_t '):])
                    eparm = 'byte[]' + eparm[len(' ref str_t'):]
                entryParams.append(eparm)
            out = 'public static %s %s (%s) \n{\n' % (rtype, name, ', '.join(entryParams))
            needsReturn = ''
            if rtype != 'void':
                needsReturn = 'return'
            tabs = '\t'
            if unsafeParams:
                # Generate some sanity checks for the unsafe parameters
                for cparm in unsafeParams:
                    out += '%sif (%s == null)\n%s\tthrow new ArgumentNullException ("%s");\n\n' % (tabs,cparm,tabs,cparm)
                out += '%sunsafe\n%s{\n' % (tabs,tabs)
                for uparm in unsafeParams:
                    tabs += '\t'
                    out += '%sfixed (byte* %s_ = &%s[0])\n%s{\n' % (tabs,uparm, uparm[0:len(uparm)], tabs)
                tabs += '\t'
            out += '%s%s _%s (%s);\n' % (tabs,needsReturn, name, self.FormatArgs('', callspec))
            if unsafeParams:
                # Close one brace per 'fixed' block plus the 'unsafe' block.
                tabs = tabs[0:len(tabs) - 1]
                for uparm in unsafeParams:
                    out += '%s}\n' % tabs
                    tabs = tabs[0:len(tabs) - 1]
                out += '%s}\n' % tabs
            out += '}\n\n'
    self.LogExit('Exit Compose: %s' % out)
    return out
#
# GetSignature
#
# Returns the 'C' style signature of the object
# prefix - A prefix for the object's name
# func_as_ptr - Formats a function as a function pointer
# include_name - If true, include member name in the signature.
# If false, leave it out. In any case, prefix is always
# included.
# include_version - if True, include version in the member name
#
def GetSignature(self, node, release, mode, prefix='', func_as_ptr=True,
                 include_name=True, include_version=False, compose_mode=''):
    """Return the C# style signature of the object.

    prefix - A prefix for the object's name.
    func_as_ptr - Formats a function as a function pointer.
    include_name - If true, include the member name in the signature.  If
        false, leave it out.  In any case, prefix is always included.
    include_version - If True, include version in the member name.
    compose_mode - Forwarded to Compose to pick field/extern/wrapper output.
    """
    self.LogEnter('GetSignature %s %s as func=%s' %
                  (node, mode, func_as_ptr))
    rtype, name, arrayspec, callspec = self.GetComponents(node, release, mode, compose_mode)
    # Member name can not be the same as enclosing type
    if node.cls == 'Typedef':
        if name == self.GetStructName(node, release, False):
            name = name.lower()
    if include_version:
        name = self.GetStructName(node, release, True)
    # If not a callspec (such as a struct) use a ptr instead of []
    unsized_as_ptr = not callspec
    out = self.Compose(rtype, name, arrayspec, callspec, prefix,
                       func_as_ptr, include_name, unsized_as_ptr, compose_mode)
    self.LogExit('Exit GetSignature: %s' % out)
    return out
# Define a Typedef.
def DefineTypedef(self, node, releases, prefix='', comment=False, compose_mode=''):
    """Define a Typedef.

    Function typedefs (those with a callspec, e.g. callbacks) become a
    Cdecl delegate declaration; everything else becomes a sequential-layout
    partial struct wrapping the single member signature.
    """
    __pychecker__ = 'unusednames=comment'
    build_list = node.GetUniqueReleases(releases)
    rtype, name, arrayspec, callspec = self.GetComponents(node, build_list[-1], 'return')
    #if this is a Typedef that has a callspec we will not handle that right now
    #example being a callback function.
    if callspec:
        params = []
        for ptype, pname, parray, pspec in callspec:
            params.append(self.Compose(ptype, pname, parray, pspec, '', True,
                                       include_name=True,
                                       unsized_as_ptr=True,
                                       compose_mode='Params'))
        # Unsafe pointer parameters are exposed as plain IntPtr in delegates.
        entryParams = []
        for eparm in params:
            if eparm.startswith('out IntPtr'):
                eparm = eparm[len('out '):]
            elif eparm.startswith('const IntPtr'):
                eparm = eparm[len('const '):]
            entryParams.append(eparm)
        out = '[UnmanagedFunctionPointer(CallingConvention.Cdecl)]\npublic delegate %s %s (%s);\n\n' % (rtype, name,
            ', '.join(entryParams))
    else:
        out = '[StructLayout(LayoutKind.Sequential)]\npublic partial struct %s {\n\t%s;\n}\n' % (self.FormatStructName(node.GetName()),
            self.GetSignature(node, build_list[-1], 'return',
                              prefix, True,
                              include_version=False))
    # We will not support version mangling right now
    # Version mangle any other versions
    #for index, rel in enumerate(build_list[:-1]):
    #  out += '\n'
    #  out += 'typedef %s;\n' % self.GetSignature(node, rel, 'return',
    #                                             prefix, True,
    #                                             include_version=True)
    self.Log('DefineTypedef: %s' % out)
    return out
def is_number(self, s):
    """Return True if the string *s* parses as a number.

    complex() accepts everything int()/float() do, plus complex literals.
    """
    try:
        complex(s)
    except ValueError:
        return False
    else:
        return True
def GetItemName(self, enum, item):
    """Convert an IDL enum item name to its C# CamelCase form.

    Only active when the 'cs-enum_prefix' option is set: the enclosing
    enum's name and any PP_/PPB_ prefix are stripped, the remainder is
    CamelCased, purely numeric results get a '_' prepended (C# identifiers
    cannot start with a digit), and self.RemapEnum gets the final say.
    """
    suppress_prefix = GetOption('cs-enum_prefix')
    if suppress_prefix:
        enumeration = item
        # Strip the enclosing enum's name (with or without trailing '_').
        if item.upper().startswith(enum.upper()+"_"):
            enumeration = item[len(enum+"_"):]
        elif item.upper().startswith(enum.upper()):
            enumeration = item[len(enum):]
        if self.is_number(enumeration):
            enumeration = "_" + enumeration
        if enumeration.startswith('PP_'):
            enumeration = enumeration[3:]
        if enumeration.startswith('PPB_'):
            enumeration = enumeration[4:]
        # CamelCase: lower-case, underscores to spaces, capitalize, re-join.
        enumeration = enumeration.lower()
        enumeration = enumeration.replace('_', ' ')
        enumeration = string.capwords(enumeration)
        enumeration = "".join(enumeration.split(' '))
        if self.is_number(enumeration):
            enumeration = "_" + enumeration
        if enumeration in self.RemapEnum:
            enumeration = self.RemapEnum[enumeration]
        item = enumeration
    return item
def GetEnumValue(self, enum, value):
    """Convert an enum item's value expression to CamelCase form.

    Mirrors GetItemName without the numeric/PP_ special cases —
    presumably so symbolic values keep matching their renamed items
    (TODO confirm against generated output).
    """
    suppress_prefix = GetOption('cs-enum_prefix')
    if suppress_prefix:
        enumValue = value
        if value.upper().startswith(enum.upper()+"_"):
            enumValue = value[len(enum+"_"):]
        elif value.upper().startswith(enum.upper()):
            enumValue = value[len(enum):]
        enumValue = enumValue.lower()
        enumValue = enumValue.replace('_', ' ')
        enumValue = string.capwords(enumValue)
        enumValue = "".join(enumValue.split(' '))
        value = enumValue
    return value
def GetEnumName(self, enum):
    """Return the C# name for *enum*: the IDL name minus underscores."""
    formatted = self.StripUnderScores(enum)
    return formatted
# Define an Enum.
def DefineEnum(self, node, releases, prefix='', comment=False, compose_mode=''):
    """Define an Enum as a C# 'public enum' block.

    Item names and values are run through GetItemName/GetEnumValue so the
    'cs-enum_prefix' option can strip redundant prefixes.
    """
    __pychecker__ = 'unusednames=comment,releases'
    self.LogEnter('DefineEnum %s' % node)
    name = '%s%s' % (prefix, node.GetName())
    # NOTE(review): the next three properties are fetched but never used here.
    notypedef = node.GetProperty('notypedef')
    unnamed = node.GetProperty('unnamed')
    asize = node.GetProperty('assert_size()')
    out = 'public enum %s {' % self.GetEnumName(name)
    enumlist = []
    for child in node.GetListOf('EnumItem'):
        value = child.GetProperty('VALUE')
        comment_txt = GetNodeComments(child, tabs=1)
        if value:
            item_txt = '%s%s = %s' % (prefix, self.GetItemName(name, child.GetName()), self.GetEnumValue(name, value))
        else:
            item_txt = '%s%s' % (prefix, self.GetItemName(name, child.GetName()))
        enumlist.append('%s %s' % (comment_txt, item_txt))
    out = '%s\n%s\n}\n' % (out, ',\n'.join(enumlist))
    self.LogExit('Exit DefineEnum')
    return out
def DefineMember(self, node, releases, prefix='', comment=False, compose_mode=''):
    """Define a Member (field or method) of an interface or struct.

    'ref' members use the 'ref' passing mode, everything else 'store'.
    When a compose_mode is set, methods listed in CSGen.BlackListed only
    emit a skip comment (they are handled manually elsewhere — TODO
    confirm where).
    """
    __pychecker__ = 'unusednames=prefix,comment'
    release = releases[0]
    self.LogEnter('DefineMember %s' % node)
    if node.GetProperty('ref'):
        out = '%s;' % self.GetSignature(node, release, 'ref', '', True, compose_mode=compose_mode)
    else:
        if compose_mode:
            # Check for blacklisted methods that need to be manually handled.
            black = "%s_%s" % (node.parent.GetName(),node.GetName())
            if black in CSGen.BlackListed:
                print ('/* Not generating entry point methods for %s */\n\n' % black)
                out = '/* Not generating entry point methods for %s */\n\n' % black
            else:
                out = '%s' % self.GetSignature(node, release, 'store', '', True, compose_mode=compose_mode)
        else:
            out = '%s;' % self.GetSignature(node, release, 'store', '', True, compose_mode=compose_mode)
    self.LogExit('Exit DefineMember')
    return out
def GetStructName(self, node, release, include_version=False):
    """Return the formatted C# struct name for *node*.

    When include_version is True, the node's version for *release* is
    appended as an underscore-separated suffix (dots become underscores)
    before formatting.
    """
    if include_version:
        version_suffix = ('_%s' % node.GetVersion(release)).replace('.', '_')
    else:
        version_suffix = ''
    return self.FormatStructName(node.GetName() + version_suffix)
def FormatStructName(self, structureName):
    """Format an IDL struct name for C# by dropping its underscores."""
    stripped = self.StripUnderScores(structureName)
    return stripped
def DefineStructInternals(self, node, release,
                          include_version=False, comment=True):
    """Emit the body of a struct: layout attribute, declaration and members.

    Dev-only members are skipped when generating for a 'stable' channel
    release.  NOTE(review): the union branch still emits C-style 'union'
    text; callers currently appear to skip unions before reaching here.
    """
    channel = node.GetProperty('FILE').release_map.GetChannel(release)
    if channel == 'dev':
        channel_comment = ' /* dev */'
    else:
        channel_comment = ''
    out = ''
    structName = self.GetStructName(node, release, include_version)
    if node.GetProperty('union'):
        out += 'union %s {%s\n' % (
            structName, channel_comment)
    else:
        out += '[StructLayout(LayoutKind.Sequential)]\npublic partial struct %s {%s\n' % (
            structName, channel_comment)
    channel = node.GetProperty('FILE').release_map.GetChannel(release)
    # Generate Member Functions
    members = []
    for child in node.GetListOf('Member'):
        if channel == 'stable' and child.NodeIsDevOnly():
            continue
        member = self.Define(child, [release], tabs=1, comment=comment)
        if not member:
            continue
        members.append(member)
    out += '%s\n}\n' % '\n'.join(members)
    return out
def GetInterfaceName(self, node, release, include_version=False):
    """Return the interface name for *node*, optionally version-mangled.

    The result keeps its underscores; the version suffix has its dots
    replaced by underscores (e.g. '0.2' -> '_0_2').
    """
    if not include_version:
        return node.GetName()
    version_suffix = ('_%s' % node.GetVersion(release)).replace('.', '_')
    return node.GetName() + version_suffix
def DefineInterfaceInternals(self, node, release,
                             include_version=False, comment=True):
    """Emit an interface as an 'internal static partial class'.

    Each member is defined twice: once as the [DllImport] entry point
    (compose mode 'EntryPoint_<iface>') and once as the public binding
    wrapper (compose mode 'Binding').  Dev-only members are skipped for
    'stable' channel releases.
    """
    channel = node.GetProperty('FILE').release_map.GetChannel(release)
    if channel == 'dev':
        channel_comment = ' /* dev */'
    else:
        channel_comment = ''
    out = ''
    interfaceName = ''
    if node.GetProperty('union'):
        out += 'union %s {%s\n' % (
            self.GetStructName(node, release, include_version), channel_comment)
    else:
        interfaceName = self.GetInterfaceName(node, release, include_version)
        out += 'internal static partial class %s {%s\n' % (
            self.StripUnderScores(interfaceName), channel_comment)
    channel = node.GetProperty('FILE').release_map.GetChannel(release)
    # Generate Member Functions for PInvoke
    members = []
    for child in node.GetListOf('Member'):
        if channel == 'stable' and child.NodeIsDevOnly():
            continue
        member = self.Define(child, [release], tabs=1, comment=comment, compose_mode='EntryPoint_%s' % interfaceName)
        if not member:
            continue
        members.append(member)
        member = self.Define(child, [release], tabs=1, comment=comment, compose_mode='Binding')
        if not member:
            continue
        members.append(member)
    out += '%s\n}\n' % '\n'.join(members)
    return out
def DefineUnversionedInterface(self, node, rel):
    """Emit the unversioned definition for an interface.

    With force_struct_namespace the full struct body is duplicated;
    otherwise a typedef aliasing the versioned name is emitted.
    NOTE(review): the typedef branch emits C syntax, not C# — likely a
    leftover from the C generator this file derives from; confirm.
    """
    out = '\n'
    if node.GetProperty('force_struct_namespace'):
        # Duplicate the definition to put it in struct namespace. This
        # attribute is only for legacy APIs like OpenGLES2 and new APIs
        # must not use this. See http://crbug.com/411799
        out += self.DefineStructInternals(node, rel,
                                          include_version=False, comment=True)
    else:
        # Define an unversioned typedef for the most recent version
        out += 'typedef struct %s %s;\n' % (
            self.GetStructName(node, rel, include_version=True),
            self.GetStructName(node, rel, include_version=False))
    return out
def DefineStruct(self, node, releases, prefix='', comment=False, compose_mode=''):
    """Define a Struct; only the newest release in the build list is emitted.

    Unions are not generated automatically: a placeholder comment is
    returned (and printed) instead.
    """
    __pychecker__ = 'unusednames=comment,prefix'
    self.LogEnter('DefineStruct %s' % node)
    out = ''
    build_list = node.GetUniqueReleases(releases)
    # NOTE(review): newest_stable/newest_dev are computed but never used.
    newest_stable = None
    newest_dev = None
    for rel in build_list:
        channel = node.GetProperty('FILE').release_map.GetChannel(rel)
        if channel == 'stable':
            newest_stable = rel
        if channel == 'dev':
            newest_dev = rel
    last_rel = build_list[-1]
    # TODO(bradnelson) : Bug 157017 finish multiversion support
    if node.IsA('Struct'):
        if len(build_list) != 1:
            node.Error('Can not support multiple versions of node.')
        assert len(build_list) == 1
    # Build the most recent one versioned, with comments
    if node.GetProperty('union'):
        # We do not handle generating for unions automatically right now
        print ("Skipping generation of << union %s >>." % node.GetName())
        out = "/* Skipping generation of << union %s >>. */" % node.GetName()
    else:
        out = self.DefineStructInternals(node, last_rel,
                                         include_version=False, comment=True)
    self.LogExit('Exit DefineStruct')
    return out
def DefineInterface(self, node, releases, prefix='', comment=False, compose_mode=''):
    """Define an Interface using the newest release in the build list."""
    __pychecker__ = 'unusednames=comment,prefix'
    self.LogEnter('DefineInterface %s' % node)
    out = ''
    build_list = node.GetUniqueReleases(releases)
    # NOTE(review): newest_stable/newest_dev are computed but never used.
    newest_stable = None
    newest_dev = None
    for rel in build_list:
        channel = node.GetProperty('FILE').release_map.GetChannel(rel)
        if channel == 'stable':
            newest_stable = rel
        if channel == 'dev':
            newest_dev = rel
    last_rel = build_list[-1]
    if node.IsA('Interface'):
        # Build the most recent one versioned, with comments
        out = self.DefineInterfaceInternals(node, last_rel,
                                            include_version=False, comment=True)
    self.LogExit('Exit DefineInterface')
    return out
#
# Copyright and Comment
#
# Generate a comment or copyright block
#
def Copyright(self, node, cpp_style=False):
    """Generate a copyright block from the node's text.

    cpp_style=True yields '//'-prefixed lines with blank lines dropped;
    otherwise the lines are delegated to CommentLines.
    """
    lines = node.GetName().split('\n')
    if cpp_style:
        kept = [line for line in lines if line != '']
        return '//' + '\n//'.join(kept) + '\n'
    return CommentLines(lines)
def Indent(self, data, tabs=0):
    """Handles indentation and 80-column line wrapping.

    Lines that fit (or contain URLs) pass through; long non-call lines
    break at the nearest space; long call lines are split at their
    argument commas, aligned under the opening paren when that still
    fits in 80 columns.
    """
    tab = '  ' * tabs
    lines = []
    for line in data.split('\n'):
        # Add indentation
        line = tab + line
        space_break = line.rfind(' ', 0, 80)
        if len(line) <= 80 or 'http://' in line:
            # Ignore normal line and URLs permitted by the style guide.
            lines.append(line.rstrip())
        elif not '(' in line and space_break >= 0:
            # Break long typedefs on nearest space.
            lines.append(line[0:space_break])
            lines.append('    ' + line[space_break + 1:])
        else:
            left = line.rfind('(') + 1
            args = line[left:].split(',')
            orig_args = args
            orig_left = left
            # Try to split on '(arg1)' or '(arg1, arg2)', not '()'
            while args[0][0] == ')':
                left = line.rfind('(', 0, left - 1) + 1
                if left == 0:  # No more parens, take the original option
                    args = orig_args
                    left = orig_left
                    break
                args = line[left:].split(',')
            line_max = 0
            for arg in args:
                if len(arg) > line_max: line_max = len(arg)
            if left + line_max >= 80:
                # Opening paren too far right: indent args on fresh lines.
                indent = '%s    ' % tab
                args = (',\n%s' % indent).join([arg.strip() for arg in args])
                lines.append('%s\n%s%s' % (line[:left], indent, args))
            else:
                # Align continuation args directly under the opening paren.
                indent = ' ' * (left - 1)
                args = (',\n%s' % indent).join(args)
                lines.append('%s%s' % (line[:left], args))
    return '\n'.join(lines)
# Define a top level object.
def Define(self, node, releases, tabs=0, prefix='', comment=False, compose_mode=''):
    """Define a top level object, dispatching on the node's class.

    Returns the indented C# text for *node*, or '' when the node is not
    present in the requested releases or its class is unsupported.
    """
    # If this request does not match unique release, or if the release is not
    # available (possibly deprecated) then skip.
    unique = node.GetUniqueReleases(releases)
    if not unique or not node.InReleases(releases):
        return ''
    self.LogEnter('Define %s tab=%d prefix="%s"' % (node,tabs,prefix))
    declmap = {
        'Enum': CSGen.DefineEnum,
        'Function': CSGen.DefineMember,
        'Interface': CSGen.DefineInterface,
        'Member': CSGen.DefineMember,
        'Struct': CSGen.DefineStruct,
        'Typedef': CSGen.DefineTypedef,
    }
    out = ''
    func = declmap.get(node.cls, None)
    if not func:
        # BUG FIX: previously fell through and invoked None (TypeError);
        # now log the failure and return an empty definition instead.
        ErrOut.Log('Failed to define %s named %s' % (node.cls, node.GetName()))
        self.LogExit('Exit Define')
        return ''
    define_txt = func(self, node, releases, prefix=prefix, comment=comment, compose_mode=compose_mode)
    # Entry-point declarations carry their own attribute lines; keep node
    # comments off of them.
    comment_txt = GetNodeComments(node, tabs=0)
    if comment_txt and comment and not compose_mode.startswith('EntryPoint'):
        out += comment_txt
    out += define_txt
    indented_out = self.Indent(out, tabs)
    self.LogExit('Exit Define')
    return indented_out
# Clean a string representing an object definition and return then string
# as a single space delimited set of tokens.
def CleanString(instr):
    """Collapse all whitespace in *instr* to single spaces and trim ends."""
    return ' '.join(instr.split())
# Test a file, by comparing all it's objects, with their comments.
def TestFile(filenode):
    """Test a file by comparing each node's generated text with its comment.

    Returns the number of mismatches found.  (Python 2 module: note the
    print statement below.)
    """
    csgen = CSGen()
    errors = 0
    # Skip the first two children — presumably copyright/description
    # nodes (TODO confirm against the parser).
    for node in filenode.GetChildren()[2:]:
        instr = node.GetOneOf('Comment')
        if not instr: continue
        instr.Dump()
        instr = CleanString(instr.GetName())
        outstr = csgen.Define(node, releases=['M14'])
        if GetOption('verbose'):
            print outstr + '\n'
        outstr = CleanString(outstr)
        if instr != outstr:
            ErrOut.Log('Failed match of\n>>%s<<\nto:\n>>%s<<\nFor:\n' %
                       (instr, outstr))
            node.Dump(1, comments=True)
            errors += 1
    return errors
# Build and resolve the AST and compare each file individual.
def TestFiles(filenames):
    """Build and resolve the AST and compare each file individually.

    With no filenames given, falls back to the bundled test_cgen/*.idl
    suite next to the script.  Returns the total error count.
    """
    if not filenames:
        idldir = os.path.split(sys.argv[0])[0]
        idldir = os.path.join(idldir, 'test_cgen', '*.idl')
        filenames = glob.glob(idldir)
    # Sort for a deterministic test order.
    filenames = sorted(filenames)
    ast = ParseFiles(filenames)
    total_errs = 0
    for filenode in ast.GetListOf('File'):
        errs = TestFile(filenode)
        if errs:
            ErrOut.Log('%s test failed with %d error(s).' %
                       (filenode.GetName(), errs))
            total_errs += errs
    if total_errs:
        ErrOut.Log('Failed generator test.')
    else:
        InfoOut.Log('Passed generator test.')
    return total_errs
def main(args):
    """Entry point: run the self-test or generate 'tst_'-prefixed C# output.

    (Python 2 module: note the print statements below.)
    """
    filenames = ParseOptions(args)
    if GetOption('test'):
        return TestFiles(filenames)
    ast = ParseFiles(filenames)
    cgen = CSGen()
    for f in ast.GetListOf('File'):
        # Skip files that failed to parse.
        if f.GetProperty('ERRORS') > 0:
            print 'Skipping %s' % f.GetName()
            continue
        for node in f.GetChildren()[2:]:
            print cgen.Define(node, ast.releases, comment=True, prefix='tst_')

if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))
| |
#!/usr/bin/env python
#
# Copyright 2008 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A class to serve pages from zip files and use memcache for performance.
This contains a class and a function to create an anonymous instance of the
class to serve HTTP GET requests. Memcache is used to increase response speed
and lower processing cycles used in serving. Credit to Guido van Rossum and
his implementation of zipserve which served as a reference as I wrote this.
NOTE: THIS FILE WAS MODIFIED TO SUPPORT CLIENT CACHING
MemcachedZipHandler: Class that serves request
create_handler: method to create instance of MemcachedZipHandler
"""
__author__ = 'j.c@google.com (Justin Mattson)'
import email.Utils
import datetime
import logging
import mimetypes
import os
import time
import zipfile
from google.appengine.api import memcache
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
from django.utils.hashcompat import md5_constructor
def create_handler(zip_files, max_age=None, public=None, client_caching=None):
    """Factory method to create a MemcachedZipHandler instance.

    Args:
      zip_files: A list of file names, or a list of lists of file name, first
          member of file mappings. See MemcachedZipHandler documentation for
          more information about using the list of lists format
      max_age: The maximum client-side cache lifetime
      public: Whether this should be declared public in the client-side cache
      client_caching: Whether Last-Modified/ETag headers should be sent

    Returns:
      A MemcachedZipHandler wrapped in a pretty, anonymous bow for use with App
      Engine

    Raises:
      ValueError: if the zip_files argument is not a list
    """
    # verify argument integrity. If the argument is passed in list format,
    # convert it to list of lists format
    if zip_files and isinstance(zip_files, list):
        num_items = len(zip_files)
        while num_items > 0:
            # Wrap bare names so every entry is [name] or [name, last_file].
            if not isinstance(zip_files[num_items - 1], list):
                zip_files[num_items - 1] = [zip_files[num_items - 1]]
            num_items -= 1
    else:
        raise ValueError('File name arguments must be a list')

    class HandlerWrapper(MemcachedZipHandler):
        """Simple wrapper binding the factory arguments onto the handler."""

        def get(self, name):
            self.zipfilenames = zip_files
            if max_age is not None:
                self.MAX_AGE = max_age
            if public is not None:
                self.PUBLIC = public
            if client_caching is not None:
                self.CLIENT_CACHING = client_caching
            self.TrueGet(name)

    return HandlerWrapper
class CacheFile(object):
    """Bare attribute container for one cached entry.

    GetFromStore attaches .file (the file bytes), .lastmod (a datetime)
    and .etag (a quoted md5 hex string) to instances of this class.
    """
    pass
class MemcachedZipHandler(webapp.RequestHandler):
"""Handles get requests for a given URL.
Serves a GET request from a series of zip files. As files are served they are
put into memcache, which is much faster than retreiving them from the zip
source file again. It also uses considerably fewer CPU cycles.
"""
zipfile_cache = {} # class cache of source zip files
current_last_modified = None # where we save the current last modified datetime
current_etag = None # the current ETag of a file served
CLIENT_CACHING = True # is client caching enabled? (sending Last-Modified and ETag within response!)
MAX_AGE = 600 # max client-side cache lifetime
PUBLIC = True # public cache setting
CACHE_PREFIX = "cache://" # memcache key prefix for actual URLs
NEG_CACHE_PREFIX = "noncache://" # memcache key prefix for non-existant URL
def TrueGet(self, name):
"""The top-level entry point to serving requests.
Called 'True' get because it does the work when called from the wrapper
class' get method
Args:
name: URL requested
Returns:
None
"""
name = self.PreprocessUrl(name)
# see if we have the page in the memcache
resp_data = self.GetFromCache(name)
if resp_data is None:
logging.info('Cache miss for %s', name)
resp_data = self.GetFromNegativeCache(name)
if resp_data is None or resp_data == -1:
resp_data = self.GetFromStore(name)
# IF we have the file, put it in the memcache
# ELSE put it in the negative cache
if resp_data is not None:
self.StoreOrUpdateInCache(name, resp_data)
else:
logging.info('Adding %s to negative cache, serving 404', name)
self.StoreInNegativeCache(name)
self.Write404Error()
return
else:
self.Write404Error()
return
content_type, encoding = mimetypes.guess_type(name)
if content_type:
self.response.headers['Content-Type'] = content_type
self.current_last_modified = resp_data.lastmod
self.current_etag = resp_data.etag
self.SetCachingHeaders()
# if the received ETag matches
if resp_data.etag == self.request.headers.get('If-None-Match'):
self.error(304)
return
# if-modified-since was passed by the browser
if self.request.headers.has_key('If-Modified-Since'):
dt = self.request.headers.get('If-Modified-Since').split(';')[0]
modsince = datetime.datetime.strptime(dt, "%a, %d %b %Y %H:%M:%S %Z")
if modsince >= self.current_last_modified:
# The file is older than the cached copy (or exactly the same)
self.error(304)
return
self.response.out.write(resp_data.file)
def PreprocessUrl(self, name):
"""Any preprocessing work on the URL when it comes it.
Put any work related to interpretting the incoming URL here. For example,
this is used to redirect requests for a directory to the index.html file
in that directory. Subclasses should override this method to do different
preprocessing.
Args:
name: The incoming URL
Returns:
The processed URL
"""
if name[len(name) - 1:] == '/':
return "%s%s" % (name, 'index.html')
else:
return name
def GetFromStore(self, file_path):
"""Retrieve file from zip files.
Get the file from the source, it must not have been in the memcache. If
possible, we'll use the zip file index to quickly locate where the file
should be found. (See MapToFileArchive documentation for assumptions about
file ordering.) If we don't have an index or don't find the file where the
index says we should, look through all the zip files to find it.
Args:
file_path: the file that we're looking for
Returns:
The contents of the requested file
"""
resp_data = None
file_itr = iter(self.zipfilenames)
# check the index, if we have one, to see what archive the file is in
archive_name = self.MapFileToArchive(file_path)
if not archive_name:
archive_name = file_itr.next()[0]
while resp_data is None and archive_name:
zip_archive = self.LoadZipFile(archive_name)
if zip_archive:
# we expect some lookups will fail, and that's okay, 404s will deal
# with that
try:
resp_data = CacheFile()
info = os.stat(archive_name)
#lastmod = datetime.datetime.fromtimestamp(info[8])
lastmod = datetime.datetime(*zip_archive.getinfo(file_path).date_time)
resp_data.file = zip_archive.read(file_path)
resp_data.lastmod = lastmod
resp_data.etag = '"%s"' % md5_constructor(resp_data.file).hexdigest()
except (KeyError, RuntimeError), err:
# no op
x = False
resp_data = None
if resp_data is not None:
logging.info('%s read from %s', file_path, archive_name)
try:
archive_name = file_itr.next()[0]
except (StopIteration), err:
archive_name = False
return resp_data
def LoadZipFile(self, zipfilename):
"""Convenience method to load zip file.
Just a convenience method to load the zip file from the data store. This is
useful if we ever want to change data stores and also as a means of
dependency injection for testing. This method will look at our file cache
first, and then load and cache the file if there's a cache miss
Args:
zipfilename: the name of the zip file to load
Returns:
The zip file requested, or None if there is an I/O error
"""
zip_archive = None
zip_archive = self.zipfile_cache.get(zipfilename)
if zip_archive is None:
try:
zip_archive = zipfile.ZipFile(zipfilename)
self.zipfile_cache[zipfilename] = zip_archive
except (IOError, RuntimeError), err:
logging.error('Can\'t open zipfile %s, cause: %s' % (zipfilename,
err))
return zip_archive
def MapFileToArchive(self, file_path):
"""Given a file name, determine what archive it should be in.
This method makes two critical assumptions.
(1) The zip files passed as an argument to the handler, if concatenated
in that same order, would result in a total ordering
of all the files. See (2) for ordering type.
(2) Upper case letters before lower case letters. The traversal of a
directory tree is depth first. A parent directory's files are added
before the files of any child directories
Args:
file_path: the file to be mapped to an archive
Returns:
The name of the archive where we expect the file to be
"""
num_archives = len(self.zipfilenames)
while num_archives > 0:
target = self.zipfilenames[num_archives - 1]
if len(target) > 1:
if self.CompareFilenames(target[1], file_path) >= 0:
return target[0]
num_archives -= 1
return None
def CompareFilenames(self, file1, file2):
"""Determines whether file1 is lexigraphically 'before' file2.
WARNING: This method assumes that paths are output in a depth-first,
with parent directories' files stored before childs'
We say that file1 is lexigraphically before file2 if the last non-matching
path segment of file1 is alphabetically before file2.
Args:
file1: the first file path
file2: the second file path
Returns:
A positive number if file1 is before file2
A negative number if file2 is before file1
0 if filenames are the same
"""
f1_segments = file1.split('/')
f2_segments = file2.split('/')
segment_ptr = 0
while (segment_ptr < len(f1_segments) and
segment_ptr < len(f2_segments) and
f1_segments[segment_ptr] == f2_segments[segment_ptr]):
segment_ptr += 1
if len(f1_segments) == len(f2_segments):
# we fell off the end, the paths much be the same
if segment_ptr == len(f1_segments):
return 0
# we didn't fall of the end, compare the segments where they differ
if f1_segments[segment_ptr] < f2_segments[segment_ptr]:
return 1
elif f1_segments[segment_ptr] > f2_segments[segment_ptr]:
return -1
else:
return 0
# the number of segments differs, we either mismatched comparing
# directories, or comparing a file to a directory
else:
# IF we were looking at the last segment of one of the paths,
# the one with fewer segments is first because files come before
# directories
# ELSE we just need to compare directory names
if (segment_ptr + 1 == len(f1_segments) or
segment_ptr + 1 == len(f2_segments)):
return len(f2_segments) - len(f1_segments)
else:
if f1_segments[segment_ptr] < f2_segments[segment_ptr]:
return 1
elif f1_segments[segment_ptr] > f2_segments[segment_ptr]:
return -1
else:
return 0
def SetCachingHeaders(self):
"""Set caching headers for the request."""
max_age = self.MAX_AGE
self.response.headers['Expires'] = email.Utils.formatdate(
time.time() + max_age, usegmt=True)
cache_control = []
if self.PUBLIC:
cache_control.append('public')
cache_control.append('max-age=%d' % max_age)
self.response.headers['Cache-Control'] = ', '.join(cache_control)
# adding caching headers for the client
if self.CLIENT_CACHING:
if self.current_last_modified:
self.response.headers['Last-Modified'] = self.current_last_modified.strftime("%a, %d %b %Y %H:%M:%S GMT")
if self.current_etag:
self.response.headers['ETag'] = self.current_etag
def GetFromCache(self, filename):
    """Look the file up in memcache.

    Args:
      filename: The URL of the file to return
    Returns:
      The cached content of the file, or None when it is not cached.
    """
    cache_key = "%s%s" % (self.CACHE_PREFIX, filename)
    return memcache.get(cache_key)
def StoreOrUpdateInCache(self, filename, data):
    """Store data in the cache.

    Store a piece of data in the memcache. Memcache has a maximum item size of
    1*10^6 bytes. If the data is too large, fail, but log the failure. Future
    work will consider compressing the data before storing or chunking it

    Args:
      filename: the name of the file to store
      data: the data of the file
    Returns:
      None
    """
    try:
        # add() only succeeds for keys that are absent; fall back to
        # replace() so an existing entry is refreshed rather than kept stale.
        if not memcache.add("%s%s" % (self.CACHE_PREFIX, filename), data):
            memcache.replace("%s%s" % (self.CACHE_PREFIX, filename), data)
    # `except X as e` replaces the legacy `except (X), e:` comma form,
    # which is a SyntaxError on Python 3; behavior is identical.
    except ValueError as err:
        # memcache raises ValueError when the item exceeds the size limit.
        logging.warning("Data size too large to cache\n%s" % err)
def Write404Error(self):
    """Output a simple 404 response."""
    self.error(404)
    message = 'Error 404, file not found'
    self.response.out.write(message)
def StoreInNegativeCache(self, filename):
    """If a non-existent URL is accessed, cache this result as well.

    Future work should consider setting a maximum negative cache size to
    prevent it from negatively impacting the real cache.

    Args:
      filename: URL to add to negative cache
    Returns:
      None
    """
    # -1 is a sentinel value; GetFromNegativeCache returns it to signal
    # a known-missing file.
    memcache.add("%s%s" % (self.NEG_CACHE_PREFIX, filename), -1)
def GetFromNegativeCache(self, filename):
    """Retrieve from negative cache.

    Args:
      filename: URL to retrieve
    Returns:
      -1 (the sentinel stored by StoreInNegativeCache) if the URL is in
      the negative cache, otherwise None.
    """
    return memcache.get("%s%s" % (self.NEG_CACHE_PREFIX, filename))
def main():
    """Create the WSGI application and hand it to the App Engine runner."""
    # Route /<zip-prefix>/<path-within-zip> to the handler.
    routes = [('/([^/]+)/(.*)', MemcachedZipHandler)]
    application = webapp.WSGIApplication(routes)
    util.run_wsgi_app(application)
if __name__ == '__main__':
main()
| |
import pygame
from pygame.locals import *
import threading
import os
from constants import *
# Module-level input state shared by all widgets; the guiTextBox polling
# thread uses BACKDOWN/KEYDOWN/SPACEDOWN to debounce key repeats.
MOUSEDOWN = False
BACKDOWN = False
KEYDOWN = False
SPACEDOWN = False
# Set to True to make the dialog event loop and helper threads exit.
GUIQUIT = False
# NOTE(review): hover state is shared by every guiButton instance, so one
# button's hover can mask another's -- confirm this is intended.
hover = False
ENABLED_TEXT_COLOR = (235,235,235)
DISABLED_TEXT_COLOR = (200,200,200)
class guiButton(pygame.Surface):
    # A clickable button rendered onto its own per-pixel-alpha Surface.
    def __init__(self, caption, position, action=0,parameter=0,sizing=1,y_sizing=1,font_size=14,image=None,enabled=True,name=None):
        """Build a button.

        caption: text drawn centered on the button image.
        position: (x, y) of the button on the parent surface; used by
            handle_event to translate mouse coordinates into local space.
        action: callable fired on left-click; 0 is the "no action" sentinel.
        parameter: single argument passed to action; 0 means "no argument".
        sizing / y_sizing: horizontal / vertical scale factors for images.
        image: optional custom Surface used for both idle and pressed
            states; default images are loaded from images/gui/.
        enabled: initial enabled state (see set_enabled).
        """
        #initializing
        self.name=name
        self.clicked=False
        self.caption = caption
        self.position = position
        self.parameter=parameter
        self.action = action
        self.sizing=sizing
        self.font_size = font_size
        # Event types this widget handles; 4 is pygame's MOUSEMOTION value.
        self.events = [MOUSEBUTTONDOWN,4,MOUSEBUTTONUP]
        #loading from files
        if image==None:
            self.img = pygame.image.load(os.path.normpath("images//gui//blue.png"))
            self.pressed = pygame.image.load(os.path.normpath("images//gui//pressed.png"))
        else:
            self.img=image
            self.pressed=image
        if self.sizing!=1 or y_sizing!=1:
            self.img=pygame.transform.scale(self.img,
                (int(self.img.get_width()*self.sizing),
                 int(self.img.get_height()*y_sizing))
                )
            self.pressed=pygame.transform.scale(self.pressed,
                (int(self.pressed.get_width()*self.sizing),
                 int(self.pressed.get_height()*y_sizing))
                )
        # hover: translucent white overlay; btn: pristine copy used to
        # restore the idle look after hover/press redraws.
        self.hover = self.img.copy()
        self.hover.fill((255,255,255,0))
        self.btn = self.img.copy()
        self._width = self.img.get_width()
        self._height = self.img.get_height()
        self.img = self.img.convert_alpha()
        self.font = pygame.font.Font(os.path.normpath("fonts//Kabel.ttf"), self.font_size)
        self.text_color = ENABLED_TEXT_COLOR
        self._enable = enabled
        #calling parent constructor
        pygame.Surface.__init__(self, size=(self._width,self._height),flags=pygame.SRCALPHA)
        #update surface
        self.update_surface()
    def update_surface(self):
        """Re-render caption onto the button image and repaint self."""
        #writing caption on button in the center
        text_surf = self.font.render(self.caption, True, self.text_color)
        textpos = text_surf.get_rect()
        textpos.center = (self.img.get_rect().center[0],
                          self.img.get_rect().center[1] - textpos.centery/4)
        self.img.blit(text_surf, textpos)
        #filling surface with transparent color and than pasting button on it
        self.fill((0,0,0,0))
        self.blit(self.img,(0,0))
        #self.blit(self.hover,(0,0))
    def handle_event(self, event):
        """Dispatch one pygame event to this button.

        Handles hover highlighting (type 4 == MOUSEMOTION), the pressed
        image swap (MOUSEBUTTONDOWN) and firing the action callback
        (MOUSEBUTTONUP).  Mouse coordinates are offset by self.position
        because the button is blitted at an offset on its parent.
        """
        global hover
        if self._enable:
            if event.type == 4:
                if self.get_rect().collidepoint((event.pos[0]-self.position[0],event.pos[1]-self.position[1])):
                    if not hover:
                        self.img = self.btn.copy()
                        self.hover.fill((255,255,255,20))
                        self.update_surface()
                        hover = True
                else:
                    if hover:
                        self.img = self.btn.copy()
                        self.hover.fill((255,255,255,0))
                        self.update_surface()
                        hover = False
            elif event.type==MOUSEBUTTONDOWN:
                if event.button == 1:
                    if self.get_rect().collidepoint((event.pos[0]-self.position[0],event.pos[1]-self.position[1])):
                        self.img = self.pressed.copy()
                        self.update_surface()
            elif event.type==MOUSEBUTTONUP:
                if event.button == 1:
                    if self.get_rect().collidepoint((event.pos[0]-self.position[0],event.pos[1]-self.position[1])):
                        # 0 is the "no callback" sentinel for action/parameter.
                        if not self.action==0:
                            self.clicked=True
                            if self.parameter==0:
                                self.action()
                            else:
                                self.action(self.parameter)
                    # Restore the idle image after any left-button release.
                    self.img = self.btn.copy()
                    self.update_surface()
    def set_enabled(self, enable):
        """Enable/disable the button and refresh its text color."""
        self._enable = enable
        if self._enable:
            self.img = self.btn.copy()
            self.text_color = ENABLED_TEXT_COLOR
        else:
            self.img = self.btn.copy()
            self.text_color = DISABLED_TEXT_COLOR
        self.update_surface()
    def set_font_size(self, size):
        # Takes effect on the next font rebuild; does not re-render here.
        self.font_size = size
    def set_caption(self, caption):
        self.caption = caption
    def set_position(self, position):
        self.position = position
    def set_action(self, action):
        self.action = action
####################
#guiImageList
###################
class guiImageList(pygame.Surface):
    # Horizontally scrollable strip of selectable images (token picker).
    def __init__(self, position, image_paths):
        """position: (x, y) on the parent surface; image_paths: files to load."""
        self.position = position
        self.images = []
        # Horizontal scroll offset in pixels (each cell is 64 px wide).
        self.scrollx = 0
        # Index of the currently selected image.
        self.selected = 0
        self.events = [MOUSEBUTTONUP]
        #loading from files
        for img in image_paths:
            self.images.append(pygame.image.load(img).convert_alpha())
        self.left = pygame.image.load(os.path.normpath("images//gui//left.png")).convert_alpha()
        self.right = pygame.image.load(os.path.normpath("images//gui//right.png")).convert_alpha()
        self.cover = pygame.image.load(os.path.normpath("images//gui//cover.png")).convert_alpha()
        self.bg = pygame.image.load(os.path.normpath("images//gui//bg.png")).convert_alpha()
        # Highlight frame drawn behind the selected image.
        self.slct_img = pygame.Surface((52,52))
        self.width = 240
        self.height = 70
        #calling parent constructor
        pygame.Surface.__init__(self, size=(self.width,self.height),flags=pygame.SRCALPHA)
        #filling surface with transparent color and than pasting button on it
        self.update_surface()
    def update_surface(self):
        """Repaint background, images (with selection frame) and arrows."""
        self.fill((155,200,255))
        self.slct_img.fill((225, 231, 68))
        self.blit(self.bg, (0, 0))
        i = 0
        for img in self.images:
            if self.selected == i:
                # Wrap the selected image in the highlight frame.
                self.slct_img.blit(img, (2, 2))
                img = self.slct_img
            self.blit(img,((64*i)+(-self.scrollx)+32, 10))
            i += 1
        self.blit(self.cover, (0, 0))
        self.blit(self.left, (8,2))
        self.blit(self.right, (self.width-(self.right.get_width()+9),2))
    def handle_event(self, event):
        """Left/right arrows scroll by one 64 px cell; a click selects an image."""
        if event.type==MOUSEBUTTONUP:
            if event.button==1:
                pos = event.pos
                if self.left.get_rect().collidepoint((pos[0]-(self.position[0]+8),pos[1]-self.position[1])):
                    if self.scrollx >= 64:
                        self.scrollx -= 64
                        self.update_surface()
                elif self.right.get_rect().collidepoint((pos[0]-(self.position[0]+self.width-(self.right.get_width()+8)),pos[1]-self.position[1])):
                    # 3 cells are visible at once, so stop when the last
                    # image scrolls into view.
                    if self.scrollx < (len(self.images)-3) * 64:
                        self.scrollx += 64
                        self.update_surface()
                else:
                    i = 0
                    for img in self.images:
                        if img.get_rect().collidepoint(pos[0]-(self.position[0]+(64*i)+(-self.scrollx)+32),pos[1]-(self.position[1]+8)):
                            self.selected = i
                            self.update_surface()
                        i += 1
################
# guiTextBox
################
class guiTextBox(pygame.Surface):
    # Single-line text input; keyboard is read by a background polling
    # thread rather than through the pygame event queue.
    def __init__(self, position, focus=False, label="Your Text Here ..."):
        """position: (x, y) on parent; label: placeholder shown while empty."""
        self.position = position
        self.label = label
        self.focus = focus
        self.events = [MOUSEBUTTONDOWN]
        self.mask = pygame.image.load(os.path.normpath("images//gui//textbox.png"))
        self.font = pygame.font.Font(os.path.normpath("fonts//Kabel.ttf"), 16)
        self.text = ""
        self._width = self.mask.get_width()
        self._height = self.mask.get_height()
        #calling parent constructor
        pygame.Surface.__init__(self, size=(self._width,self._height),flags=pygame.SRCALPHA)
        #filling surface with transparent color and than pasting button on it
        self.update_surface()
        # Daemon thread that polls the keyboard while this box has focus.
        thread1 = threading.Thread(target=self.key_event)
        thread1.daemon = True
        thread1.start()
    def update_surface(self):
        """Repaint the box: current text, a '_' caret when focused, or the
        gray placeholder label when empty and unfocused."""
        self.fill((155,200,255))
        self.blit(self.mask, (0, 0))
        if not self.focus:
            text_surf = self.font.render(self.text, True, (50,50,50))
        else:
            text_surf = self.font.render(self.text+"_", True, (50,50,50))
        textpos = text_surf.get_rect().move(5,self.get_rect().center[1] - text_surf.get_rect().centery)
        if not self.focus and self.text=="":
            text_surf = self.font.render(self.label, True, (200,200,200,150))
            textpos = text_surf.get_rect().move(5,self.get_rect().center[1] - text_surf.get_rect().centery)
        self.blit(text_surf, textpos)
    def handle_event(self, event):
        """Clicking inside grabs focus; clicking elsewhere releases it."""
        if event.type == MOUSEBUTTONDOWN:
            if event.button==1:
                if self.get_rect().collidepoint((event.pos[0]-self.position[0],event.pos[1]-self.position[1])):
                    self.focus = True
                    self.update_surface()
                else:
                    self.focus = False
                    self.update_surface()
    def key_event(self):
        """Poll the keyboard at ~100 Hz while focused.

        Uses the module-level BACKDOWN/SPACEDOWN/KEYDOWN flags to fire
        each key once per press (debounce).  Letter keys are detected by
        slicing pygame's pressed-key array from K_a up to K_DELETE and
        converted to characters via their ASCII offset (+65 upper-case
        with shift, +97 lower-case).  Exits when GUIQUIT is set.
        """
        global KEYDOWN
        global BACKDOWN
        global SPACEDOWN
        clock = pygame.time.Clock()
        # Event loop
        while True:
            if GUIQUIT:
                return
            clock.tick(100)
            if self.focus:
                k_list = pygame.key.get_pressed()
                if k_list[K_BACKSPACE]:
                    if not BACKDOWN:
                        # Drop the last character.
                        self.text = self.text[:len(self.text)-1]
                        self.update_surface()
                        BACKDOWN = True
                else:
                    BACKDOWN = False
                if k_list[K_SPACE]:
                    if not SPACEDOWN:
                        self.text = self.text + ' '
                        self.update_surface()
                        SPACEDOWN = True
                else:
                    SPACEDOWN = False
                k_list1 = k_list[K_a:K_DELETE]
                if True in k_list1:
                    if not KEYDOWN:
                        if pygame.key.get_mods() & KMOD_SHIFT:
                            self.text = self.text + chr(k_list1.index(True)+65)
                            self.update_surface()
                        else:
                            self.text = self.text + chr(k_list1.index(True)+97)
                            self.update_surface()
                        KEYDOWN = True
                else:
                    KEYDOWN = False
class playerDialog():
    # Modal "choose players" dialog: blocks until both names are entered,
    # then returns [[name1, token_index1], [name2, token_index2]].
    def __init__(self):
        self.result = ""
    def show(self):
        """Run the dialog and return the player selections.

        Spawns the draw loop in a daemon thread and joins it, so this
        call blocks until GUIQUIT is set by the Play! button; then shuts
        pygame down and resets GUIQUIT for the next use.
        """
        self.thread = threading.Thread(target=self.draw)
        self.thread.daemon = True
        self.thread.start()
        self.thread.join()
        pygame.quit()
        global GUIQUIT
        GUIQUIT = False
        return self.result
    def draw(self):
        """Build the window/widgets and run the blocking event loop."""
        # Initialise screen
        pygame.init()
        os.environ['SDL_VIDEO_WINDOW_POS'] = "{},{}".format(200,100) # x,y position of the screen
        screen = pygame.display.set_mode((720, 540)) #witdth and height
        pygame.display.set_caption("Monopoly")
        # Fill background
        background = pygame.Surface(screen.get_size())
        background = background.convert()
        clock = pygame.time.Clock()
        #load
        bg_img = pygame.image.load(os.path.normpath("images//gui//bigbg.png"))
        font = pygame.font.Font(os.path.normpath("fonts//Kabel.ttf"), 14)
        font2 = pygame.font.Font(os.path.normpath("fonts//Kabel.ttf"), 40)
        #controls
        p_num1 = font2.render("Player 1", True, (200,200,200))
        self.text_surf1 = font.render("Name : ", True, (30,30,30))
        self.tok_surf1 = font.render("Token : ", True, (30,30,30))
        p_num2 = font2.render("Player 2", True, (200,200,200))
        self.text_surf2 = font.render("Name : ", True, (30,30,30))
        self.tok_surf2 = font.render("Token : ", True, (30,30,30))
        control_list = []
        control_list.append(guiTextBox((50,200), focus=True, label="player1"))
        control_list.append(guiImageList((50,270), TOKENS))
        control_list.append(guiTextBox((360 + 50,200), focus=False, label="player2"))
        control_list.append(guiImageList((360 + 50,270), TOKENS))
        # NOTE(review): os.kill(pid, 0) sends signal 0, which on POSIX only
        # checks that the process exists and does not terminate it --
        # confirm whether Exit/QUIT are actually meant to kill the process.
        control_list.append(guiButton("Exit",(30,495), lambda: os.kill(os.getpid(),0)))
        control_list.append(guiButton("Play!",(620,495), lambda: get_input()))
        def get_input():
            # Validate both names; on success publish result and stop the
            # GUI, otherwise flag the empty field(s) in red.
            if not control_list[0].text == "" and not control_list[2].text == "":
                self.result=[[control_list[0].text,control_list[1].selected],[control_list[2].text,control_list[3].selected]]
                global GUIQUIT
                GUIQUIT = True
            else:
                if control_list[0].text == "":
                    self.text_surf1 = font.render("Name : ", True, (255,30,30))
                if control_list[2].text == "":
                    self.text_surf2 = font.render("Name : ", True, (255,30,30))
        # Event loop
        while 1:
            clock.tick(100) #FPS
            if GUIQUIT:
                return
            for event in pygame.event.get():
                # Fan each event out to the widgets that subscribed to it.
                for control in control_list:
                    if event.type in control.events:
                        control.handle_event(event)
                if event.type == QUIT:
                    pygame.quit()
                    os.kill(os.getpid(),0)
            background.fill((180, 190, 180))
            background.blit(bg_img, (0,0))
            background.blit(p_num1, (80, 70))
            background.blit(self.tok_surf1,(50,245))
            background.blit(self.text_surf1,(50,175))
            background.blit(p_num2, (360 + 80, 70))
            background.blit(self.tok_surf2,(360 + 50,245))
            background.blit(self.text_surf2,(360 + 50,175))
            for control in control_list:
                background.blit(control,control.position)
            screen.blit(background, (0, 0))
            pygame.display.flip()
| |
# -*- coding: utf-8 -*-
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
import numpy as np
from numpy.testing import assert_array_equal, assert_allclose
from vispy.color import (Color, ColorArray, get_color_names,
Colormap,
get_color_dict, get_colormap, get_colormaps)
from vispy.visuals.shaders import Function
from vispy.util import use_log_level
from vispy.testing import (run_tests_if_main, assert_equal, assert_raises,
assert_true)
def test_color():
    """Basic tests for Color class"""
    c = Color('white')
    # White is all-ones (floats) / all-255 (ints) in every representation.
    for channels, expected in [(c.rgba, [1.] * 4),
                               (c.rgb, [1.] * 3),
                               (c.RGBA, [255] * 4),
                               (c.RGB, [255] * 3)]:
        assert_array_equal(channels, expected)
    assert_equal(c.value, 1.)
    assert_equal(c.alpha, 1.)
    # Mutate to pure blue and check the HSV and hex views.
    c.rgb = [0, 0, 1]
    assert_array_equal(c.hsv, [240, 1, 1])
    assert_equal(c.hex, '#0000ff')
    # Fully transparent black via an 8-digit hex string.
    c.hex = '#00000000'
    assert_array_equal(c.rgba, [0.] * 4)
def test_color_array():
    """Basic tests for ColorArray class"""
    x = ColorArray(['r', 'g', 'b'])
    # r/g/b rows form the identity matrix in RGB space.
    assert_array_equal(x.rgb, np.eye(3))
    # Test ColorArray.__getitem__.
    assert isinstance(x[0], ColorArray)
    assert isinstance(x[:], ColorArray)
    assert_array_equal(x.rgba[:], x[:].rgba)
    assert_array_equal(x.rgba[0], x[0].rgba.squeeze())
    assert_array_equal(x.rgba[1:3], x[1:3].rgba)
    # Multi-dimensional indexing is rejected.
    assert_raises(ValueError, x.__getitem__, (0, 1))
    # Test ColorArray.__setitem__.
    x[0] = 0
    assert_array_equal(x.rgba[0, :], np.zeros(4))
    assert_array_equal(x.rgba, x[:].rgba)
    x[1] = 1
    assert_array_equal(x[1].rgba, np.ones((1, 4)))
    x[:] = .5
    assert_array_equal(x.rgba, .5 * np.ones((3, 4)))
    assert_raises(ValueError, x.__setitem__, (0, 1), 0)
    # test hsv color space colors
    x = ColorArray(color_space="hsv", color=[(0, 0, 1),
                                             (0, 0, 0.5), (0, 0, 0)])
    assert_array_equal(x.rgba[0], [1, 1, 1, 1])
    assert_array_equal(x.rgba[1], [0.5, 0.5, 0.5, 1])
    assert_array_equal(x.rgba[2], [0, 0, 0, 1])
    # Default hsv construction yields a single opaque black entry.
    x = ColorArray(color_space="hsv")
    assert_array_equal(x.rgba[0], [0, 0, 0, 1])
    # Construction from sequences of Color / ColorArray objects.
    x = ColorArray([Color((0, 0, 0)), Color((1, 1, 1))])
    assert len(x.rgb) == 2
    x = ColorArray([ColorArray((0, 0, 0)), ColorArray((1, 1, 1))])
    assert len(x.rgb) == 2
def test_color_interpretation():
    """Test basic color interpretation API"""
    # test useful ways of single color init
    r = ColorArray('r')
    print(r)  # test repr
    r2 = ColorArray(r)
    assert_equal(r, r2)
    r2.rgb = 0, 0, 0
    assert_equal(r2, ColorArray('black'))
    assert_equal(r, ColorArray('r'))  # modifying new one preserves old
    assert_equal(r, r.copy())
    # All spellings of red must compare equal.
    assert_equal(r, ColorArray('#ff0000'))
    assert_equal(r, ColorArray('#FF0000FF'))
    assert_equal(r, ColorArray('red'))
    assert_equal(r, ColorArray('red', alpha=1.0))
    assert_equal(ColorArray((1, 0, 0, 0.1)), ColorArray('red', alpha=0.1))
    assert_array_equal(r.rgb.ravel(), (1., 0., 0.))
    assert_array_equal(r.rgba.ravel(), (1., 0., 0., 1.))
    assert_array_equal(r.RGBA.ravel(), (255, 0, 0, 255))
    # handling multiple colors
    rgb = ColorArray(list('rgb'))
    print(rgb)  # multi repr
    assert_array_equal(rgb, ColorArray(np.eye(3)))
    # complex/annoying case
    rgb = ColorArray(['r', (0, 1, 0), '#0000ffff'])
    assert_array_equal(rgb, ColorArray(np.eye(3)))
    assert_raises(RuntimeError, ColorArray, ['r', np.eye(3)])  # can't nest
    # getting/setting properties
    r = ColorArray('#ffff')
    assert_equal(r, ColorArray('white'))
    r = ColorArray('#ff000000')
    assert_true('turquoise' in get_color_names())  # make sure our JSON loaded
    assert_equal(r.alpha, 0)
    r.alpha = 1.0
    assert_equal(r, ColorArray('r'))
    r.alpha = 0
    r.rgb = (1, 0, 0)
    # Setting rgb must leave alpha untouched.
    assert_equal(r.alpha, 0)
    assert_equal(r.hex, ['#ff0000'])
    r.alpha = 1
    r.hex = '00ff00'
    assert_equal(r, ColorArray('g'))
    assert_array_equal(r.rgb.ravel(), (0., 1., 0.))
    r.RGB = 255, 0, 0
    assert_equal(r, ColorArray('r'))
    assert_array_equal(r.RGB.ravel(), (255, 0, 0))
    r.RGBA = 255, 0, 0, 0
    assert_equal(r, ColorArray('r', alpha=0))
    w = ColorArray()
    w.rgb = ColorArray('r').rgb + ColorArray('g').rgb + ColorArray('b').rgb
    assert_equal(w, ColorArray('white'))
    w = ColorArray('white')
    # darker()/lighter() round-trips; copy=False mutates in place.
    assert_equal(w, w.darker().lighter())
    assert_equal(w, w.darker(0.1).darker(-0.1))
    w2 = w.darker()
    assert_true(w != w2)
    w.darker(copy=False)
    assert_equal(w, w2)
    with use_log_level('warning', record=True, print_msg=False) as w:
        w = ColorArray('white')
        # Out-of-range value should warn and clamp rather than raise.
        w.value = 2
        assert_equal(len(w), 1)
        assert_equal(w, ColorArray('white'))
    # warnings and errors
    assert_raises(ValueError, ColorArray, '#ffii00')  # non-hex
    assert_raises(ValueError, ColorArray, '#ff000')  # too short
    assert_raises(ValueError, ColorArray, [0, 0])  # not enough vals
    assert_raises(ValueError, ColorArray, [2, 0, 0])  # val > 1
    assert_raises(ValueError, ColorArray, [-1, 0, 0])  # val < 0
    # clip=True tolerates out-of-range values by clamping instead.
    c = ColorArray([2., 0., 0.], clip=True)  # val > 1
    assert_true(np.all(c.rgb <= 1))
    c = ColorArray([-1., 0., 0.], clip=True)  # val < 0
    assert_true(np.all(c.rgb >= 0))
    # make sure our color dict works
    for key in get_color_names():
        assert_true(ColorArray(key))
    assert_raises(ValueError, ColorArray, 'foo')  # unknown color error
    _color_dict = get_color_dict()
    assert isinstance(_color_dict, dict)
    assert set(_color_dict.keys()) == set(get_color_names())
# Reference conversions used by test_color_conversion below.
# Taken from known values
hsv_dict = dict(red=(0, 1, 1),
                lime=(120, 1, 1),
                yellow=(60, 1, 1),
                silver=(0, 0, 0.75),
                olive=(60, 1, 0.5),
                purple=(300, 1, 0.5),
                navy=(240, 1, 0.5))
# Taken from skimage conversions
lab_dict = dict(red=(53.2405879437448, 80.0941668344849, 67.2015369950715),
                lime=(87.7350994883189, -86.1812575110439, 83.1774770684517),
                yellow=(97.1395070397132, -21.5523924360088, 94.4757817840079),
                black=(0., 0., 0.),
                white=(100., 0., 0.),
                gray=(53.5850240, 0., 0.),
                olive=(51.86909754, -12.93002583, 56.67467593))
def test_color_conversion():
    """Test color conversions"""
    # HSV
    # test known values
    test = ColorArray()
    for key in hsv_dict:
        c = ColorArray(key)
        test.hsv = hsv_dict[key]
        # Allow one 8-bit step of rounding error in HSV -> RGB.
        assert_allclose(c.RGB, test.RGB, atol=1)
    test.value = 0
    assert_equal(test.value, 0)
    assert_equal(test, ColorArray('black'))
    c = ColorArray('black')
    assert_array_equal(c.hsv.ravel(), (0, 0, 0))
    # Random round-trips: RGB -> HSV -> RGB must recover the HSV we set.
    rng = np.random.RandomState(0)
    for _ in range(50):
        hsv = rng.rand(3)
        hsv[0] *= 360
        hsv[1] = hsv[1] * 0.99 + 0.01  # avoid ugly boundary effects
        hsv[2] = hsv[2] * 0.99 + 0.01
        c.hsv = hsv
        assert_allclose(c.hsv.ravel(), hsv, rtol=1e-4, atol=1e-4)
    # Lab
    test = ColorArray()
    for key in lab_dict:
        c = ColorArray(key)
        test.lab = lab_dict[key]
        assert_allclose(c.rgba, test.rgba, atol=1e-4, rtol=1e-4)
        assert_allclose(test.lab.ravel(), lab_dict[key], atol=1e-4, rtol=1e-4)
    for _ in range(50):
        # boundaries can have ugly rounding errors in some parameters
        rgb = (rng.rand(3)[np.newaxis, :] * 0.9 + 0.05)
        c.rgb = rgb
        lab = c.lab
        c.lab = lab
        assert_allclose(c.lab, lab, atol=1e-4, rtol=1e-4)
        assert_allclose(c.rgb, rgb, atol=1e-4, rtol=1e-4)
def test_colormap_interpolation():
    """Test interpolation routines for colormaps."""
    import vispy.color.colormap as c
    # Control-point lists must start at 0. and end at 1.
    assert_raises(AssertionError, c._glsl_step, [0., 1.],)
    for fun in (c._glsl_step, c._glsl_mix):
        assert_raises(AssertionError, fun, controls=[0.1, 1.],)
        assert_raises(AssertionError, fun, controls=[0., .9],)
        assert_raises(AssertionError, fun, controls=[0.1, .9],)
    # Interpolation tests.
    color_0 = np.array([1., 0., 0.])
    color_1 = np.array([0., 1., 0.])
    color_2 = np.array([0., 0., 1.])
    colors_00 = np.vstack((color_0, color_0))
    colors_01 = np.vstack((color_0, color_1))
    colors_11 = np.vstack((color_1, color_1))
    # colors_012 = np.vstack((color_0, color_1, color_2))
    colors_021 = np.vstack((color_0, color_2, color_1))
    controls_2 = np.array([0., 1.])
    controls_3 = np.array([0., .25, 1.])
    # Sample positions, including out-of-range values clamped at the ends.
    x = np.array([-1., 0., 0.1, 0.4, 0.5, 0.6, 1., 2.])[:, None]
    mixed_2 = c.mix(colors_01, x, controls_2)
    mixed_3 = c.mix(colors_021, x, controls_3)
    # First two samples clamp to color_0, last two to color_1.
    for y in mixed_2, mixed_3:
        assert_allclose(y[:2, :], colors_00)
        assert_allclose(y[-2:, :], colors_11)
    # A red/green mix never produces any blue component.
    assert_allclose(mixed_2[:, -1], np.zeros(len(y)))
def test_colormap_gradient():
    """Test gradient colormaps."""
    # Two-color gradient: endpoints clamp, midpoint blends 50/50.
    cm = Colormap(['r', 'g'])
    for pos, expected in [(-1, [[1, 0, 0, 1]]),
                          (0., [[1, 0, 0, 1]]),
                          (0.5, [[.5, .5, 0, 1]]),
                          (1., [[0, 1, 0, 1]])]:
        assert_allclose(cm[pos].rgba, expected)
    # Three evenly spaced colors: middle color sits exactly at 0.5.
    cm = Colormap(['r', 'g', 'b'])
    for pos, expected in [(-1, [[1, 0, 0, 1]]),
                          (0., [[1, 0, 0, 1]]),
                          (.5, [[0, 1, 0, 1]]),
                          (1, [[0, 0, 1, 1]]),
                          (2, [[0, 0, 1, 1]])]:
        assert_allclose(cm[pos].rgba, expected)
    # Explicit, non-uniform control points: middle color moves to 0.1.
    cm = Colormap(['r', 'g', 'b'], [0., 0.1, 1.0])
    assert_allclose(cm[-1].rgba, [[1, 0, 0, 1]])
    assert_allclose(cm[0.].rgba, [[1, 0, 0, 1]])
    assert_allclose(cm[.1].rgba, [[0, 1, 0, 1]])
    assert_allclose(cm[1].rgba, [[0, 0, 1, 1]], 1e-6, 1e-6)
    assert_allclose(cm[2].rgba, [[0, 0, 1, 1]], 1e-6, 1e-6)
def test_colormap_discrete():
    """Test discrete colormaps."""
    # interpolation='zero' makes the colormap piecewise-constant with
    # a hard switch midway between neighboring colors.
    cm = Colormap(['r', 'g'], interpolation='zero')
    assert_allclose(cm[-1].rgba, [[1, 0, 0, 1]])
    assert_allclose(cm[0.].rgba, [[1, 0, 0, 1]])
    assert_allclose(cm[0.49].rgba, [[1, 0, 0, 1]])
    assert_allclose(cm[0.51].rgba, [[0, 1, 0, 1]])
    assert_allclose(cm[1.].rgba, [[0, 1, 0, 1]])
    # Three colors: switches near 1/3 and 2/3.
    cm = Colormap(['r', 'g', 'b'], interpolation='zero')
    assert_allclose(cm[-1].rgba, [[1, 0, 0, 1]])
    assert_allclose(cm[0.].rgba, [[1, 0, 0, 1]])
    assert_allclose(cm[.32].rgba, [[1, 0, 0, 1]])
    assert_allclose(cm[.34].rgba, [[0, 1, 0, 1]])
    assert_allclose(cm[.66].rgba, [[0, 1, 0, 1]])
    assert_allclose(cm[.67].rgba, [[0, 0, 1, 1]])
    assert_allclose(cm[.99].rgba, [[0, 0, 1, 1]])
    assert_allclose(cm[1].rgba, [[0, 0, 1, 1]])
    assert_allclose(cm[1.1].rgba, [[0, 0, 1, 1]])
    # Explicit control points place the switches at 0.1 and 0.8.
    cm = Colormap(['r', 'g', 'b'], [0., 0.1, 0.8, 1.0],
                  interpolation='zero')
    assert_allclose(cm[-1].rgba, [[1, 0, 0, 1]])
    assert_allclose(cm[0.].rgba, [[1, 0, 0, 1]])
    assert_allclose(cm[.099].rgba, [[1, 0, 0, 1]])
    assert_allclose(cm[.101].rgba, [[0, 1, 0, 1]])
    assert_allclose(cm[.799].rgba, [[0, 1, 0, 1]])
    assert_allclose(cm[.801].rgba, [[0, 0, 1, 1]])
    assert_allclose(cm[1].rgba, [[0, 0, 1, 1]], 1e-6, 1e-6)
    assert_allclose(cm[2].rgba, [[0, 0, 1, 1]], 1e-6, 1e-6)
def test_colormap():
    """Test named colormaps."""
    autumn = get_colormap('autumn')
    assert autumn.glsl_map != ""
    # Scalar indexing yields a single color; array indexing one per value.
    assert len(autumn[0.]) == 1
    assert len(autumn[0.5]) == 1
    assert len(autumn[1.]) == 1
    assert len(autumn[[0., 0.5, 1.]]) == 3
    assert len(autumn[np.array([0., 0.5, 1.])]) == 3
    fire = get_colormap('fire')
    assert_array_equal(fire[0].rgba, np.ones((1, 4)))
    assert_array_equal(fire[1].rgba, np.array([[1, 0, 0, 1]]))
    grays = get_colormap('grays')
    assert_array_equal(grays[.5].rgb, np.ones((1, 3)) * .5)
    hot = get_colormap('hot')
    assert_allclose(hot[0].rgba, [[0, 0, 0, 1]], 1e-6, 1e-6)
    assert_allclose(hot[0.5].rgba, [[1, .52272022, 0, 1]], 1e-6, 1e-6)
    assert_allclose(hot[1.].rgba, [[1, 1, 1, 1]], 1e-6, 1e-6)
    # Test the GLSL and Python mapping.
    for name in get_colormaps():
        colormap = get_colormap(name)
        # The GLSL body must compile as a shader Function.
        Function(colormap.glsl_map)
        # Out-of-range inputs must still map into valid [0, 1] colors.
        colors = colormap[np.linspace(-2., 2., 50)]
        assert colors.rgba.min() >= 0
        assert colors.rgba.max() <= 1
def test_normalize():
    """Test the _normalize() function."""
    from vispy.color.colormap import _normalize
    # A scalar (degenerate range) always maps to the midpoint.
    for value in (-1, 0., .5, 1., 10., 20):
        assert _normalize(value) == .5
    assert_allclose(_normalize((-1., 0., 1.)), (0., .5, 1.))
    # Explicit limits clip by default ...
    assert_allclose(_normalize((-1., 0., 1.), 0., 1.),
                    (0., 0., 1.))
    # ... but pass values through unchanged with clip=False.
    assert_allclose(_normalize((-1., 0., 1.), 0., 1., clip=False),
                    (-1., 0., 1.))
    normalized = _normalize(np.random.randn(100, 5), -10., 10.)
    assert_allclose([normalized.min(), normalized.max()],
                    [0.2975, 1 - 0.2975], 1e-1, 1e-1)
run_tests_if_main()
| |
#
# Copyright 2014 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A non-blocking TCP connection factory.
"""
from __future__ import absolute_import, division, print_function
import functools
import socket
import numbers
import datetime
from tornado.concurrent import Future, future_add_done_callback
from tornado.ioloop import IOLoop
from tornado.iostream import IOStream
from tornado import gen
from tornado.netutil import Resolver
from tornado.platform.auto import set_close_exec
from tornado.gen import TimeoutError
from tornado.util import timedelta_to_seconds
# Seconds to wait on the primary address family before starting a
# parallel attempt to the secondary family (see _Connector.set_timeout).
_INITIAL_CONNECT_TIMEOUT = 0.3
class _Connector(object):
    """A stateless implementation of the "Happy Eyeballs" algorithm.

    "Happy Eyeballs" is documented in RFC6555 as the recommended practice
    for when both IPv4 and IPv6 addresses are available.

    In this implementation, we partition the addresses by family, and
    make the first connection attempt to whichever address was
    returned first by ``getaddrinfo``.  If that connection fails or
    times out, we begin a connection in parallel to the first address
    of the other family.  If there are additional failures we retry
    with other addresses, keeping one connection attempt per family
    in flight at a time.

    http://tools.ietf.org/html/rfc6555
    """
    def __init__(self, addrinfo, connect):
        # addrinfo: list of (family, address) pairs from the resolver.
        # connect: callable (af, addr) -> (stream, future); see
        # TCPClient._create_stream.
        self.io_loop = IOLoop.current()
        self.connect = connect
        # Resolves to (af, addr, stream) for the winning attempt.
        self.future = Future()
        self.timeout = None
        self.connect_timeout = None
        self.last_error = None
        # Attempts not yet completed (success or failure).
        self.remaining = len(addrinfo)
        self.primary_addrs, self.secondary_addrs = self.split(addrinfo)
        # Every stream ever started, so losing attempts can be closed.
        self.streams = set()

    @staticmethod
    def split(addrinfo):
        """Partition the ``addrinfo`` list by address family.

        Returns two lists.  The first list contains the first entry from
        ``addrinfo`` and all others with the same family, and the
        second list contains all other addresses (normally one list will
        be AF_INET and the other AF_INET6, although non-standard resolvers
        may return additional families).
        """
        primary = []
        secondary = []
        primary_af = addrinfo[0][0]
        for af, addr in addrinfo:
            if af == primary_af:
                primary.append((af, addr))
            else:
                secondary.append((af, addr))
        return primary, secondary

    def start(self, timeout=_INITIAL_CONNECT_TIMEOUT, connect_timeout=None):
        """Kick off the first attempt; returns the overall result future."""
        self.try_connect(iter(self.primary_addrs))
        self.set_timeout(timeout)
        if connect_timeout is not None:
            self.set_connect_timeout(connect_timeout)
        return self.future

    def try_connect(self, addrs):
        """Start a connection to the next address in the iterator ``addrs``."""
        try:
            af, addr = next(addrs)
        except StopIteration:
            # We've reached the end of our queue, but the other queue
            # might still be working.  Send a final error on the future
            # only when both queues are finished.
            if self.remaining == 0 and not self.future.done():
                self.future.set_exception(self.last_error or
                                          IOError("connection failed"))
            return
        stream, future = self.connect(af, addr)
        self.streams.add(stream)
        future_add_done_callback(
            future, functools.partial(self.on_connect_done, addrs, af, addr))

    def on_connect_done(self, addrs, af, addr, future):
        """Handle completion (success or failure) of a single attempt."""
        self.remaining -= 1
        try:
            stream = future.result()
        except Exception as e:
            if self.future.done():
                return
            # Error: try again (but remember what happened so we have an
            # error to raise in the end)
            self.last_error = e
            self.try_connect(addrs)
            if self.timeout is not None:
                # If the first attempt failed, don't wait for the
                # timeout to try an address from the secondary queue.
                self.io_loop.remove_timeout(self.timeout)
                self.on_timeout()
            return
        self.clear_timeouts()
        if self.future.done():
            # This is a late arrival; just drop it.
            stream.close()
        else:
            # Discard the winner so close_streams() only closes losers.
            self.streams.discard(stream)
            self.future.set_result((af, addr, stream))
            self.close_streams()

    def set_timeout(self, timeout):
        """Schedule the parallel attempt to the secondary address family."""
        self.timeout = self.io_loop.add_timeout(self.io_loop.time() + timeout,
                                                self.on_timeout)

    def on_timeout(self):
        """Primary family is slow: start racing the secondary family."""
        self.timeout = None
        if not self.future.done():
            self.try_connect(iter(self.secondary_addrs))

    def clear_timeout(self):
        # NOTE(review): appears unused in this file (the success path calls
        # clear_timeouts instead) -- confirm before removing.
        if self.timeout is not None:
            self.io_loop.remove_timeout(self.timeout)

    def set_connect_timeout(self, connect_timeout):
        """Schedule the overall deadline for the whole connect operation."""
        self.connect_timeout = self.io_loop.add_timeout(
            connect_timeout, self.on_connect_timeout)

    def on_connect_timeout(self):
        """Overall deadline hit: fail the future, close all attempts."""
        if not self.future.done():
            self.future.set_exception(TimeoutError())
        self.close_streams()

    def clear_timeouts(self):
        """Cancel both pending timeouts, if set."""
        if self.timeout is not None:
            self.io_loop.remove_timeout(self.timeout)
        if self.connect_timeout is not None:
            self.io_loop.remove_timeout(self.connect_timeout)

    def close_streams(self):
        """Close every stream still tracked (losing/cancelled attempts)."""
        for stream in self.streams:
            stream.close()
class TCPClient(object):
    """A non-blocking TCP connection factory.

    .. versionchanged:: 5.0
       The ``io_loop`` argument (deprecated since version 4.1) has been removed.
    """
    def __init__(self, resolver=None):
        # _own_resolver: close() only shuts down a resolver we created.
        if resolver is not None:
            self.resolver = resolver
            self._own_resolver = False
        else:
            self.resolver = Resolver()
            self._own_resolver = True

    def close(self):
        if self._own_resolver:
            self.resolver.close()

    @gen.coroutine
    def connect(self, host, port, af=socket.AF_UNSPEC, ssl_options=None,
                max_buffer_size=None, source_ip=None, source_port=None,
                timeout=None):
        """Connect to the given host and port.

        Asynchronously returns an `.IOStream` (or `.SSLIOStream` if
        ``ssl_options`` is not None).

        Using the ``source_ip`` kwarg, one can specify the source
        IP address to use when establishing the connection.
        In case the user needs to resolve and
        use a specific interface, it has to be handled outside
        of Tornado as this depends very much on the platform.

        Raises `TimeoutError` if the input future does not complete before
        ``timeout``, which may be specified in any form allowed by
        `.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or an absolute time
        relative to `.IOLoop.time`)

        Similarly, when the user requires a certain source port, it can
        be specified using the ``source_port`` arg.

        .. versionchanged:: 4.5
           Added the ``source_ip`` and ``source_port`` arguments.

        .. versionchanged:: 5.0
           Added the ``timeout`` argument.
        """
        if timeout is not None:
            # Normalize the timeout to an absolute deadline on the IOLoop
            # clock; the same deadline bounds resolve, connect and TLS.
            if isinstance(timeout, numbers.Real):
                timeout = IOLoop.current().time() + timeout
            elif isinstance(timeout, datetime.timedelta):
                timeout = IOLoop.current().time() + timedelta_to_seconds(timeout)
            else:
                raise TypeError("Unsupported timeout %r" % timeout)
        if timeout is not None:
            addrinfo = yield gen.with_timeout(
                timeout, self.resolver.resolve(host, port, af))
        else:
            addrinfo = yield self.resolver.resolve(host, port, af)
        connector = _Connector(
            addrinfo,
            functools.partial(self._create_stream, max_buffer_size,
                              source_ip=source_ip, source_port=source_port)
        )
        af, addr, stream = yield connector.start(connect_timeout=timeout)
        # TODO: For better performance we could cache the (af, addr)
        # information here and re-use it on subsequent connections to
        # the same host. (http://tools.ietf.org/html/rfc6555#section-4.2)
        if ssl_options is not None:
            if timeout is not None:
                stream = yield gen.with_timeout(timeout, stream.start_tls(
                    False, ssl_options=ssl_options, server_hostname=host))
            else:
                stream = yield stream.start_tls(False, ssl_options=ssl_options,
                                                server_hostname=host)
        raise gen.Return(stream)

    def _create_stream(self, max_buffer_size, af, addr, source_ip=None,
                       source_port=None):
        """Create one (stream, connect_future) attempt for _Connector."""
        # Always connect in plaintext; we'll convert to ssl if necessary
        # after one connection has completed.
        source_port_bind = source_port if isinstance(source_port, int) else 0
        source_ip_bind = source_ip
        if source_port_bind and not source_ip:
            # User required a specific port, but did not specify
            # a certain source IP, will bind to the default loopback.
            source_ip_bind = '::1' if af == socket.AF_INET6 else '127.0.0.1'
            # Trying to use the same address family as the requested af socket:
            # - 127.0.0.1 for IPv4
            # - ::1 for IPv6
        socket_obj = socket.socket(af)
        set_close_exec(socket_obj.fileno())
        if source_port_bind or source_ip_bind:
            # If the user requires binding also to a specific IP/port.
            try:
                socket_obj.bind((source_ip_bind, source_port_bind))
            except socket.error:
                socket_obj.close()
                # Fail loudly if unable to use the IP/port.
                raise
        try:
            stream = IOStream(socket_obj,
                              max_buffer_size=max_buffer_size)
        except socket.error as e:
            # NOTE(review): the success path returns a (stream, future)
            # 2-tuple, but this branch returns a bare Future, which
            # _Connector.try_connect would fail to unpack -- confirm
            # against upstream tornado before relying on this path.
            fu = Future()
            fu.set_exception(e)
            return fu
        else:
            return stream, stream.connect(addr)
| |
from ..rfb_utils.rfb_node_desc_utils.rfb_node_desc import RfbNodeDesc
from ..rfb_utils import filepath_utils
from ..rfb_utils.filepath import FilePath
from ..rfb_utils import generate_property_utils
from ..rfb_utils.property_callbacks import *
from ..rfb_utils.rman_socket_utils import node_add_inputs
from ..rfb_utils.rman_socket_utils import node_add_outputs
from ..rfb_utils import shadergraph_utils
from ..rfb_logger import rfb_log
from ..rfb_utils.envconfig_utils import envconfig
from .. import rfb_icons
from .. import rman_config
from ..rman_properties import rman_properties_renderlayers
from ..rman_properties import rman_properties_world
from ..rman_properties import rman_properties_camera
from ..rman_constants import RFB_ARRAYS_MAX_LEN
from ..rman_constants import CYCLES_NODE_MAP
from nodeitems_utils import NodeCategory, NodeItem
from collections import OrderedDict
from bpy.props import *
import bpy
import os
import sys
import traceback
import nodeitems_utils
from operator import attrgetter
# registers
from . import rman_bl_nodes_sockets
from . import rman_bl_nodes_shaders
from . import rman_bl_nodes_ops
from . import rman_bl_nodes_props
from . import rman_bl_nodes_menus
# Per-type registries of RfbNodeDesc objects discovered from .args/.oso
# files at registration time.  NOTE: __RMAN_NODES__ below aliases these
# lists, so appending through either name mutates the same object.
__RMAN_DISPLAY_NODES__ = []
__RMAN_BXDF_NODES__ = []
__RMAN_DISPLACE_NODES__ = []
__RMAN_INTEGRATOR_NODES__ = []
__RMAN_PROJECTION_NODES__ = []
__RMAN_DISPLAYFILTER_NODES__ = []
__RMAN_SAMPLEFILTER_NODES__ = []
__RMAN_PATTERN_NODES__ = []
__RMAN_LIGHT_NODES__ = []
__RMAN_LIGHTFILTER_NODES__ = []
# Blender typename -> dynamically generated node class (see generate_node_type)
__RMAN_NODE_TYPES__ = dict()
# node_type -> {category key: ((menu label, [NodeItem]), [RfbNodeDesc])}
__RMAN_NODE_CATEGORIES__ = dict()
__RMAN_NODE_CATEGORIES__['bxdf'] = dict()
__RMAN_NODE_CATEGORIES__['light'] = dict()
__RMAN_NODE_CATEGORIES__['pattern'] = dict()
__RMAN_NODE_CATEGORIES__['displace'] = dict()
__RMAN_NODE_CATEGORIES__['samplefilter'] = dict()
__RMAN_NODE_CATEGORIES__['displayfilter'] = dict()
__RMAN_NODE_CATEGORIES__['integrator'] = dict()
__RMAN_NODE_CATEGORIES__['projection'] = dict()
# Pre-seeded fallback ("misc") categories for each node type.
__RMAN_NODE_CATEGORIES__['bxdf']['bxdf_misc'] = (('RenderMan Misc Bxdfs', []), [])
__RMAN_NODE_CATEGORIES__['light']['light'] = (('RenderMan Lights', []), [])
__RMAN_NODE_CATEGORIES__['pattern']['patterns_misc'] = (('RenderMan Misc Patterns', []), [])
__RMAN_NODE_CATEGORIES__['displace']['displace'] = (('RenderMan Displacements', []), [])
__RMAN_NODE_CATEGORIES__['samplefilter']['samplefilter'] = (('RenderMan SampleFilters', []), [])
__RMAN_NODE_CATEGORIES__['displayfilter']['displayfilter'] = (('RenderMan DisplayFilters', []), [])
__RMAN_NODE_CATEGORIES__['integrator']['integrator'] = (('RenderMan Integrators', []), [])
__RMAN_NODE_CATEGORIES__['projection']['projection'] = (('RenderMan Projections', []), [])
# node_type -> registry list (aliases of the per-type lists above).
__RMAN_NODES__ = {
    'displaydriver': __RMAN_DISPLAY_NODES__,
    'bxdf': __RMAN_BXDF_NODES__,
    'displace': __RMAN_DISPLACE_NODES__,
    'integrator': __RMAN_INTEGRATOR_NODES__,
    'projection': __RMAN_PROJECTION_NODES__,
    'displayfilter': __RMAN_DISPLAYFILTER_NODES__,
    'samplefilter': __RMAN_SAMPLEFILTER_NODES__,
    'pattern': __RMAN_PATTERN_NODES__,
    'light': __RMAN_LIGHT_NODES__,
    'lightfilter': __RMAN_LIGHTFILTER_NODES__
}
# Non-shading plugin types and the PropertyGroup parent each attaches to
# (see register_plugin_types).
__RMAN_PLUGIN_MAPPING__ = {
    'displaydriver': rman_properties_renderlayers.RendermanAOV,
    'projection': rman_properties_camera.RendermanCameraSettings
}
# Shader files we deliberately do not register as nodes.
__RMAN_NODES_NO_REGISTER__ = [
    'PxrCombinerLightFilter.args',
    'PxrSampleFilterCombiner.args',
    'PxrDisplayFilterCombiner.args',
    'PxrShadowDisplayFilter.args',
    'PxrShadowFilter.args',
    'PxrDisplace.oso',
    'PxrSeExpr.args'
]
# map RenderMan name to Blender node name
# ex: PxrStylizedControl -> PxrStylizedControlPatternNode
__BL_NODES_MAP__ = dict()
# Cycles bl_idname mapping -> lazily loaded RfbNodeDesc (see get_cycles_node_desc)
__CYCLES_NODE_DESC_MAP__ = dict()
# Guard so register() only scans the shader paths once per session.
__RMAN_NODES_ALREADY_REGISTERED__ = False
def get_cycles_node_desc(node):
    """Return the (mapping name, RfbNodeDesc) pair for a Cycles node.

    Looks up the RenderMan OSL shader corresponding to the given Cycles
    node via CYCLES_NODE_MAP, lazily parsing and caching its description
    in __CYCLES_NODE_DESC_MAP__.

    Arguments:
        node (bpy.types.Node) - the Cycles shader node

    Returns:
        (str, RfbNodeDesc) - mapping name and node description, or
        (None, None) when the node has no RenderMan equivalent.
    """
    # FilePath is already imported at module scope; the old redundant
    # function-local import was removed.
    global __CYCLES_NODE_DESC_MAP__
    mapping = CYCLES_NODE_MAP.get(node.bl_idname, None)
    if not mapping:
        return (None, None)
    node_desc = __CYCLES_NODE_DESC_MAP__.get(mapping, None)
    if not node_desc:
        # Lazily parse the .oso file the first time this shader is needed.
        shader_path = FilePath(filepath_utils.get_cycles_shader_path()).join(FilePath('%s.oso' % mapping))
        node_desc = RfbNodeDesc(shader_path)
        __CYCLES_NODE_DESC_MAP__[mapping] = node_desc
    return (mapping, node_desc)
def class_generate_properties(node, parent_name, node_desc):
    """Generate Blender properties on *node* from a RenderMan description.

    Walks node_desc.params and node_desc.outputs and fills in:
      - node.prop_names:  flat list of top-level property/page names
      - node.prop_meta:   per-property RenderMan metadata dicts
      - node.output_meta: per-output metadata (OrderedDict keeps order)
    Properties are added as class annotations so Blender registers them
    with the class.

    Arguments:
        node (type) - the node class being generated
        parent_name (str) - the RenderMan shader name (e.g. 'PxrSurface')
        node_desc (RfbNodeDesc) - parsed .args/.oso description
    """
    prop_names = []
    prop_meta = {}
    output_meta = OrderedDict()
    if "__annotations__" not in node.__dict__:
        setattr(node, "__annotations__", {})
    # PxrOSL needs extra properties so the node can find its shader code.
    if parent_name in ["PxrOSL"]:
        # Enum for internal, external type selection.
        # NOTE: parent_name is always 'PxrOSL' inside this branch, so the
        # old unreachable NODE/INT variant of this enum was removed.
        EnumName = "codetypeswitch"
        EnumProp = EnumProperty(items=(('EXT', "External", ""),
                                       ('INT', "Internal", "")),
                                name="Shader Location", default='INT')
        EnumMeta = {'renderman_name': 'filename',
                    'name': 'codetypeswitch',
                    'renderman_type': 'string',
                    'default': '', 'label': 'codetypeswitch',
                    'type': 'enum', 'options': '',
                    'widget': 'mapper', '__noconnection': True}
        node.__annotations__[EnumName] = EnumProp
        prop_names.append(EnumName)
        prop_meta[EnumName] = EnumMeta
        # Internal file search prop
        InternalName = "internalSearch"
        InternalProp = StringProperty(name="Shader to use",
                                      description="Storage space for internal text data block",
                                      default="")
        InternalMeta = {'renderman_name': 'filename',
                        'name': 'internalSearch',
                        'renderman_type': 'string',
                        'default': '', 'label': 'internalSearch',
                        'type': 'string', 'options': '',
                        'widget': 'fileinput', '__noconnection': True}
        node.__annotations__[InternalName] = InternalProp
        prop_names.append(InternalName)
        prop_meta[InternalName] = InternalMeta
        # External file prop
        codeName = "shadercode"
        codeProp = StringProperty(name='External File', default='',
                                  subtype="FILE_PATH", description='')
        codeMeta = {'renderman_name': 'filename',
                    'name': 'ShaderCode', 'renderman_type': 'string',
                    'default': '', 'label': 'ShaderCode',
                    'type': 'string', 'options': '',
                    'widget': 'fileinput', '__noconnection': True}
        node.__annotations__[codeName] = codeProp
        prop_names.append(codeName)
        prop_meta[codeName] = codeMeta
    # inputs
    for node_desc_param in node_desc.params:
        # Pick the update callback: integrator params trigger a full
        # integrator refresh; 'enable*' params must also resync inputs.
        # (The old 'if not update_function' fallback was dead code: every
        # branch below always assigns one.)
        if node_desc.node_type == 'integrator':
            update_function = update_integrator_func
        elif 'enable' in node_desc_param.name:
            update_function = update_func_with_inputs
        else:
            update_function = update_func
        if node_desc_param.is_array():
            # this is an array
            if generate_property_utils.generate_array_property(node, prop_names, prop_meta, node_desc_param, update_function=update_function):
                continue
        name, meta, prop = generate_property_utils.generate_property(node, node_desc_param, update_function=update_function)
        if name is None:
            continue
        if hasattr(node_desc_param, 'page') and node_desc_param.page != '':
            page = node_desc_param.page
            tokens = page.split('|')
            sub_prop_names = prop_names
            page_name = tokens[0]
            if page_name not in prop_meta:
                # For pages, add a BoolProperty called '[page_name]_uio'.
                # This determines whether the page is opened or closed.
                sub_prop_names.append(page_name)
                prop_meta[page_name] = {'renderman_type': 'page', 'renderman_name': page_name}
                ui_label = "%s_uio" % page_name
                dflt = getattr(node_desc_param, 'page_open', False)
                node.__annotations__[ui_label] = BoolProperty(name=ui_label, default=dflt)
                setattr(node, page_name, [])
                # If this a PxrSurface node, add an extra BoolProperty to
                # control enabling/disabling each lobe.
                if parent_name == 'PxrSurface' and 'Globals' not in page_name:
                    enable_param_name = 'enable' + page_name.replace(' ', '')
                    if enable_param_name not in prop_meta:
                        prop_meta[enable_param_name] = {
                            'renderman_type': 'enum', 'renderman_name': enable_param_name}
                        # only the Diffuse lobe is on by default
                        default = page_name == 'Diffuse'
                        enable_param_prop = BoolProperty(name="Enable " + page_name,
                                                         default=bool(default),
                                                         update=update_func_with_inputs)
                        node.__annotations__[enable_param_name] = enable_param_prop
                    page_prop_names = getattr(node, page_name)
                    if enable_param_name not in page_prop_names:
                        page_prop_names.append(enable_param_name)
                        setattr(node, page_name, page_prop_names)
            if len(tokens) > 1:
                # Nested sub-pages: 'A|B|C' becomes pages 'A', 'A.B', 'A.B.C',
                # each registered in its parent's name list.
                for i in range(1, len(tokens)):
                    parent_page = page_name
                    page_name += '.' + tokens[i]
                    if page_name not in prop_meta:
                        prop_meta[page_name] = {'renderman_type': 'page', 'renderman_name': page_name}
                        ui_label = "%s_uio" % page_name
                        dflt = getattr(node_desc_param, 'page_open', False)
                        node.__annotations__[ui_label] = BoolProperty(name=ui_label, default=dflt)
                        setattr(node, page_name, [])
                    sub_prop_names = getattr(node, parent_page)
                    if page_name not in sub_prop_names:
                        sub_prop_names.append(page_name)
                        setattr(node, parent_page, sub_prop_names)
            # Finally file the property itself under its (deepest) page.
            sub_prop_names = getattr(node, page_name)
            sub_prop_names.append(name)
            setattr(node, page_name, sub_prop_names)
            prop_meta[name] = meta
            node.__annotations__[name] = prop
        else:
            prop_names.append(name)
            prop_meta[name] = meta
            node.__annotations__[name] = prop
    # outputs
    for node_desc_param in node_desc.outputs:
        renderman_type = node_desc_param.type
        prop_name = node_desc_param.name
        output_prop_meta = dict()
        if hasattr(node_desc_param, 'vstructmember'):
            output_prop_meta['vstructmember'] = node_desc_param.vstructmember
        if hasattr(node_desc_param, 'vstructConditionalExpr'):
            output_prop_meta['vstructConditionalExpr'] = node_desc_param.vstructConditionalExpr
        if hasattr(node_desc_param, 'vstruct'):
            output_prop_meta['vstruct'] = True
        if hasattr(node_desc_param, 'struct_name'):
            output_prop_meta['struct_name'] = node_desc_param.struct_name
        output_prop_meta['name'] = node_desc_param.name
        output_meta[prop_name] = output_prop_meta
        output_meta[prop_name]['renderman_type'] = renderman_type
    setattr(node, 'prop_names', prop_names)
    setattr(node, 'prop_meta', prop_meta)
    setattr(node, 'output_meta', output_meta)
def generate_node_type(node_desc, is_oso=False):
    '''Dynamically generate and register a Blender node class from a
    RenderMan node description.

    Arguments:
        node_desc (RfbNodeDesc) - parsed shader description
        is_oso (bool) - True when the description came from an .oso file

    Returns:
        (str, type) - Blender type name and generated class, or
        (None, None) for unsupported node types.
    '''
    name = node_desc.name
    nodeType = node_desc.node_type
    nodeDict = {'bxdf': rman_bl_nodes_shaders.RendermanBxdfNode,
                'pattern': rman_bl_nodes_shaders.RendermanPatternNode,
                'displace': rman_bl_nodes_shaders.RendermanDisplacementNode,
                'light': rman_bl_nodes_shaders.RendermanLightNode,
                'lightfilter': rman_bl_nodes_shaders.RendermanLightfilterNode,
                'samplefilter': rman_bl_nodes_shaders.RendermanSamplefilterNode,
                'displayfilter': rman_bl_nodes_shaders.RendermanDisplayfilterNode,
                'integrator': rman_bl_nodes_shaders.RendermanIntegratorNode,
                'projection': rman_bl_nodes_shaders.RendermanProjectionNode}
    if nodeType not in nodeDict:
        return (None, None)
    typename = '%s%sNode' % (name, nodeType.capitalize())
    ntype = type(typename, (nodeDict[nodeType],), {})
    ntype.bl_label = name
    ntype.typename = typename
    # Robustness: default to '' rather than raising if 'help' is absent.
    description = getattr(node_desc, 'help', '')
    if not description:
        description = name
    ntype.bl_description = description

    def init(self, context):
        """Node init callback: create sockets and any ramp helper nodes."""
        # add input/output sockets to nodes, based on type
        if self.renderman_node_type == 'bxdf':
            self.outputs.new('RendermanNodeSocketBxdf', "Bxdf")
            node_add_inputs(self, name, self.prop_names)
            node_add_outputs(self)
        elif self.renderman_node_type == 'light':
            node_add_inputs(self, name, self.prop_names)
            self.outputs.new('RendermanNodeSocketLight', "Light")
        elif self.renderman_node_type == 'lightfilter':
            node_add_inputs(self, name, self.prop_names)
            self.outputs.new('RendermanNodeSocketLightFilter', "LightFilter")
        elif self.renderman_node_type == 'displace':
            self.outputs.new('RendermanNodeSocketDisplacement', "Displacement")
            node_add_inputs(self, name, self.prop_names)
        elif self.renderman_node_type == 'displayfilter':
            self.outputs.new('RendermanNodeSocketDisplayFilter', "DisplayFilter")
            node_add_inputs(self, name, self.prop_names)
        elif self.renderman_node_type == 'samplefilter':
            self.outputs.new('RendermanNodeSocketSampleFilter', "SampleFilter")
            node_add_inputs(self, name, self.prop_names)
        elif self.renderman_node_type == 'integrator':
            self.outputs.new('RendermanNodeSocketIntegrator', "Integrator")
            node_add_inputs(self, name, self.prop_names)
        elif self.renderman_node_type == 'projection':
            self.outputs.new('RendermanNodeSocketProjection', "Projection")
            node_add_inputs(self, name, self.prop_names)
        elif name == "PxrOSL":
            self.outputs.clear()
        # else pattern
        else:
            node_add_inputs(self, name, self.prop_names)
            node_add_outputs(self)
        # deal with any ramps necessary
        color_rman_ramps = self.__annotations__.get('__COLOR_RAMPS__', [])
        float_rman_ramps = self.__annotations__.get('__FLOAT_RAMPS__', [])
        if color_rman_ramps or float_rman_ramps:
            # Ramp widgets need real ColorRamp/Curve nodes to draw; park
            # them in a fake-user node group owned by this node.
            node_group = bpy.data.node_groups.new(
                '__RMAN_FAKE_NODEGROUP__', 'ShaderNodeTree')
            node_group.use_fake_user = True
            self.rman_fake_node_group = node_group.name
            for ramp_name in color_rman_ramps:
                n = node_group.nodes.new('ShaderNodeValToRGB')
                knots = None
                knots_name = '%s_Knots' % ramp_name
                cols = None
                cols_name = '%s_Colors' % ramp_name
                for node_desc_param in node_desc.params:
                    if node_desc_param.name == knots_name:
                        knots = node_desc_param
                    elif node_desc_param.name == cols_name:
                        cols = node_desc_param
                elements = n.color_ramp.elements
                prev_val = None
                for i in range(0, len(knots.default)):
                    # skip consecutive duplicate colors
                    if not prev_val:
                        prev_val = cols.default[i]
                    elif prev_val == cols.default[i]:
                        continue
                    prev_val = cols.default[i]
                    if i == 0:
                        elem = elements[0]
                        elem.position = knots.default[i]
                    else:
                        elem = elements.new(knots.default[i])
                    elem.color = (cols.default[i][0], cols.default[i][1], cols.default[i][2], 1.0)
                setattr(self, ramp_name, n.name)
            for ramp_name in float_rman_ramps:
                n = node_group.nodes.new('ShaderNodeVectorCurve')
                knots = None
                knots_name = '%s_Knots' % ramp_name
                vals = None
                vals_name = '%s_Floats' % ramp_name
                for node_desc_param in node_desc.params:
                    if node_desc_param.name == knots_name:
                        knots = node_desc_param
                    elif node_desc_param.name == vals_name:
                        vals = node_desc_param
                curve = n.mapping.curves[0]
                n.mapping.clip_min_x = 0.0
                n.mapping.clip_min_y = 0.0
                points = curve.points
                prev_val = None
                for i in range(0, len(knots.default)):
                    # skip consecutive duplicate values
                    if not prev_val:
                        prev_val = vals.default[i]
                    elif prev_val == vals.default[i]:
                        continue
                    prev_val = vals.default[i]
                    if i == 0:
                        point = points[0]
                        point.location[0] = knots.default[i]
                        # FIX: this wrote location[0] twice, clobbering the
                        # knot (x) with the value; y belongs in location[1],
                        # matching points.new(knot, val) in the else branch.
                        point.location[1] = vals.default[i]
                    else:
                        points.new(knots.default[i], vals.default[i])
                setattr(self, ramp_name, n.name)
            self.__annotations__['__COLOR_RAMPS__'] = color_rman_ramps
            self.__annotations__['__FLOAT_RAMPS__'] = float_rman_ramps
        update_conditional_visops(self)

    def free(self):
        """Node free callback: drop the fake node group holding our ramps."""
        if self.rman_fake_node_group in bpy.data.node_groups:
            bpy.data.node_groups.remove(bpy.data.node_groups[self.rman_fake_node_group])

    ntype.init = init
    ntype.free = free
    if "__annotations__" not in ntype.__dict__:
        setattr(ntype, "__annotations__", {})
    # the name of our fake node group to hold all of our ramp nodes
    ntype.__annotations__['rman_fake_node_group'] = StringProperty('__rman_ramps__', default='')
    ntype.__annotations__['plugin_name'] = StringProperty(name='Plugin Name',
                                                          default=name, options={'HIDDEN'})
    class_generate_properties(ntype, name, node_desc)
    if nodeType == 'light':
        ntype.__annotations__['light_primary_visibility'] = BoolProperty(
            name="Light Primary Visibility",
            description="Camera visibility for this light",
            default=True)
    elif nodeType in ['samplefilter', 'displayfilter']:
        ntype.__annotations__['is_active'] = BoolProperty(
            name="Active",
            description="Enable or disable this filter",
            default=True)
    bpy.utils.register_class(ntype)
    if nodeType == 'pattern' and is_oso:
        # This is mainly here for backwards compatibility
        #
        # Originally, we postfix the class name with OSLNode
        # when loading OSL pattern nodes. However, this would have
        # caused problems when all of our C++ pattern nodes
        # become OSL shaders; older scenes that were using the C++
        # patterns will break because the old class name will not
        # exist anymore.
        #
        # We now register every pattern node with the none postfix
        # name. However, this will now break all of the scenes that
        # were created during the 24.0 beta, including our example scenes.
        # Rather than try to come up with some fancy post load handler, just
        # register the pattern node again with the postfix name.
        #
        # This code should definitely be removed in the future.
        osl_node_typename = '%s%sOSLNode' % (name, nodeType.capitalize())
        osl_node_type = type(osl_node_typename, (nodeDict[nodeType],), {})
        osl_node_type.bl_label = name
        # NOTE(review): typename is deliberately the non-postfix name here,
        # presumably so both classes resolve to the same RenderMan node --
        # confirm before "fixing".
        osl_node_type.typename = typename
        osl_node_type.init = init
        osl_node_type.free = free
        osl_node_type.bl_description = ntype.bl_description
        if "__annotations__" not in osl_node_type.__dict__:
            setattr(osl_node_type, "__annotations__", {})
        osl_node_type.__annotations__['rman_fake_node_group'] = StringProperty('__rman_ramps__', default='')
        osl_node_type.__annotations__['plugin_name'] = StringProperty(name='Plugin Name',
                                                                      default=name, options={'HIDDEN'})
        class_generate_properties(osl_node_type, name, node_desc)
        bpy.utils.register_class(osl_node_type)
    return (typename, ntype)
def register_plugin_to_parent(ntype, name, node_desc, plugin_type, parent):
    """Register a plugin PropertyGroup class and attach it to *parent*.

    Generates the plugin's properties, registers the class with Blender,
    and adds a '<name>_settings' PointerProperty annotation on the parent
    PropertyGroup so the settings show up in the matching UI panel.

    Arguments:
        ntype (type) - the dynamically generated PropertyGroup class
        name (str) - the plugin name
        node_desc (RfbNodeDesc) - parsed plugin description
        plugin_type (str) - the node_type (e.g. 'displaydriver')
        parent (type) - the PropertyGroup to attach the settings to
    """
    class_generate_properties(ntype, name, node_desc)
    setattr(ntype, 'renderman_node_type', plugin_type)
    if "__annotations__" not in parent.__dict__:
        setattr(parent, "__annotations__", {})
    # register and add to scene_settings
    bpy.utils.register_class(ntype)
    # Use the computed name (it was previously computed and then ignored).
    settings_name = "%s_settings" % name
    parent.__annotations__[settings_name] = PointerProperty(type=ntype, name="%s Settings" % name)
    # Make sure the world settings PropertyGroup has an annotations dict too.
    if "__annotations__" not in rman_properties_world.RendermanWorldSettings.__dict__:
        setattr(rman_properties_world.RendermanWorldSettings, "__annotations__", {})
def register_plugin_types(node_desc):
    """Create and register a settings PropertyGroup for a non-shading plugin.

    Only node types present in __RMAN_PLUGIN_MAPPING__ (e.g. display
    drivers, projections) are handled; anything else is ignored.

    Arguments:
        node_desc (RfbNodeDesc) - parsed plugin description
    """
    if node_desc.node_type not in __RMAN_PLUGIN_MAPPING__:
        return
    parent = __RMAN_PLUGIN_MAPPING__[node_desc.node_type]
    name = node_desc.name
    if node_desc.node_type == 'displaydriver':
        # remove the d_ prefix
        name = name.split('d_')[1]
    typename = name + node_desc.node_type.capitalize() + 'Settings'
    ntype = type(typename, (rman_bl_nodes_props.RendermanPluginSettings,), {})
    ntype.bl_label = name
    ntype.typename = typename
    ntype.bl_idname = typename
    ntype.plugin_name = name
    # Robustness: default to '' rather than raising if 'help' is absent.
    description = getattr(node_desc, 'help', '')
    if not description:
        description = name
    ntype.bl_description = description
    try:
        register_plugin_to_parent(ntype, name, node_desc, node_desc.node_type, parent)
    except Exception:
        # FIX: the old call passed 'name' as a lazy logging argument with no
        # %s placeholder in the message, which breaks log formatting.
        rfb_log().error("Error registering plugin %s", name)
        traceback.print_exc()
class RendermanWorldShaderNodeCategory(NodeCategory):
    """Node category shown only for RenderMan world shader node trees."""

    @classmethod
    def poll(cls, context):
        if context.scene.render.engine != 'PRMAN_RENDER':
            return False
        space = context.space_data
        return space.tree_type == 'ShaderNodeTree' and space.shader_type == 'WORLD'
class RendermanShaderNodeCategory(NodeCategory):
    """Node category shown only for RenderMan object (material) shader trees."""

    @classmethod
    def poll(cls, context):
        if context.scene.render.engine != 'PRMAN_RENDER':
            return False
        space = context.space_data
        return space.tree_type == 'ShaderNodeTree' and space.shader_type == 'OBJECT'
class RendermanNodeItem(NodeItem):
    '''
    Custom NodeItem class so that we can modify the way the category menus
    are drawn.
    '''

    def draw(self, item, layout, context):
        """Draw the RenderMan entry of the Add Node menu.

        Only the sentinel '__RenderMan_Node_Menu__' item draws anything;
        every real node item is skipped (those exist so nodes appear in
        the editor's search).
        """
        # skip everything but our submenu item
        if item.nodetype != '__RenderMan_Node_Menu__':
            return
        if context.space_data.shader_type == 'OBJECT':
            mat = getattr(context, 'material', None)
            if not mat:
                return
            if not shadergraph_utils.is_renderman_nodetree(mat):
                # Material not converted yet: offer the conversion operator.
                rman_icon = rfb_icons.get_icon('rman_graph')
                layout.operator(
                    'material.rman_add_rman_nodetree', icon_value=rman_icon.icon_id).idtype = "material"
            else:
                nt = mat.node_tree
                layout.context_pointer_set("nodetree", nt)
                layout.menu('NODE_MT_RM_Bxdf_Category_Menu')
                layout.menu('NODE_MT_RM_Displacement_Category_Menu')
                layout.menu('NODE_MT_RM_Pattern_Category_Menu')
                layout.menu('NODE_MT_RM_PxrSurface_Category_Menu')
                layout.menu('NODE_MT_RM_Light_Category_Menu')
        elif context.space_data.shader_type == 'WORLD':
            world = context.scene.world
            if not world.renderman.use_renderman_node:
                # World not converted yet: offer the conversion operator.
                rman_icon = rfb_icons.get_icon('rman_graph')
                layout.operator('material.rman_add_rman_nodetree', icon_value=rman_icon.icon_id).idtype = 'world'
            else:
                nt = world.node_tree
                layout.context_pointer_set("nodetree", nt)
                layout.menu('NODE_MT_RM_Integrators_Category_Menu')
                layout.menu('NODE_MT_RM_SampleFilter_Category_Menu')
                layout.menu('NODE_MT_RM_DisplayFilter_Category_Menu')
def register_rman_nodes():
    """Scan the shader search paths and register a Blender node class for
    every RenderMan .args/.oso shader found.

    Side effects: fills __RMAN_NODES__, __RMAN_NODE_TYPES__,
    __BL_NODES_MAP__ and __RMAN_NODE_CATEGORIES__, and registers the
    generated classes with Blender (via generate_node_type() /
    register_plugin_types()).
    """
    global __RMAN_NODE_CATEGORIES__
    rfb_log().debug("Registering RenderMan Plugin Nodes:")
    path_list = envconfig().get_shader_registration_paths()
    visited = set()
    for path in path_list:
        for root, dirnames, filenames in os.walk(path):
            # Prune this branch if we've already visited it (e.g., one path
            # in the path list is actually a subdirectory of another).
            real = os.path.realpath(root)
            if real in visited:
                dirnames[:] = []
                continue
            visited.add(real)
            for filename in sorted(filenames):
                if filename.endswith(('.args', '.oso')):
                    # skip registering these nodes
                    if filename in __RMAN_NODES_NO_REGISTER__:
                        continue
                    # is_args is currently unused, kept for symmetry.
                    is_oso = False
                    is_args = True
                    if filename.endswith('.oso'):
                        is_oso = True
                        is_args = False
                    node_desc = RfbNodeDesc(FilePath(root).join(FilePath(filename)))
                    # apply any overrides
                    rman_config.apply_args_overrides(filename, node_desc)
                    __RMAN_NODES__[node_desc.node_type].append(node_desc)
                    rfb_log().debug("\t%s" % node_desc.name)
                    # These plugin types are special. They are not actually shading
                    # nodes that can be used in Blender's shading editor, but
                    # we still create PropertyGroups for them so they can be inserted
                    # into the correct UI panel.
                    if node_desc.node_type in ['displaydriver']:
                        register_plugin_types(node_desc)
                        continue
                    typename, nodetype = generate_node_type(node_desc, is_oso=is_oso)
                    if not typename and not nodetype:
                        continue
                    if typename and nodetype:
                        __RMAN_NODE_TYPES__[typename] = nodetype
                        __BL_NODES_MAP__[node_desc.name] = typename
                        # categories: file the new NodeItem under the right
                        # add-menu category based on the shader's metadata.
                        node_item = RendermanNodeItem(typename, label=nodetype.bl_label)
                        if node_desc.node_type == 'pattern':
                            # Patterns: category is the last '/' token of the
                            # 'classification' metadata, else 'misc'.
                            classification = getattr(node_desc, 'classification', '')
                            if classification and classification != '':
                                tokens = classification.split('/')
                                category = tokens[-1].lower()
                                category_nice_name = category.capitalize()
                                # category seems empty. Put in misc
                                if category == '':
                                    category = 'misc'
                                lst = __RMAN_NODE_CATEGORIES__['pattern'].get('patterns_%s' % category, None)
                                if not lst:
                                    lst = (('RenderMan %s Patterns' % category_nice_name, []), [])
                                lst[0][1].append(node_item)
                                lst[1].append(node_desc)
                                __RMAN_NODE_CATEGORIES__['pattern']['patterns_%s' % category] = lst
                            else:
                                __RMAN_NODE_CATEGORIES__['pattern']['patterns_misc'][0][1].append(node_item)
                                __RMAN_NODE_CATEGORIES__['pattern']['patterns_misc'][1].append(node_desc)
                        elif node_desc.node_type == 'bxdf':
                            # Bxdfs: classification is ':'-separated; the
                            # 'rendernode...' token carries a '/'-separated
                            # path whose segments after 'bxdf' name the category.
                            classification = getattr(node_desc, 'classification', '')
                            if classification and classification != '':
                                tokens = classification.split(':')
                                category = ''
                                # first, find rendernode
                                for token in tokens:
                                    if token.startswith('rendernode'):
                                        category = token
                                        continue
                                # if we didn't find anything, put this into the misc. category
                                if category == '' or ('bxdf' not in category):
                                    __RMAN_NODE_CATEGORIES__['bxdf']['bxdf_misc'][0][1].append(node_item)
                                    __RMAN_NODE_CATEGORIES__['bxdf']['bxdf_misc'][1].append(node_desc)
                                    continue
                                # now, split on /, and look for bxdf
                                tokens = category.split('/')
                                i = 0
                                for i, token in enumerate(tokens):
                                    if token == 'bxdf':
                                        # found bxdf, all the tokens after are the category
                                        i += 1
                                        break
                                category = '_'.join(tokens[i:])
                                category_nice_name = ''
                                for token in tokens[i:]:
                                    if category_nice_name != '':
                                        category_nice_name += '/'
                                    category_nice_name += token.capitalize()
                                lst = __RMAN_NODE_CATEGORIES__['bxdf'].get('bxdf_%s' % category, None)
                                if not lst:
                                    lst = (('RenderMan %s Bxdf' % category_nice_name, []), [])
                                lst[0][1].append(node_item)
                                lst[1].append(node_desc)
                                __RMAN_NODE_CATEGORIES__['bxdf']['bxdf_%s' % category] = lst
                            else:
                                __RMAN_NODE_CATEGORIES__['bxdf']['bxdf_misc'][0][1].append(node_item)
                                __RMAN_NODE_CATEGORIES__['bxdf']['bxdf_misc'][1].append(node_desc)
                        elif node_desc.node_type == 'displace':
                            __RMAN_NODE_CATEGORIES__['displace']['displace'][0][1].append(node_item)
                            __RMAN_NODE_CATEGORIES__['displace']['displace'][1].append(node_desc)
                        elif node_desc.node_type == 'light':
                            __RMAN_NODE_CATEGORIES__['light']['light'][0][1].append(node_item)
                            __RMAN_NODE_CATEGORIES__['light']['light'][1].append(node_desc)
                        elif node_desc.node_type == 'samplefilter':
                            __RMAN_NODE_CATEGORIES__['samplefilter']['samplefilter'][0][1].append(node_item)
                            __RMAN_NODE_CATEGORIES__['samplefilter']['samplefilter'][1].append(node_desc)
                        elif node_desc.node_type == 'displayfilter':
                            __RMAN_NODE_CATEGORIES__['displayfilter']['displayfilter'][0][1].append(node_item)
                            __RMAN_NODE_CATEGORIES__['displayfilter']['displayfilter'][1].append(node_desc)
                        elif node_desc.node_type == 'integrator':
                            __RMAN_NODE_CATEGORIES__['integrator']['integrator'][0][1].append(node_item)
                            __RMAN_NODE_CATEGORIES__['integrator']['integrator'][1].append(node_desc)
                        elif node_desc.node_type == 'projection':
                            __RMAN_NODE_CATEGORIES__['projection']['projection'][0][1].append(node_item)
                            __RMAN_NODE_CATEGORIES__['projection']['projection'][1].append(node_desc)
    rfb_log().debug("Finished Registering RenderMan Plugin Nodes.")
def register_node_categories():
    """Build the RenderMan add-menu categories and register them with Blender.

    One category is created for object (material) trees and one for world
    trees; each carries a sentinel '__RenderMan_Node_Menu__' item that
    draws the custom submenu, plus the real node items so they appear in
    the editor search.
    """
    node_categories = []
    shader_items = [RendermanNodeItem('__RenderMan_Node_Menu__', label='RenderMan')]
    # Real node items must be registered in the category too, otherwise
    # they do not show up in the node search.
    for node_type in ('bxdf', 'displace', 'light', 'pattern'):
        for _, ((_, items), _) in __RMAN_NODE_CATEGORIES__[node_type].items():
            if not items:
                continue
            if node_type == 'light':
                # Lights: only PxrMeshLight belongs in the object tree menu.
                mesh_light = next(
                    (itm for itm in items if itm.label == 'PxrMeshLight'), None)
                if mesh_light is not None:
                    shader_items.append(mesh_light)
            else:
                shader_items.extend(items)
    node_categories.append(
        RendermanShaderNodeCategory('RenderMan', 'RenderMan', items=shader_items))
    world_items = [RendermanNodeItem('__RenderMan_Node_Menu__', label='RenderMan')]
    for node_type in ('integrator', 'displayfilter', 'samplefilter'):
        for _, ((_, items), _) in __RMAN_NODE_CATEGORIES__[node_type].items():
            if items:
                world_items.extend(items)
    node_categories.append(
        RendermanWorldShaderNodeCategory('RenderMan', 'RenderMan', items=world_items))
    nodeitems_utils.register_node_categories("RENDERMANSHADERNODES",
                                             node_categories)
def register():
    """Register all rman_bl_nodes submodules, node classes and categories.

    The shader-path scan only runs once per session; subsequent calls
    merely re-register the categories and submodules.
    """
    global __RMAN_NODES_ALREADY_REGISTERED__
    if not __RMAN_NODES_ALREADY_REGISTERED__:
        register_rman_nodes()
        __RMAN_NODES_ALREADY_REGISTERED__ = True
    register_node_categories()
    for submodule in (rman_bl_nodes_props, rman_bl_nodes_sockets,
                      rman_bl_nodes_shaders, rman_bl_nodes_ops,
                      rman_bl_nodes_menus):
        submodule.register()
def unregister():
    """Unregister the node categories and all rman_bl_nodes submodules."""
    try:
        nodeitems_utils.unregister_node_categories("RENDERMANSHADERNODES")
    except RuntimeError:
        # Categories may already be gone (e.g. repeated add-on disable);
        # log and carry on.  (Removed a redundant trailing 'pass'.)
        rfb_log().debug('Could not unregister node categories class: RENDERMANSHADERNODES')
    rman_bl_nodes_props.unregister()
    rman_bl_nodes_sockets.unregister()
    rman_bl_nodes_shaders.unregister()
    rman_bl_nodes_ops.unregister()
    rman_bl_nodes_menus.unregister()
| |
"""SCons.Tool.GettextCommon module
Used by several tools of `gettext` toolset.
"""
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Tool/GettextCommon.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
import SCons.Warnings
import re
#############################################################################
class XgettextToolWarning(SCons.Warnings.Warning):
    """Base warning class for the xgettext tool."""


class XgettextNotFound(XgettextToolWarning):
    """Warning raised when the xgettext executable cannot be found."""


class MsginitToolWarning(SCons.Warnings.Warning):
    """Base warning class for the msginit tool."""


class MsginitNotFound(MsginitToolWarning):
    """Warning raised when the msginit executable cannot be found."""


class MsgmergeToolWarning(SCons.Warnings.Warning):
    """Base warning class for the msgmerge tool."""


class MsgmergeNotFound(MsgmergeToolWarning):
    """Warning raised when the msgmerge executable cannot be found."""


class MsgfmtToolWarning(SCons.Warnings.Warning):
    """Base warning class for the msgfmt tool."""


class MsgfmtNotFound(MsgfmtToolWarning):
    """Warning raised when the msgfmt executable cannot be found."""
#############################################################################
# Enable all gettext tool warnings by default so that missing gettext
# binaries are reported to the user instead of being silently suppressed.
SCons.Warnings.enableWarningClass(XgettextToolWarning)
SCons.Warnings.enableWarningClass(XgettextNotFound)
SCons.Warnings.enableWarningClass(MsginitToolWarning)
SCons.Warnings.enableWarningClass(MsginitNotFound)
SCons.Warnings.enableWarningClass(MsgmergeToolWarning)
SCons.Warnings.enableWarningClass(MsgmergeNotFound)
SCons.Warnings.enableWarningClass(MsgfmtToolWarning)
SCons.Warnings.enableWarningClass(MsgfmtNotFound)
#############################################################################
#############################################################################
class _POTargetFactory(object):
    """ A factory of `PO` target files.

    Factory defaults differ from these of `SCons.Node.FS.FS`. We set
    `precious` (this is required by builders and actions gettext) and
    `noclean` flags by default for all produced nodes.
    """

    def __init__(self, env, nodefault=True, alias=None, precious=True,
                 noclean=True):
        """ Object constructor.

        **Arguments**

            - *env* (`SCons.Environment.Environment`)
            - *nodefault* (`boolean`) - if `True`, produced nodes will be
              ignored from default target `'.'`
            - *alias* (`string`) - if provided, produced nodes will be
              automatically added to this alias, and alias will be set as
              `AlwaysBuild`
            - *precious* (`boolean`) - if `True`, the produced nodes will
              be set as `Precious`.
            - *noclean* (`boolean`) - if `True`, the produced nodes will
              be excluded from `Clean`.
        """
        self.env = env
        self.alias = alias
        self.precious = precious
        self.noclean = noclean
        self.nodefault = nodefault

    def _create_node(self, name, factory, directory=None, create=1):
        """ Create node via *factory*, and set it up to factory settings. """
        # (Removed an unused function-local 'import SCons.Util'.)
        node = factory(name, directory, create)
        node.set_noclean(self.noclean)
        node.set_precious(self.precious)
        if self.nodefault:
            self.env.Ignore('.', node)
        if self.alias:
            self.env.AlwaysBuild(self.env.Alias(self.alias, node))
        return node

    def Entry(self, name, directory=None, create=1):
        """ Create `SCons.Node.FS.Entry` """
        return self._create_node(name, self.env.fs.Entry, directory, create)

    def File(self, name, directory=None, create=1):
        """ Create `SCons.Node.FS.File` """
        return self._create_node(name, self.env.fs.File, directory, create)
#############################################################################
#############################################################################
# Matches a '#' comment from the hash to the end of the line (per line).
_re_comment = re.compile(r'(#[^\n\r]+)$', re.M)
# Matches one language identifier token, e.g. 'en' or 'pt_BR'.
_re_lang = re.compile(r'([a-zA-Z0-9_]+)', re.M)
#############################################################################
def _read_linguas_from_files(env, linguas_files = None):
    """ Parse `LINGUAS` file and return list of extracted languages.

    *linguas_files* may be a list/string/node naming LINGUAS files, or a
    truthy non-file value (e.g. True) to read the default 'LINGUAS' file;
    returns [] when it is None.
    """
    import SCons.Util
    # NOTE(review): SCons.Environment appears unused here, but importing it
    # is presumably what makes the SCons.Node.FS.Base attribute chain below
    # resolvable (only SCons.Warnings is imported at module level) --
    # confirm before removing.
    import SCons.Environment
    # 'global' is unnecessary for read-only access, but kept as-is.
    global _re_comment
    global _re_lang
    if not SCons.Util.is_List(linguas_files) \
       and not SCons.Util.is_String(linguas_files) \
       and not isinstance(linguas_files, SCons.Node.FS.Base) \
       and linguas_files:
        # If, linguas_files==True or such, then read 'LINGUAS' file.
        linguas_files = [ 'LINGUAS' ]
    if linguas_files is None:
        return []
    fnodes = env.arg2nodes(linguas_files)
    linguas = []
    for fnode in fnodes:
        # Strip comments first, then collect every language token left over.
        contents = _re_comment.sub("", fnode.get_text_contents())
        ls = [ l for l in _re_lang.findall(contents) if l ]
        linguas.extend(ls)
    return linguas
#############################################################################
#############################################################################
from SCons.Builder import BuilderBase
#############################################################################
class _POFileBuilder(BuilderBase):
    """ `PO` file builder.

    This is multi-target single-source builder. In typical situation the source
    is single `POT` file, e.g. `messages.pot`, and there are multiple `PO`
    targets to be updated from this `POT`. We must run
    `SCons.Builder.BuilderBase._execute()` separately for each target to track
    dependencies separately for each target file.

    **NOTE**: if we call `SCons.Builder.BuilderBase._execute(.., target, ...)`
    with target being list of all targets, all targets would be rebuilt each time
    one of the targets from this list is missing. This would happen, for example,
    when new language `ll` enters `LINGUAS_FILE` (at this moment there is no
    `ll.po` file yet). To avoid this, we override
    `SCons.Builder.BuilderBase._execute()` and call it separately for each
    target. Here we also append to the target list the languages read from
    `LINGUAS_FILE`.
    """
    #
    # * The argument for overriding _execute(): We must use environment with
    #   builder overrides applied (see BuilderBase.__init__()). Here it comes
    #   for free.
    # * The argument against using 'emitter': The emitter is called too late
    #   by BuilderBase._execute(). If user calls, for example:
    #
    #       env.POUpdate(LINGUAS_FILE = 'LINGUAS')
    #
    #   the builder throws error, because it is called with target=None,
    #   source=None and is trying to "generate" sources or target list first.
    #   If user calls
    #
    #       env.POUpdate(['foo', 'baz'], LINGUAS_FILE = 'LINGUAS')
    #
    #   the env.BuilderWrapper() calls our builder with target=None,
    #   source=['foo', 'baz']. The BuilderBase._execute() then splits execution
    #   and executes iteratively (recursion) self._execute(None, source[i]).
    #   After that it calls emitter (which is quite too late). The emitter is
    #   also called in each iteration, which makes things yet worse.
    def __init__(self, env, **kw):
        # Fill in PO-specific defaults for any builder keyword the caller
        # did not provide explicitly.
        if not 'suffix' in kw:
            kw['suffix'] = '$POSUFFIX'
        if not 'src_suffix' in kw:
            kw['src_suffix'] = '$POTSUFFIX'
        if not 'src_builder' in kw:
            kw['src_builder'] = '_POTUpdateBuilder'
        if not 'single_source' in kw:
            kw['single_source'] = True
        alias = None
        if 'target_alias' in kw:
            alias = kw['target_alias']
            del kw['target_alias']
        if not 'target_factory' in kw:
            # PO target nodes honour precious/noclean/alias via the
            # dedicated factory.
            kw['target_factory'] = _POTargetFactory(env, alias=alias).File
        BuilderBase.__init__(self, **kw)
    def _execute(self, env, target, source, *args, **kw):
        """ Execute builder's actions.

        Here we append to `target` the languages read from `$LINGUAS_FILE` and
        apply `SCons.Builder.BuilderBase._execute()` separately to each target.
        The arguments and return value are same as for
        `SCons.Builder.BuilderBase._execute()`.
        """
        import SCons.Util
        import SCons.Node
        linguas_files = None
        if env.has_key('LINGUAS_FILE') and env['LINGUAS_FILE']:
            linguas_files = env['LINGUAS_FILE']
            # This prevents endless recursion loop (we'll be invoked once for
            # each target appended here, we must not extend the list again).
            env['LINGUAS_FILE'] = None
            linguas = _read_linguas_from_files(env, linguas_files)
            if SCons.Util.is_List(target):
                target.extend(linguas)
            elif target is not None:
                target = [target] + linguas
            else:
                target = linguas
        if not target:
            # Let the SCons.BuilderBase handle this pathologic situation
            return BuilderBase._execute(self, env, target, source, *args, **kw)
        # The rest is ours
        if not SCons.Util.is_List(target):
            target = [ target ]
        result = []
        for tgt in target:
            # One _execute() per target keeps dependency tracking per-file
            # (see the class NOTE above).
            r = BuilderBase._execute(self, env, [tgt], source, *args, **kw)
            result.extend(r)
        if linguas_files is not None:
            # Restore the value cleared above for recursion protection.
            env['LINGUAS_FILE'] = linguas_files
        return SCons.Node.NodeList(result)
#############################################################################
import SCons.Environment
#############################################################################
def _translate(env, target=None, source=SCons.Environment._null, *args, **kw):
    """ Function for `Translate()` pseudo-builder.

    Regenerates the POT template from *source*, then updates the PO files
    listed in *target* from that template.
    """
    if target is None:
        target = []
    pot_nodes = env.POTUpdate(None, source, *args, **kw)
    return env.POUpdate(target, pot_nodes, *args, **kw)
#############################################################################
#############################################################################
class RPaths(object):
    """ Callable object returning pathnames relative to the SCons current
    working directory.

    `SCons.Node.FS.Base.get_path()` returns absolute paths for nodes that lie
    outside of the current working directory (`env.fs.getcwd()`). Typically
    `SConscript`, `POT` and `PO` files live in `po/` while sources (e.g. `*.c`)
    live outside of it; references to sources are written into the `POT`
    template so a translator's `PO` editor (e.g. `poedit`) can jump to the
    referenced file and line. Editors interpret such references relative to
    the `PO` file's location, and absolute paths would make the template
    nonportable, so the references must stay relative. That is the purpose of
    this callable: `__call__` returns paths relative to the current working
    directory, assuming *xgettext(1)* runs from the directory where the target
    file is created.

    Note: this may not work for files distributed over several hosts or
    across different drives on windows; a single local filesystem is assumed
    to hold both the source files and the target `POT` templates.

    Intended use of `RPaths` - in `xgettext.py`::

        def generate(env):
            from GettextCommon import RPaths
            ...
            sources = '$( ${_concat( "", SOURCES, "", __env__, XgettextRPaths, TARGET, SOURCES)} $)'
            env.Append(
                ...
                XGETTEXTCOM = 'XGETTEXT ... ' + sources,
                ...
                XgettextRPaths = RPaths(env)
            )
    """
    # Paths are computed relative to the current working directory only;
    # that is sufficient here, so no TARGET variable or
    # SCons.Defaults.Variable_Caller machinery is needed. The result stays
    # relative even for entries outside the cwd (where SCons.Node.FS.File
    # and siblings would return an absolute path).
    def __init__(self, env):
        """ Initialize `RPaths` callable object.

        **Arguments**:
        - *env* - a `SCons.Environment.Environment` object, defines *current
          working dir*.
        """
        self.env = env
    # FIXME: the exact meaning of *args/**kw in this callable protocol is
    # not pinned down; both are currently ignored.
    def __call__(self, nodes, *args, **kw):
        """ Return nodes' paths (strings) relative to current working directory.

        **Arguments**:
        - *nodes* ([`SCons.Node.FS.Base`]) - list of nodes.
        - *args* - currently unused.
        - *kw* - currently unused.

        **Returns**:
        - Tuple of strings representing paths relative to the current working
          directory (for the given environment); non-filesystem entries are
          skipped.
        """
        import os
        import SCons.Node.FS
        cwd = self.env.fs.getcwd().get_abspath()
        # FIXME: node types other than FS.Base may deserve handling here.
        return tuple(
            os.path.relpath(node.get_abspath(), cwd)
            for node in nodes
            if isinstance(node, SCons.Node.FS.Base)
        )
#############################################################################
#############################################################################
def _init_po_files(target, source, env):
    """ Action function for `POInit` builder.

    For every missing PO target either runs msginit (when ``$POAUTOINIT`` is
    enabled) or prints a hint telling the translator how to create the file.
    """
    autoinit = env['POAUTOINIT'] if env.has_key('POAUTOINIT') else False
    do_nothing = lambda target, source, env : 0
    # Well, if everything outside works well, this loop should do single
    # iteration. Otherwise we are rebuilding all the targets even, if just
    # one has changed (but is this our fault?).
    for tgt in target:
        if tgt.exists():
            continue
        if autoinit:
            action = SCons.Action.Action('$MSGINITCOM', '$MSGINITCOMSTR')
        else:
            msg = 'File ' + repr(str(tgt)) + ' does not exist. ' \
                + 'If you are a translator, you can create it through: \n' \
                + '$MSGINITCOM'
            action = SCons.Action.Action(do_nothing, msg)
        status = action([tgt], source, env)
        if status:
            return status
    return 0
#############################################################################
#############################################################################
def _detect_xgettext(env):
    """ Detect the *xgettext(1)* binary.

    An explicit ``$XGETTEXT`` construction variable takes precedence over an
    `env.Detect()` search on the PATH. Raises `SCons.Errors.StopError` when
    the program cannot be found.
    """
    if env.has_key('XGETTEXT'):
        return env['XGETTEXT']
    xgettext = env.Detect('xgettext')
    if xgettext:
        return xgettext
    # (An unreachable `return None` after this raise was removed.)
    raise SCons.Errors.StopError(XgettextNotFound, "Could not detect xgettext")
#############################################################################
def _xgettext_exists(env):
    """ Return the detected *xgettext(1)* program (truthy);
    `_detect_xgettext` raises `SCons.Errors.StopError` when absent. """
    return _detect_xgettext(env)
#############################################################################
#############################################################################
def _detect_msginit(env):
    """ Detect the *msginit(1)* program.

    An explicit ``$MSGINIT`` construction variable takes precedence over an
    `env.Detect()` search on the PATH. Raises `SCons.Errors.StopError` when
    the program cannot be found.
    """
    if env.has_key('MSGINIT'):
        return env['MSGINIT']
    msginit = env.Detect('msginit')
    if msginit:
        return msginit
    # (An unreachable `return None` after this raise was removed.)
    raise SCons.Errors.StopError(MsginitNotFound, "Could not detect msginit")
#############################################################################
def _msginit_exists(env):
    """ Return the detected *msginit(1)* program (truthy);
    `_detect_msginit` raises `SCons.Errors.StopError` when absent. """
    return _detect_msginit(env)
#############################################################################
#############################################################################
def _detect_msgmerge(env):
    """ Detect the *msgmerge(1)* program.

    An explicit ``$MSGMERGE`` construction variable takes precedence over an
    `env.Detect()` search on the PATH. Raises `SCons.Errors.StopError` when
    the program cannot be found.
    """
    if env.has_key('MSGMERGE'):
        return env['MSGMERGE']
    msgmerge = env.Detect('msgmerge')
    if msgmerge:
        return msgmerge
    # (An unreachable `return None` after this raise was removed.)
    raise SCons.Errors.StopError(MsgmergeNotFound, "Could not detect msgmerge")
#############################################################################
def _msgmerge_exists(env):
    """ Return the detected *msgmerge(1)* program (truthy);
    `_detect_msgmerge` raises `SCons.Errors.StopError` when absent. """
    return _detect_msgmerge(env)
#############################################################################
#############################################################################
def _detect_msgfmt(env):
    """ Detect the *msgfmt(1)* program.

    An explicit ``$MSGFMT`` construction variable takes precedence over an
    `env.Detect()` search on the PATH. Raises `SCons.Errors.StopError` when
    the program cannot be found.
    """
    if env.has_key('MSGFMT'):
        return env['MSGFMT']
    msgfmt = env.Detect('msgfmt')
    if msgfmt:
        return msgfmt
    # (An unreachable `return None` after this raise was removed.)
    raise SCons.Errors.StopError(MsgfmtNotFound, "Could not detect msgfmt")
#############################################################################
def _msgfmt_exists(env):
    """ Return the detected *msgfmt(1)* program (truthy);
    `_detect_msgfmt` raises `SCons.Errors.StopError` when absent. """
    return _detect_msgfmt(env)
#############################################################################
#############################################################################
def tool_list(platform, env):
    """ Name the individual tools set up by the top-level `gettext` tool. """
    # The tool set is fixed; neither *platform* nor *env* influence it.
    return "xgettext msginit msgmerge msgfmt".split()
#############################################################################
| |
# Copyright (c) 2017 Yingxin Cheng
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
from abc import ABCMeta
from abc import abstractmethod
from functools import total_ordering
from itertools import chain
import pandas as pd
import numpy as np
from ...graph.joinables import JoinBase
from ..exc import StateError
# Diagnostic switches: `debug` enables match statistics, `debug_more`
# additionally prints per-schema match details.
debug = True
debug_more = True
# Join strategies for `JoinItem`:
# ONE: the item joins through exactly one of its join-objects ("selective").
ONE = "ONE"
# ALL: the item must be joined exactly once through every join-object.
ALL = "ALL"
# ANY: unconstrained; the item only needs to be joined at all.
ANY = "ANY"
# Set of valid strategies accepted by `JoinItem.__init__`.
JoinTypes = {ONE, ALL, ANY}
class JoiningProject(object):
    """ A named join workspace: one `PandasIndexer` per join-object.

    "from" items are join sources and "to" items are join destinations;
    joined pairs and unmatched leftovers are produced lazily by
    `yield_results()`.
    """
    def __init__(self, name, join_objs):
        assert isinstance(name, str)
        self.name = name
        # join-object -> PandasIndexer performing the matching for it
        self.works_byjo = {}
        self.cnt_tojoin = 0
        self.cnt_bejoin = 0
        self.from_items = []
        self.to_items = []
        for jo in join_objs:
            assert jo not in self.works_byjo
            self.works_byjo[jo] = PandasIndexer(join_obj=jo)
    def load_fromitem(self, join_objs, **kwds):
        """ Register a "from" item joinable through any of *join_objs*. """
        assert join_objs
        from_item = JoinItem(join_objs=join_objs, **kwds)
        self.from_items.append(from_item)
        for jo in join_objs:
            join_work = self.works_byjo.get(jo)
            if not join_work:
                # BUGFIX: the message previously interpolated the undefined
                # name `join_obj` (loop variable is `jo`), so this path
                # raised NameError instead of the intended RuntimeError.
                raise RuntimeError("Invalid join_obj %r from project %s" % (
                    jo, self.name))
            join_work.load_fromitem(from_item)
    def load_toitem(self, join_objs, **kwds):
        """ Register a "to" item; it must be joined through exactly one jo. """
        assert join_objs
        to_item = JoinItem(strategy=ONE, join_objs=join_objs, **kwds)
        self.to_items.append(to_item)
        for jo in join_objs:
            join_work = self.works_byjo.get(jo)
            if not join_work:
                # BUGFIX: same undefined-name fix as in load_fromitem().
                raise RuntimeError("Invalid join_obj %r from project %s" % (
                    jo, self.name))
            join_work.load_toitem(to_item)
    def yield_results(self, target_byname):
        """ Yield (join_obj, from_item, to_item) joins, then leftovers. """
        for jo_work in self.works_byjo.values():
            for jo, from_, to_ in jo_work.yield_results(target_byname):
                yield jo, from_, to_
        for jo, from_, to_ in self.yield_empty():
            yield jo, from_, to_
    def yield_empty(self):
        """ Yield unmatched items (one side None) and print success stats. """
        print(self.name+":")
        cnt_success = 0
        cnt_fail = 0
        for item in self.from_items:
            if item.is_success:
                cnt_success += 1
            else:
                cnt_fail += 1
                for jo in item.yield_empty():
                    yield jo, item.item, None
        print(" from: %d success, %d failed" % (
            cnt_success, cnt_fail))
        cnt_success = 0
        cnt_fail = 0
        for item in self.to_items:
            if item.is_success:
                cnt_success += 1
            else:
                cnt_fail += 1
                for jos in item.yield_empty():
                    yield jos, None, item.item
        print(" to: %d success, %d failed" % (
            cnt_success, cnt_fail))
# NOTE(review): an earlier comment said total_ordering should be unnecessary
# because items get grouped, but the decorator is applied and ordering by
# (seconds, id) is relied upon when instances are sorted/grouped downstream.
@total_ordering
class JoinItem(object):
    """ One joinable occurrence with a timestamp, env payload and per-jo peers.

    `strategy` (one of `JoinTypes`) controls how many peers the item may
    acquire; `_peers` maps each join-object to the set of peer items joined
    through it.
    """
    def __init__(self, strategy, seconds, env, item, join_objs):
        assert strategy in JoinTypes
        assert isinstance(seconds, float)
        self.strategy = strategy
        self.seconds = seconds
        self.env = env
        self.item = item
        # jo -> set of peer JoinItems joined through that jo
        self._peers = {}
        # True once set_peer() has recorded at least one peer
        self._has_peer = False
        for jo in join_objs:
            self._peers[jo] = set()
    # or "is_not_empty"
    @property
    def is_success(self):
        """ Whether this item satisfied its strategy's join requirement. """
        if self.strategy in {ONE, ANY}:
            # ONE/ANY: any single recorded peer suffices.
            if self._has_peer:
                return True
            else:
                return False
        elif self.strategy == ALL:
            # ALL: every join-object must hold exactly one peer.
            for _, peers in self._peers.items():
                if len(peers) == 0:
                    return False
                elif len(peers) > 1:
                    raise RuntimeError("All has multiple joinitems in one jo")
            return True
        else:
            raise NotImplementedError()
    # Equality/hash are identity-based; ordering breaks timestamp ties by id.
    def __eq__(self, other):
        return id(self) == id(other)
    def __lt__(self, other):
        return (self.seconds, id(self)) < (other.seconds, id(other))
    def __hash__(self):
        return id(self)
    # NOTE: only yield when is_success is False!
    # yield jos that are empty
    def yield_empty(self):
        """ Yield the join-object groups that remained unjoined. """
        if self.strategy in {ONE, ANY}:
            # Yields the whole key view: none of the jos got the single join.
            if len(self._peers) >= 1:
                yield self._peers.keys()
            else:
                raise RuntimeError("No jos in joinitem")
        elif self.strategy == ALL:
            for jo, peers in self._peers.items():
                if len(peers) == 0:
                    yield [jo]
        else:
            raise NotImplementedError()
    def set_peer(self, join_obj, peer):
        """ Record *peer* as joined to this item through *join_obj*. """
        assert isinstance(join_obj, JoinBase)
        assert isinstance(peer, JoinItem)
        if self.strategy in {ONE, ALL}:
            # ONE/ALL allow at most one peer per join-object.
            if len(self._peers[join_obj]) > 0:
                raise RuntimeError("In ALL/ONE, one jo can be joined only once")
        self._has_peer = True
        self._peers[join_obj].add(peer)
    def is_joinable(self, join_obj):
        """ Whether this item may still accept a peer through *join_obj*. """
        if self.strategy == ONE:
            # A single peer anywhere exhausts a ONE item.
            if self._has_peer:
                return False
            else:
                return True
        elif self.strategy == ALL:
            len_joins = len(self._peers[join_obj])
            if len_joins == 1:
                return False
            elif len_joins > 1:
                raise RuntimeError()
            else:
                return True
        elif self.strategy == ANY:
            return True
        else:
            raise NotImplementedError()
class IndexerBase(object):
    """ Abstract base for per-join-object indexers.

    Subclasses register "from"/"to" items and yield joined results; the base
    validates that each item's env contains the join-object's schema keys.
    """
    # NOTE(review): `__metaclass__` is the Python 2 mechanism; under Python 3
    # this assignment is ignored and @abstractmethod is not enforced --
    # confirm the intended target interpreter.
    __metaclass__ = ABCMeta
    def __init__(self, join_obj):
        assert isinstance(join_obj, JoinBase)
        self.join_obj = join_obj
    @property
    def name(self):
        # e.g. "FooIndexer" for join-object "Foo"
        return "%sIndexer" % self.join_obj.name
    @property
    def schemas(self):
        # (from_key, to_key) pairs defined by the join-object
        return self.join_obj.schemas
    @abstractmethod
    def load_fromitem(self, from_item):
        """ Validate that *from_item*'s env holds every "from" schema key. """
        assert isinstance(from_item, JoinItem)
        for from_, _ in self.schemas:
            try:
                from_item.env[from_]
            except StateError as e:
                raise StateError(
                    "Env error, incomplete from schema: %s from %r of %s!"
                    % (from_, from_item.item, self.join_obj.name), e)
    @abstractmethod
    def load_toitem(self, to_item):
        """ Validate that *to_item*'s env holds every "to" schema key. """
        assert isinstance(to_item, JoinItem)
        for _, to_ in self.schemas:
            try:
                to_item.env[to_]
            except StateError as e:
                raise StateError(
                    "Env error, incomplete schema: %s from %r of %s!"
                    % (to_, to_item.item, self.join_obj.name), e)
    @abstractmethod
    def yield_results(self):
        """ Yield joined (join_obj, from_item, to_item) triples. """
        pass
class PandasIndexer(IndexerBase):
    """ Match "from" items to "to" items of one join-object via pandas merge.

    Items are keyed by the comma-joined string values of the join-object's
    schemas; rows with equal keys are outer-merged, then paired greedily in
    timestamp order. Extensive statistics are collected and printed by
    `report()`.
    """
    def __init__(self, **kwds):
        super(PandasIndexer, self).__init__(**kwds)
        self.from_items = []
        self.to_items = []
        # ---- statistics, filled during yield_results() ----
        self.cnt_success = 0
        # items skipped because no longer joinable at indexing time
        self.from_cnt_ignored = 0
        # items whose key matched no row on the other side
        self.from_cnt_nomatch = 0
        # items with key matches, none of which could be joined
        self.from_cnt_novalidmatch = 0
        # key-collision multiplicity: max / total / number of occurrences
        self.from_cntmax_permatch = 0
        self.from_total_matches = 0
        self.from_occur_matches = 0
        self.to_cnt_ignored = 0
        self.to_cnt_nomatch = 0
        self.to_cnt_novalidmatch = 0
        self.to_cntmax_permatch = 0
        self.to_total_matches = 0
        self.to_occur_matches = 0
        # "negative offset": a from-item timestamped AFTER the to-item it
        # joined to (offset > 0 in yield_results).
        # NOTE(review): "negateve" spelling kept -- renaming would break
        # any external readers of this attribute.
        self.max_negative_offset = 0
        self.total_negative_offset = 0
        self.occur_negateve_offset = 0
    def load_fromitem(self, from_item):
        """ Validate (via base) and queue a "from" item for indexing. """
        super(PandasIndexer, self).load_fromitem(from_item)
        self.from_items.append(from_item)
    def load_toitem(self, to_item):
        """ Validate (via base) and queue a "to" item for indexing. """
        super(PandasIndexer, self).load_toitem(to_item)
        self.to_items.append(to_item)
    def yield_results(self, target_byname):
        """ Join queued items and yield (join_obj, from.item, to.item).

        *target_byname* maps target aliases to objects carrying a ``target``
        attribute; it is used to translate non-"target" schema values when the
        other side's schema key is "target".
        """
        print(self.join_obj.name+
              "(%d -> %d): "%(len(self.from_items), len(self.to_items))+
              repr(self.join_obj))
        # index from_items columns(seconds, _item, str_schema), ordered by seconds
        # calculate count ignored
        ## target alias translation: A(target_t) -> B(target), target_t needs translation
        def get_value(item, schema, other):
            ret = str(item.env[schema])
            if "target" != schema and "target" == other:
                if ret not in target_byname:
                    raise StateError("Cannot translate target %s" % ret)
                ret = target_byname[ret].target
            return ret
        str_schema = self.join_obj.str_schema
        columns = ["seconds", "_item", str_schema]
        def generate_from_rows():
            for item in self.from_items:
                if item.is_joinable(self.join_obj):
                    yield (item.seconds, item,
                           ",".join(get_value(item, schema, other)
                                    for schema, other in self.schemas))
                else:
                    self.from_cnt_ignored += 1
        self.from_items.sort(key=lambda i:i.seconds)
        from_indexer = pd.DataFrame(
            generate_from_rows(),
            index=None,
            columns=columns)
        # index to_items columns(seconds, _item, str_schema), ordered by seconds
        # calculate count ignored
        self.to_items.sort(key=lambda i:i.seconds)
        def generate_to_rows():
            for item in self.to_items:
                if item.is_joinable(self.join_obj):
                    yield (item.seconds, item,
                           ",".join(get_value(item, schema, other)
                                    for other, schema in self.schemas))
                else:
                    self.to_cnt_ignored += 1
        to_indexer = pd.DataFrame(
            generate_to_rows(),
            index=None,
            columns=columns)
        # join from_items, to_items by str_schema
        join_result = pd.merge(
            from_indexer, to_indexer,
            on=[str_schema],
            suffixes=("_from", "_to"),
            how="outer")
        matches = join_result[(join_result["_item_from"].notnull()) &
                              (join_result["_item_to"].notnull())]
        matches_byto = matches.groupby("_item_to")
        # match items, evaluate offsets
        # list of (to_item, [from_items]) sorted by to_item.seconds
        matches_byto_list = []
        for to_item, to_matches in matches_byto:
            matches_byto_list.append((to_item,
                [item for item in to_matches["_item_from"]]))
        matches_byto_list.sort(key=lambda item:item[0].seconds)
        for to_item, to_matches in matches_byto_list:
            # sort from_items by seconds
            to_matches.sort(key=lambda i:i.seconds)
            for match in to_matches:
                # greedily take the earliest still-joinable candidate
                if match.is_joinable(self.join_obj):
                    match.set_peer(self.join_obj, to_item)
                    to_item.set_peer(self.join_obj, match)
                    offset = match.seconds - to_item.seconds
                    self.max_negative_offset = max(offset, self.max_negative_offset)
                    if offset > 0:
                        # from-item occurred after the to-item it joined to
                        self.total_negative_offset += offset
                        self.occur_negateve_offset += 1
                    self.cnt_success += 1
                    yield self.join_obj, match.item, to_item.item
                    break
        if debug:
            # evaluate nomatch
            from_nomatch = join_result[join_result["_item_to"].isnull()]["_item_from"]
            to_nomatch = join_result[join_result["_item_from"].isnull()]["_item_to"]
            self.from_cnt_nomatch = len(from_nomatch)
            self.to_cnt_nomatch = len(to_nomatch)
            # evaluate multiple matches
            for _, to_matches in matches_byto:
                len_m = len(to_matches)
                self.to_cntmax_permatch = max(self.to_cntmax_permatch, len_m)
                if len_m > 1:
                    self.to_occur_matches += 1
                    self.to_total_matches += len_m
            matches_byfrom = matches.groupby("_item_from")
            for _, from_matches in matches_byfrom:
                len_m = len(from_matches)
                self.from_cntmax_permatch = max(self.from_cntmax_permatch, len_m)
                if len_m > 1:
                    self.from_occur_matches += 1
                    self.from_total_matches += len_m
            # evaluate novalidmatches
            for from_item, _ in matches_byfrom:
                if len(from_item._peers[self.join_obj]) == 0:
                    self.from_cnt_novalidmatch += 1
            for to_item, _ in matches_byto:
                if len(to_item._peers[self.join_obj]) == 0:
                    self.to_cnt_novalidmatch += 1
            if debug_more:
                max_lines = 20
                # print diagnose details
                matches_byschema = matches.groupby(str_schema)
                to_print = []
                for s_content, lines in matches_byschema:
                    if len(lines) > 1:
                        for index, line in lines.iterrows():
                            to_print.append((line["seconds_from"], #l[0]
                                             line[str_schema], #l[1]
                                             line["_item_from"], #l[2]
                                             line["_item_to"])) #l[3]
                to_print.sort(key=lambda l:l[0])
                n_lines = 0
                for l in to_print:
                    n_lines += 1
                    if n_lines > max_lines:
                        print(" ...(%d)..." % (len(to_print)-n_lines+1))
                        break
                    # "!" marks pairs/items that ended up without a valid join
                    if l[3] not in l[2]._peers[self.join_obj]:
                        label="!"
                    else:
                        label=" "
                    if len(l[2]._peers[self.join_obj]) == 0:
                        from_label="!"
                    else:
                        from_label=" "
                    if len(l[3]._peers[self.join_obj]) == 0:
                        to_label="!"
                    else:
                        to_label=" "
                    print(" %s%s: %s`%s`%s -> %s`%s`%s" % (
                        label, l[1],
                        l[2].seconds, l[2].item.keyword, from_label,
                        l[3].seconds, l[3].item.keyword, to_label))
        self.report()
    def report(self):
        """ Print the statistics collected by yield_results(). """
        print(" success: %d" % self.cnt_success)
        if self.from_cnt_ignored:
            print(" fromitems ignored: %d" % self.from_cnt_ignored)
        if self.from_cnt_nomatch:
            print(" fromitems nomatch: %d" % self.from_cnt_nomatch)
        if self.from_cnt_novalidmatch:
            print(" fromitems novalidmatch: %d" % self.from_cnt_novalidmatch)
        if self.from_occur_matches:
            print(" fromitems MULTI-MATCH: %d(max %d, evg %.5f)" % (
                self.from_occur_matches,
                self.from_cntmax_permatch,
                self.from_total_matches/float(self.from_occur_matches)))
        if self.to_cnt_ignored:
            print(" toitems ignored: %d" % self.to_cnt_ignored)
        if self.to_cnt_nomatch:
            print(" toitems nomatch: %d" % self.to_cnt_nomatch)
        if self.to_cnt_novalidmatch:
            print(" toitems novalidmatch: %d" % self.to_cnt_novalidmatch)
        if self.to_occur_matches:
            print(" toitems MULTI-MATCH: %d(max %d, evg %.5f)" % (
                self.to_occur_matches,
                self.to_cntmax_permatch,
                self.to_total_matches/float(self.to_occur_matches)))
        if self.occur_negateve_offset:
            print(" -OFFSET: %d(max %.5f, evg %.5f)" % (
                self.occur_negateve_offset,
                self.max_negative_offset,
                self.total_negative_offset/float(self.occur_negateve_offset)))
| |
import random
from raiden.constants import EMPTY_SECRET, MAXIMUM_PENDING_TRANSFERS
from raiden.settings import DEFAULT_WAIT_BEFORE_LOCK_REMOVAL
from raiden.transfer import channel
from raiden.transfer.architecture import Event, TransitionResult
from raiden.transfer.events import EventPaymentSentFailed, EventPaymentSentSuccess
from raiden.transfer.mediated_transfer.events import (
CHANNEL_IDENTIFIER_GLOBAL_QUEUE,
EventRouteFailed,
EventUnlockFailed,
EventUnlockSuccess,
SendLockedTransfer,
SendSecretReveal,
)
from raiden.transfer.mediated_transfer.state import (
InitiatorTransferState,
TransferDescriptionWithSecretState,
)
from raiden.transfer.mediated_transfer.state_change import (
ReceiveSecretRequest,
ReceiveSecretReveal,
)
from raiden.transfer.state import (
CHANNEL_STATE_OPENED,
NettingChannelState,
RouteState,
message_identifier_from_prng,
)
from raiden.transfer.state_change import Block, ContractReceiveSecretReveal, StateChange
from raiden.transfer.utils import is_valid_secret_reveal
from raiden.utils.typing import (
MYPY_ANNOTATION,
Address,
BlockExpiration,
BlockNumber,
BlockTimeout,
ChannelMap,
List,
MessageID,
Optional,
PaymentAmount,
PaymentWithFeeAmount,
Secret,
SecretHash,
TokenNetworkID,
)
def events_for_unlock_lock(
    initiator_state: InitiatorTransferState,
    channel_state: NettingChannelState,
    secret: Secret,
    secrethash: SecretHash,
    pseudo_random_generator: random.Random,
) -> List[Event]:
    """ Unlocks the lock offchain, and emits the events for the successful payment. """
    # The next hop learned the secret: claim the lock on our side of the
    # channel and announce the completed payment to the upper layers.
    description = initiator_state.transfer_description
    balance_proof = channel.send_unlock(
        channel_state=channel_state,
        message_identifier=message_identifier_from_prng(pseudo_random_generator),
        payment_identifier=description.payment_identifier,
        secret=secret,
        secrethash=secrethash,
    )
    return [
        balance_proof,
        EventPaymentSentSuccess(
            payment_network_identifier=channel_state.payment_network_identifier,
            token_network_identifier=TokenNetworkID(channel_state.token_network_identifier),
            identifier=description.payment_identifier,
            amount=description.amount,
            target=description.target,
            secret=secret,
        ),
        EventUnlockSuccess(description.payment_identifier, description.secrethash),
    ]
def handle_block(
    initiator_state: InitiatorTransferState,
    state_change: Block,
    channel_state: NettingChannelState,
    pseudo_random_generator: random.Random,
) -> TransitionResult[InitiatorTransferState]:
    """ Checks if the lock has expired, and if it has sends a remove expired
    lock and emits the failing events.
    """
    secrethash = initiator_state.transfer.lock.secrethash
    locked_lock = channel_state.our_state.secrethashes_to_lockedlocks.get(secrethash)
    if not locked_lock:
        if channel_state.partner_state.secrethashes_to_lockedlocks.get(secrethash):
            # The partner still holds the lock: keep the task alive, no events.
            return TransitionResult(initiator_state, list())
        else:
            # if lock is not in our or our partner's locked locks then the
            # task can go
            return TransitionResult(None, list())
    # Wait a grace period past the lock expiration before removing it.
    lock_expiration_threshold = BlockNumber(
        locked_lock.expiration + DEFAULT_WAIT_BEFORE_LOCK_REMOVAL
    )
    lock_has_expired, _ = channel.is_lock_expired(
        end_state=channel_state.our_state,
        lock=locked_lock,
        block_number=state_change.block_number,
        lock_expiration_threshold=lock_expiration_threshold,
    )
    events: List[Event] = list()
    # Only emit the expiry events once per transfer ("transfer_expired" guard).
    if lock_has_expired and initiator_state.transfer_state != "transfer_expired":
        is_channel_open = channel.get_status(channel_state) == CHANNEL_STATE_OPENED
        if is_channel_open:
            # A LockExpired message can only be sent over an open channel.
            expired_lock_events = channel.events_for_expired_lock(
                channel_state=channel_state,
                locked_lock=locked_lock,
                pseudo_random_generator=pseudo_random_generator,
            )
            events.extend(expired_lock_events)
        if initiator_state.received_secret_request:
            reason = "bad secret request message from target"
        else:
            reason = "lock expired"
        transfer_description = initiator_state.transfer_description
        payment_identifier = transfer_description.payment_identifier
        # TODO: When we introduce multiple transfers per payment this needs to be
        #       reconsidered. As we would want to try other routes once a route
        #       has failed, and a transfer failing does not mean the entire payment
        #       would have to fail.
        #       Related issue: https://github.com/raiden-network/raiden/issues/2329
        payment_failed = EventPaymentSentFailed(
            payment_network_identifier=transfer_description.payment_network_identifier,
            token_network_identifier=transfer_description.token_network_identifier,
            identifier=payment_identifier,
            target=transfer_description.target,
            reason=reason,
        )
        route_failed = EventRouteFailed(secrethash=secrethash)
        unlock_failed = EventUnlockFailed(
            identifier=payment_identifier,
            secrethash=initiator_state.transfer_description.secrethash,
            reason=reason,
        )
        lock_exists = channel.lock_exists_in_either_channel_side(
            channel_state=channel_state, secrethash=secrethash
        )
        initiator_state.transfer_state = "transfer_expired"
        return TransitionResult(
            # If the lock is either in our state or partner state we keep the
            # task around to wait for the LockExpired messages to sync.
            # Check https://github.com/raiden-network/raiden/issues/3183
            initiator_state if lock_exists else None,
            events + [payment_failed, route_failed, unlock_failed],
        )
    else:
        return TransitionResult(initiator_state, events)
def get_initial_lock_expiration(
    block_number: BlockNumber, reveal_timeout: BlockTimeout
) -> BlockExpiration:
    """ Returns the expiration used for all hash-time-locks in transfer. """
    # Twice the reveal timeout past the current block.
    expiration = block_number + 2 * reveal_timeout
    return BlockExpiration(expiration)
def next_channel_from_routes(
    available_routes: List[RouteState],
    channelidentifiers_to_channels: ChannelMap,
    transfer_amount: PaymentAmount,
) -> Optional[NettingChannelState]:
    """ Returns the first channel that can be used to start the transfer.

    The routing service can race with local changes, so the recommended routes
    must be validated.
    """
    for candidate in available_routes:
        channel_state = channelidentifiers_to_channels.get(candidate.channel_identifier)
        # Guard clauses: skip unknown, closed, saturated or underfunded channels.
        if not channel_state:
            continue
        if channel.get_status(channel_state) != CHANNEL_STATE_OPENED:
            continue
        if (
            channel.get_number_of_pending_transfers(channel_state.our_state)
            >= MAXIMUM_PENDING_TRANSFERS
        ):
            continue
        distributable = channel.get_distributable(
            channel_state.our_state, channel_state.partner_state
        )
        if transfer_amount > distributable:
            continue
        if channel.is_valid_amount(channel_state.our_state, transfer_amount):
            return channel_state
    return None
def try_new_route(
    channelidentifiers_to_channels: ChannelMap,
    available_routes: List[RouteState],
    transfer_description: TransferDescriptionWithSecretState,
    pseudo_random_generator: random.Random,
    block_number: BlockNumber,
) -> TransitionResult[InitiatorTransferState]:
    """ Try to start the transfer over the first usable route.

    On success the transition carries a fresh `InitiatorTransferState` plus a
    `SendLockedTransfer` event; when no route can be used it carries a `None`
    state together with an `EventPaymentSentFailed`.
    """
    channel_state = next_channel_from_routes(
        available_routes=available_routes,
        channelidentifiers_to_channels=channelidentifiers_to_channels,
        transfer_amount=transfer_description.amount,
    )
    events: List[Event] = list()
    if channel_state is None:
        # Distinguish "no routes at all" from "all recommended routes invalid".
        if not available_routes:
            reason = "there is no route available"
        else:
            reason = "none of the available routes could be used"
        transfer_failed = EventPaymentSentFailed(
            payment_network_identifier=transfer_description.payment_network_identifier,
            token_network_identifier=transfer_description.token_network_identifier,
            identifier=transfer_description.payment_identifier,
            target=transfer_description.target,
            reason=reason,
        )
        events.append(transfer_failed)
        initiator_state = None
    else:
        message_identifier = message_identifier_from_prng(pseudo_random_generator)
        lockedtransfer_event = send_lockedtransfer(
            transfer_description=transfer_description,
            channel_state=channel_state,
            message_identifier=message_identifier,
            block_number=block_number,
        )
        assert lockedtransfer_event
        initiator_state = InitiatorTransferState(
            transfer_description=transfer_description,
            channel_identifier=channel_state.identifier,
            transfer=lockedtransfer_event.transfer,
            revealsecret=None,
        )
        events.append(lockedtransfer_event)
    return TransitionResult(initiator_state, events)
def send_lockedtransfer(
    transfer_description: TransferDescriptionWithSecretState,
    channel_state: NettingChannelState,
    message_identifier: MessageID,
    block_number: BlockNumber,
) -> SendLockedTransfer:
    """ Create a mediated transfer using channel. """
    assert channel_state.token_network_identifier == transfer_description.token_network_identifier
    lock_expiration = get_initial_lock_expiration(block_number, channel_state.reveal_timeout)
    # The payment amount and the fee amount must be included in the locked
    # amount, as a guarantee to the mediator that the fee will be claimable
    # on-chain.
    locked_amount = PaymentWithFeeAmount(
        transfer_description.amount + transfer_description.allocated_fee
    )
    return channel.send_lockedtransfer(
        channel_state=channel_state,
        initiator=transfer_description.initiator,
        target=transfer_description.target,
        amount=locked_amount,
        message_identifier=message_identifier,
        payment_identifier=transfer_description.payment_identifier,
        expiration=lock_expiration,
        secrethash=transfer_description.secrethash,
    )
def handle_secretrequest(
    initiator_state: InitiatorTransferState,
    state_change: ReceiveSecretRequest,
    channel_state: NettingChannelState,
    pseudo_random_generator: random.Random,
) -> TransitionResult[InitiatorTransferState]:
    """ Handle a SecretRequest coming from the payment target.

    Only the first valid request from the target is answered with a
    SendSecretReveal; every later request (valid or not) is ignored since it
    could be a replay/attack attempt.
    """
    description = initiator_state.transfer_description
    from_target = (
        state_change.sender == description.target
        and state_change.secrethash == description.secrethash
        and state_change.payment_identifier == description.payment_identifier
    )
    lock = channel.get_lock(channel_state.our_state, description.secrethash)
    # This should not ever happen; the task clears itself once the lock is
    # removed from the channel.
    assert lock is not None, "channel is does not have the transfer's lock"
    # Messages not originating from the target never change state.
    if not from_target:
        return TransitionResult(initiator_state, list())
    # A secret request was received earlier; all subsequent ones are ignored
    # as they might be an attack.
    if initiator_state.received_secret_request:
        return TransitionResult(initiator_state, list())
    initiator_state.received_secret_request = True
    # lock.amount includes the fees while description.amount is the actual
    # payment amount; the requested amount must fall between the two for the
    # unlock to be allowed.
    request_is_valid = (
        lock.amount >= state_change.amount >= description.amount
        and state_change.expiration == lock.expiration
        and description.secret != EMPTY_SECRET
    )
    if not request_is_valid:
        return TransitionResult(initiator_state, list())
    # Reveal the secret to the target node and wait for its confirmation.
    # From here on the transfer is no longer cancellable: either the lock
    # times out or a secret reveal arrives. Note the target might be the
    # first hop.
    message_identifier = message_identifier_from_prng(pseudo_random_generator)
    revealsecret = SendSecretReveal(
        recipient=Address(description.target),
        channel_identifier=CHANNEL_IDENTIFIER_GLOBAL_QUEUE,
        message_identifier=message_identifier,
        secret=description.secret,
    )
    initiator_state.revealsecret = revealsecret
    return TransitionResult(initiator_state, [revealsecret])
def handle_offchain_secretreveal(
    initiator_state: InitiatorTransferState,
    state_change: ReceiveSecretReveal,
    channel_state: NettingChannelState,
    pseudo_random_generator: random.Random,
) -> TransitionResult[InitiatorTransferState]:
    """ Once the next hop proves it knows the secret, the initiator can unlock
    the mediated transfer.
    This will validate the secret, and if valid a new balance proof is sent to
    the next hop with the current lock removed from the merkle tree and the
    transferred amount updated.
    """
    iteration: TransitionResult[InitiatorTransferState]
    secret_matches = is_valid_secret_reveal(
        state_change=state_change,
        transfer_secrethash=initiator_state.transfer_description.secrethash,
        secret=state_change.secret,
    )
    from_partner = state_change.sender == channel_state.partner_state.address
    channel_open = channel.get_status(channel_state) == CHANNEL_STATE_OPENED
    if secret_matches and channel_open and from_partner:
        unlock_events = events_for_unlock_lock(
            initiator_state=initiator_state,
            channel_state=channel_state,
            secret=state_change.secret,
            secrethash=state_change.secrethash,
            pseudo_random_generator=pseudo_random_generator,
        )
        # The task is finished: clear the state and emit the unlock events.
        iteration = TransitionResult(None, unlock_events)
    else:
        iteration = TransitionResult(initiator_state, list())
    return iteration
def handle_onchain_secretreveal(
    initiator_state: InitiatorTransferState,
    state_change: ContractReceiveSecretReveal,
    channel_state: NettingChannelState,
    pseudo_random_generator: random.Random,
) -> TransitionResult[InitiatorTransferState]:
    """ When a secret is revealed on-chain all nodes learn the secret.
    This check the on-chain secret corresponds to the one used by the
    initiator, and if valid a new balance proof is sent to the next hop with
    the current lock removed from the merkle tree and the transferred amount
    updated.
    """
    iteration: TransitionResult[InitiatorTransferState]
    secret = state_change.secret
    secrethash = initiator_state.transfer_description.secrethash
    secret_matches = is_valid_secret_reveal(
        state_change=state_change, transfer_secrethash=secrethash, secret=secret
    )
    channel_open = channel.get_status(channel_state) == CHANNEL_STATE_OPENED
    lock_expired = state_change.block_number > initiator_state.transfer.lock.expiration
    # The secret only unlocks the transfer if it was revealed before the
    # lock's expiration block.
    can_unlock = secret_matches and not lock_expired
    if can_unlock:
        channel.register_onchain_secret(
            channel_state=channel_state,
            secret=secret,
            secrethash=secrethash,
            secret_reveal_block_number=state_change.block_number,
        )
    if can_unlock and channel_open:
        unlock_events = events_for_unlock_lock(
            initiator_state,
            channel_state,
            state_change.secret,
            state_change.secrethash,
            pseudo_random_generator,
        )
        # Task complete: clear the state and emit the unlock events.
        iteration = TransitionResult(None, unlock_events)
    else:
        iteration = TransitionResult(initiator_state, list())
    return iteration
def state_transition(
    initiator_state: InitiatorTransferState,
    state_change: StateChange,
    channel_state: NettingChannelState,
    pseudo_random_generator: random.Random,
) -> TransitionResult[InitiatorTransferState]:
    """ Dispatch a state change to the matching handler.

    Dispatch is on the exact concrete type of ``state_change`` (not
    ``isinstance``), mirroring the original if/elif ladder; unknown state
    changes leave the state untouched and produce no events.
    """
    handlers = {
        Block: handle_block,
        ReceiveSecretRequest: handle_secretrequest,
        ReceiveSecretReveal: handle_offchain_secretreveal,
        ContractReceiveSecretReveal: handle_onchain_secretreveal,
    }
    handler = handlers.get(type(state_change))
    if handler is None:
        return TransitionResult(initiator_state, list())
    return handler(initiator_state, state_change, channel_state, pseudo_random_generator)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.